--- source-7.1.5/vmblock-only/linux/dentry.c 2011-09-23 19:57:00.000000000 -0400 +++ patched/vmblock-only/linux/dentry.c 2011-12-11 18:36:33.000000000 -0500 @@ -104,7 +104,11 @@ return actualDentry->d_op->d_revalidate(actualDentry, nd); } - if (path_lookup(iinfo->name, 0, &actualNd)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) + if (compat_path_lookup(iinfo->name, 0, &actualNd)) { +#else + if (kern_path(iinfo->name, 0, &(actualNd.path))) { +#endif LOG(4, "DentryOpRevalidate: [%s] no longer exists\n", iinfo->name); return 0; } --- source-7.1.5/vmblock-only/linux/filesystem.c 2011-09-23 19:57:00.000000000 -0400 +++ patched/vmblock-only/linux/filesystem.c 2011-12-11 18:36:33.000000000 -0500 @@ -44,9 +44,14 @@ /* File system operations */ #if defined(VMW_GETSB_2618) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) static int FsOpGetSb(struct file_system_type *fsType, int flags, const char *devName, void *rawData, struct vfsmount *mnt); #else +static struct dentry *FsOpMount(struct file_system_type *fsType, int flags, + const char *devName, void *rawData); +#endif +#else static struct super_block *FsOpGetSb(struct file_system_type *fsType, int flags, const char *devName, void *rawData); #endif @@ -66,7 +71,11 @@ static struct file_system_type fsType = { .owner = THIS_MODULE, .name = VMBLOCK_FS_NAME, + #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) .get_sb = FsOpGetSb, + #else + .mount = FsOpMount, + #endif .kill_sb = kill_anon_super, }; @@ -336,7 +345,11 @@ goto error_inode; } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) if (compat_path_lookup(iinfo->name, 0, &actualNd)) { +#else + if (kern_path(iinfo->name, 0, &(actualNd.path))) { +#endif /* * This file does not exist, so we create an inode that doesn't know * about its underlying file. Operations that create files and @@ -533,18 +546,17 @@ return 0; } - #if defined(VMW_GETSB_2618) /* *----------------------------------------------------------------------------- * - * FsOpGetSb -- + * FsOpGetSb/FsOpMount -- * * Invokes generic kernel code to prepare superblock for * deviceless filesystem. 
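For readers following the vmblock hunks above: path_lookup() is no longer available on 2.6.39+ kernels, so both lookups switch to kern_path(), which fills in only the struct path embedded in the nameidata. The pattern is roughly the one sketched below; the helper name is illustrative only, and compat_path_lookup() is VMware's existing wrapper around the old call.

#include <linux/version.h>
#include <linux/namei.h>

/* Illustrative only: the conditional lookup both hunks above apply
 * to resolve iinfo->name into actualNd. */
static int
VMBlockLookupName(const char *name,            // IN: path to resolve
                  struct nameidata *actualNd)  // OUT: lookup result
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
   /* Older kernels: path_lookup() fills in the whole nameidata. */
   return compat_path_lookup(name, 0, actualNd);
#else
   /* 2.6.39+: path_lookup() is gone; only the embedded struct path
    * can be filled in, via kern_path(). */
   return kern_path(name, 0, &actualNd->path);
#endif
}

Both calls return 0 on success and a negative error code on failure, which is why the callers above can keep their existing "if (...) { no longer exists }" checks unchanged.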
* * Results: - * 0 on success + * 0/dentry on success * negative error code on failure * * Side effects: @@ -552,7 +564,7 @@ * *----------------------------------------------------------------------------- */ - +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) static int FsOpGetSb(struct file_system_type *fs_type, // IN: file system type of mount int flags, // IN: mount flags @@ -563,6 +575,16 @@ return get_sb_nodev(fs_type, flags, rawData, FsOpReadSuper, mnt); } #else +struct dentry * +FsOpMount(struct file_system_type *fs_type, // IN: file system type of mount + int flags, // IN: mount flags + const char *dev_name, // IN: device mounting on + void *rawData) // IN: mount arguments +{ + return mount_nodev(fs_type, flags, rawData, FsOpReadSuper); +} +#endif +#else /* *----------------------------------------------------------------------------- * --- source-7.1.5/vmci-only/linux/driver.c 2011-09-23 22:05:49.000000000 -0400 +++ patched/vmci-only/linux/driver.c 2011-12-11 18:36:33.000000000 -0500 @@ -42,7 +42,6 @@ #include #include #include -#include #include "compat_file.h" #include "compat_highmem.h" --- source-7.1.5/vmmon-only/linux/driver.c 2011-09-23 22:05:44.000000000 -0400 +++ patched/vmmon-only/linux/driver.c 2011-12-11 18:36:33.000000000 -0500 @@ -785,7 +785,7 @@ #define POLLQUEUE_MAX_TASK 1000 -static spinlock_t pollQueueLock __attribute__((unused)) = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(pollQueueLock); static void *pollQueue[POLLQUEUE_MAX_TASK]; static unsigned int pollQueueCount = 0; @@ -1046,7 +1046,8 @@ * but unfortunately there is no way how to detect that * we are building for RedHat's kernel... */ - static spinlock_t timerLock = SPIN_LOCK_UNLOCKED; + + static DEFINE_SPINLOCK(timerLock); spin_lock(&timerLock); mod_timer(&linuxState.pollTimer, jiffies + 1); --- source-7.1.5/vmmon-only/linux/hostif.c 2011-09-23 22:05:43.000000000 -0400 +++ patched/vmmon-only/linux/hostif.c 2011-12-11 18:36:33.000000000 -0500 @@ -46,7 +46,6 @@ #include #include -#include #include #include --- source-7.1.5/vmmon-only/linux/iommu.c 2011-09-23 22:05:43.000000000 -0400 +++ patched/vmmon-only/linux/iommu.c 2011-12-11 18:36:33.000000000 -0500 @@ -44,7 +44,7 @@ static LIST_HEAD(passthruDeviceList); -static spinlock_t passthruDeviceListLock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(passthruDeviceListLock); static void *pciHolePage = NULL; /* --- source-7.1.5/vmnet-only/Makefile.kernel 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/Makefile.kernel 2011-11-14 02:16:55.000000000 -0500 @@ -20,12 +20,9 @@ INCLUDE := -I$(SRCROOT) EXTRA_CFLAGS := $(CC_OPTS) $(INCLUDE) -EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/epoll.c, -DVMW_HAVE_EPOLL, ) -EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/sk_alloc.c,-DVMW_HAVE_SK_ALLOC_WITH_PROTO, ) EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/netdev_has_net.c,-DVMW_NETDEV_HAS_NET, ) EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/netdev_has_dev_net.c,-DVMW_NETDEV_HAS_DEV_NET, ) EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/nfhook_uses_skb.c,-DVMW_NFHOOK_USES_SKB, ) -EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/setnice.c, -DVMW_HAVE_SET_USER_NICE, ) EXTRA_CFLAGS += $(call vm_check_build, $(SRCROOT)/skblin.c, -DVMW_SKB_LINEARIZE_2618, ) obj-m += $(DRIVER).o --- source-7.1.5/vmnet-only/Makefile.normal 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/Makefile.normal 2011-11-14 02:16:55.000000000 -0500 @@ -46,11 +46,8 @@ INCLUDE += $(shell $(CC) $(INCLUDE) -E geninclude.c \ | sed -n -e 's!^APATH!-I$(HEADER_DIR)/asm!p') 
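The same kernel release dropped .get_sb from struct file_system_type in favour of .mount, which returns the root dentry, and replaced get_sb_nodev() with mount_nodev(); the FsOpMount() added above is the thin wrapper that change requires. A condensed sketch of the resulting registration, for reference (FsOpGetSb, FsOpReadSuper and VMBLOCK_FS_NAME are the existing vmblock symbols):

#include <linux/version.h>
#include <linux/module.h>
#include <linux/fs.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
static struct dentry *
FsOpMount(struct file_system_type *fs_type, // IN: file system type of mount
          int flags,                        // IN: mount flags
          const char *dev_name,             // IN: device mounting on
          void *rawData)                    // IN: mount arguments
{
   /* mount_nodev() allocates a deviceless superblock, runs
    * FsOpReadSuper() on it, and returns the root dentry (or ERR_PTR). */
   return mount_nodev(fs_type, flags, rawData, FsOpReadSuper);
}
#endif

static struct file_system_type fsType = {
   .owner   = THIS_MODULE,
   .name    = VMBLOCK_FS_NAME,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
   .get_sb  = FsOpGetSb,   /* old API: returns int, fills the vfsmount */
#else
   .mount   = FsOpMount,   /* new API: returns the root dentry */
#endif
   .kill_sb = kill_anon_super,
};

The SPIN_LOCK_UNLOCKED initializer touched in the vmmon hunks above is gone from current kernels for the same reason; static DEFINE_SPINLOCK(lock) is the drop-in replacement the patch uses.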
-CC_OPTS += $(call vm_check_build, $(SRCROOT)/epoll.c, -DVMW_HAVE_EPOLL, ) -CC_OPTS += $(call vm_check_build, $(SRCROOT)/sk_alloc.c,-DVMW_HAVE_SK_ALLOC_WITH_PROTO, ) CC_OPTS += $(call vm_check_build, $(SRCROOT)/netdev_has_net.c,-DVMW_NETDEV_HAS_NET, ) CC_OPTS += $(call vm_check_build, $(SRCROOT)/netdev_has_dev_net.c,-DVMW_NETDEV_HAS_DEV_NET, ) -CC_OPTS += $(call vm_check_build, $(SRCROOT)/setnice.c, -DVMW_HAVE_SET_USER_NICE, ) CC_OPTS += $(call vm_check_build, $(SRCROOT)/skblin.c, -DVMW_SKB_LINEARIZE_2618, ) CFLAGS := -O $(CC_WARNINGS) $(CC_OPTS) $(INCLUDE) $(GLOBAL_DEFS) --- source-7.1.5/vmnet-only/bridge.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/bridge.c 2011-11-14 02:16:55.000000000 -0500 @@ -31,6 +31,7 @@ #include #include "compat_skbuff.h" #include +#include #include "compat_sock.h" #define __KERNEL_SYSCALLS__ @@ -47,7 +48,6 @@ # include #endif #include "vmnetInt.h" -#include "compat_spinlock.h" #include "compat_netdevice.h" #include "vnetInt.h" #include "smac.h" @@ -94,17 +94,10 @@ static int VNetBridgeNotify(struct notifier_block *this, u_long msg, void *data); -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) && \ - !defined(VMW_TL10S64_WORKAROUND) -static int VNetBridgeReceiveFromDev(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *pt); -#else static int VNetBridgeReceiveFromDev(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *real_dev); -#endif static void VNetBridgeFree(VNetJack *this); static void VNetBridgeReceiveFromVNet(VNetJack *this, struct sk_buff *skb); @@ -203,48 +196,6 @@ /* *---------------------------------------------------------------------- * - * VNetBridgeCheckPromisc -- - * - * Make sure IFF_PROMISC on the peer interface is set. - * - * This can be called periodically. - * - * Results: - * None. - * - * Side effects: - * Hopefully enables promiscuous mode again if it should have been enabled. - * - *---------------------------------------------------------------------- - */ - -static INLINE_SINGLE_CALLER void -VNetBridgeCheckPromisc(VNetBridge *bridge) -{ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) - if (bridge->enabledPromisc && !bridge->wirelessAdapter) { - struct net_device *dev = bridge->dev; - Bool devPromisc = (dev->flags & IFF_PROMISC) != 0; - - if (!devPromisc) { - if (!bridge->warnPromisc) { - bridge->warnPromisc = TRUE; - LOG(0, (KERN_NOTICE "bridge-%s: someone disabled promiscuous mode\n" - "Your Ethernet driver is not compatible with VMware's bridged networking.\n", - bridge->name)); - } - rtnl_lock(); - dev_set_promiscuity(dev, 0); - rtnl_unlock(); - } - } -#endif -} - - -/* - *---------------------------------------------------------------------- - * * VNetBridgeDevCompatible -- * * Check whether bridge and network device are compatible. @@ -593,12 +544,6 @@ memcpy(dest, SKB_2_DESTMAC(skb), ETH_ALEN); - /* - * Check promiscuous bit periodically - */ - - VNetBridgeCheckPromisc(bridge); - #ifdef notdef // xxx; /* @@ -687,7 +632,7 @@ * We save it so we can recognize it (and its clones) again. 
*/ - if (VNetPacketMatch(dest, dev->dev_addr, allMultiFilter, dev->flags)) { + if (VNetPacketMatch(dest, dev->dev_addr, NULL, 0, allMultiFilter, dev->flags)) { clone = skb_clone(skb, GFP_ATOMIC); if (clone) { unsigned long flags; @@ -919,7 +864,7 @@ return res; } event.header.eventId = 0; - event.header.classSet = VNET_EVENT_CLASS_BRIDGE; + event.header.classSet = VNET_EVENT_CLASS_UPLINK; event.header.type = VNET_EVENT_TYPE_LINK_STATE; event.adapter = adapter; event.up = up; @@ -1013,7 +958,7 @@ goto out; } sock_init_data(NULL, bridge->sk); - SET_SK_DEAD(bridge->sk); + sock_set_flag(bridge->sk, SOCK_DEAD); if (VNetBridgeIsDeviceWireless(bridge->dev)) { LOG(1, (KERN_NOTICE "bridge-%s: device is wireless, enabling SMAC\n", @@ -1045,16 +990,7 @@ bridge->pt.type = htons(ETH_P_ALL); bridge->pt.dev = bridge->dev; - /* - * TurboLinux10 uses 2.6.0-test5, which we do not support, so special case it, - * 2.6.0 with tl_kernel_version_h is 2.6.0-pre5... - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) || \ - (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 0) && defined(__tl_kernel_version_h__)) - bridge->pt.data = bridge->sk; -#else bridge->pt.af_packet_priv = bridge->sk; -#endif bridge->enabledPromisc = FALSE; bridge->warnPromisc = FALSE; dev_add_pack(&bridge->pt); @@ -1064,11 +1000,11 @@ * Put in promiscuous mode if need be. */ - compat_mutex_lock(&vnetStructureMutex); + mutex_lock(&vnetStructureMutex); if (VNetGetAttachedPorts(&bridge->port.jack)) { VNetBridgeStartPromisc(bridge, rtnlLock); } - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); /* send link state up event */ retval = VNetBridgeSendLinkStateEvent(bridge, bridge->dev->ifindex, TRUE); @@ -1682,19 +1618,11 @@ *---------------------------------------------------------------------- */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) && \ - !defined(VMW_TL10S64_WORKAROUND) -int -VNetBridgeReceiveFromDev(struct sk_buff *skb, // IN: packet to receive - struct net_device *dev, // IN: unused - struct packet_type *pt) // IN: pt (pointer to bridge) -#else int VNetBridgeReceiveFromDev(struct sk_buff *skb, // IN: packet to receive struct net_device *dev, // IN: unused struct packet_type *pt, // IN: pt (pointer to bridge) struct net_device *real_dev) // IN: real device, unused -#endif { VNetBridge *bridge = list_entry(pt, VNetBridge, pt); int i; @@ -1770,12 +1698,10 @@ } } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4) /* * Unbelievable... Caller sets h.raw = nh.raw before invoking us... 
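After the bridge cleanup above, only the modern packet tap remains: the receive hook always has the four-argument signature, the private pointer travels through af_packet_priv (the old pt.data field and the TurboLinux special case are gone), and the internal socket is marked dead with sock_set_flag(sk, SOCK_DEAD) rather than the removed SET_SK_DEAD wrapper. A minimal sketch of that arrangement; the Example* names are placeholders, not VMware symbols:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/sock.h>

static int
ExampleReceiveFromDev(struct sk_buff *skb,          // IN: received frame
                      struct net_device *dev,       // IN: unused
                      struct packet_type *pt,       // IN: tap, holds our state
                      struct net_device *orig_dev)  // IN: real device, unused
{
   /* The bridge stores its socket here when it registers the tap. */
   struct sock *sk = pt->af_packet_priv;

   (void)sk;               /* the real code hands the frame to the vnet hub */
   dev_kfree_skb(skb);
   return 0;
}

static struct packet_type examplePt;

static void
ExampleAttach(struct net_device *dev, struct sock *sk)
{
   examplePt.type           = htons(ETH_P_ALL);
   examplePt.dev            = dev;
   examplePt.func           = ExampleReceiveFromDev;
   examplePt.af_packet_priv = sk;   /* replaces the old pt.data field */
   dev_add_pack(&examplePt);
}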
*/ VNetBridgeComputeHeaderPos(skb); -#endif skb_push(skb, skb->data - compat_skb_mac_header(skb)); LOG(3, (KERN_DEBUG "bridge-%s: receive %d\n", --- source-7.1.5/vmnet-only/community_source.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/community_source.h 2011-11-14 02:16:55.000000000 -0500 @@ -29,12 +29,9 @@ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON -#define INCLUDE_ALLOW_VMX -#define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE -#define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMIROM --- source-7.1.5/vmnet-only/compat_autoconf.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_autoconf.h 2011-11-14 02:16:55.000000000 -0500 @@ -22,7 +22,6 @@ #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMCORE -#define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" --- source-7.1.5/vmnet-only/compat_file.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_file.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,56 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_FILE_H__ -# define __COMPAT_FILE_H__ - - -/* The fput() API is modified in 2.2.0 --hpreg */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) -# define compat_fput(_file) fput(_file) -#else -# define compat_fput(_file) fput(_file, (_file)->f_inode) -#endif - - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) -# define compat_get_file(_file) get_file(_file) -# define compat_file_count(_file) file_count(_file) -#else -# define compat_get_file(_file) (_file)->f_count++ -# define compat_file_count(_file) (_file)->f_count -#endif - - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 4) -# define compat_filp_close(_file, _files) filp_close(_file, _files) -#else -static inline void compat_filp_close(struct file* filp, fl_owner_t files) { - if (filp->f_op && filp->f_op->flush) { - filp->f_op->flush(filp); - } - /* - * Hopefully there are no locks to release on this filp. - * locks_remove_posix is not exported so we cannot use it... - */ - fput(filp); -} -#endif - - -#endif /* __COMPAT_FILE_H__ */ --- source-7.1.5/vmnet-only/compat_highmem.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_highmem.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,40 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_HIGHMEM_H__ -# define __COMPAT_HIGHMEM_H__ - - -/* - * BIGMEM (4 GB) support appeared in 2.3.16: kmap() API added - * HIGHMEM (4 GB + 64 GB) support appeared in 2.3.23: kmap() API modified - * In 2.3.27, kmap() API modified again - * - * --hpreg - */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 27) -# include -#else -/* For page_address --hpreg */ -# include - -# define kmap(_page) (void*)page_address(_page) -# define kunmap(_page) -#endif - -#endif /* __COMPAT_HIGHMEM_H__ */ --- source-7.1.5/vmnet-only/compat_kdev_t.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_kdev_t.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,33 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_KDEV_T_H__ -# define __COMPAT_KDEV_T_H__ - - -#include - - -/* The major() API appeared in 2.5.2 --hpreg */ -#ifndef major -# define major MAJOR -# define minor MINOR -#endif - - -#endif /* __COMPAT_KDEV_T_H__ */ --- source-7.1.5/vmnet-only/compat_mm.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_mm.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,134 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_MM_H__ -# define __COMPAT_MM_H__ - - -#include - - -/* The get_page() API appeared in 2.3.7 --hpreg */ -/* Sometime during development it became function instead of macro --petr */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(get_page) -# define get_page(_page) atomic_inc(&(_page)->count) -/* The __free_page() API is exported in 2.1.67 --hpreg */ -# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 67) -# define put_page __free_page -# else -# include "compat_page.h" - -# define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT) -# define put_page(_page) free_page(page_to_phys(_page)) -# endif -#endif - - -/* page_count() is 2.4.0 invention. Unfortunately unavailable in some RedHat - * kernels (for example 2.4.21-4-RHEL3). */ -/* It is function since 2.6.0, and hopefully RedHat will not play silly games - * with mm_inline.h again... */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(page_count) -# define page_count(page) atomic_read(&(page)->count) -#endif - - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) -# define compat_vm_pgoff(vma) ((vma)->vm_offset >> PAGE_SHIFT) - -static inline unsigned long compat_do_mmap_pgoff(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long pgoff) -{ - unsigned long ret = -EINVAL; - - if (pgoff < 1 << (32 - PAGE_SHIFT)) { - ret = do_mmap(file, addr, len, prot, flag, pgoff << PAGE_SHIFT); - } - return ret; -} - -#else -# define compat_vm_pgoff(vma) (vma)->vm_pgoff -# ifdef VMW_SKAS_MMAP -# define compat_do_mmap_pgoff(f, a, l, p, g, o) \ - do_mmap_pgoff(current->mm, f, a, l, p, g, o) -# else -# define compat_do_mmap_pgoff(f, a, l, p, g, o) \ - do_mmap_pgoff(f, a, l, p, g, o) -# endif -#endif - - -/* 2.2.x uses 0 instead of some define */ -#ifndef NOPAGE_SIGBUS -#define NOPAGE_SIGBUS (0) -#endif - - -/* 2.2.x does not have HIGHMEM support */ -#ifndef GFP_HIGHUSER -#define GFP_HIGHUSER (GFP_USER) -#endif - - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) - -#include "compat_page.h" - -static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order) -{ - unsigned long addr; - - addr = __get_free_pages(gfp_mask, order); - if (!addr) { - return NULL; - } - return virt_to_page(addr); -} -#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) - -#endif - -/* - * In 2.4.14, the logic behind the UnlockPage macro was moved to the - * unlock_page() function. Later (in 2.5.12), the UnlockPage macro was removed - * altogether, and nowadays everyone uses unlock_page(). - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 14) -#define compat_unlock_page(page) UnlockPage(page) -#else -#define compat_unlock_page(page) unlock_page(page) -#endif - -/* - * In 2.4.10, vmtruncate was changed from returning void to returning int. 
- */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10) -#define compat_vmtruncate(inode, size) \ -({ \ - int result = 0; \ - vmtruncate(inode, size); \ - result; \ -}) -#else -#define compat_vmtruncate(inode, size) vmtruncate(inode, size) -#endif - - -#endif /* __COMPAT_MM_H__ */ --- source-7.1.5/vmnet-only/compat_mutex.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_mutex.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,51 +0,0 @@ -/********************************************************* - * Copyright (C) 2009 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_MUTEX_H__ -# define __COMPAT_MUTEX_H__ - - -/* Blocking mutexes were introduced in 2.6.16. */ - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) - -#include "compat_semaphore.h" - -typedef struct semaphore compat_mutex_t; - -# define compat_define_mutex(_mx) DECLARE_MUTEX(_mx) -# define compat_mutex_init(_mx) init_MUTEX(_mx) -# define compat_mutex_lock(_mx) down(_mx) -# define compat_mutex_lock_interruptible(_mx) down_interruptible(_mx) -# define compat_mutex_unlock(_mx) up(_mx) - -#else - -#include - -typedef struct mutex compat_mutex_t; - -# define compat_define_mutex(_mx) DEFINE_MUTEX(_mx) -# define compat_mutex_init(_mx) mutex_init(_mx) -# define compat_mutex_lock(_mx) mutex_lock(_mx) -# define compat_mutex_lock_interruptible(_mx) mutex_lock_interruptible(_mx) -# define compat_mutex_unlock(_mx) mutex_unlock(_mx) - -#endif - -#endif /* __COMPAT_MUTEX_H__ */ --- source-7.1.5/vmnet-only/compat_netdevice.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_netdevice.h 2011-11-21 09:47:33.000000000 -0500 @@ -36,6 +36,16 @@ #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) +#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev + functions are available. */ +#define HAVE_FREE_NETDEV /* free_netdev() */ +#define HAVE_NETDEV_PRIV /* netdev_priv() */ +#define HAVE_NETIF_QUEUE +#define HAVE_NET_DEVICE_OPS +#endif + + /* The netif_rx_ni() API appeared in 2.4.8 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8) # define netif_rx_ni netif_rx @@ -48,6 +58,17 @@ #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) +#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev + functions are available. */ +#define HAVE_FREE_NETDEV /* free_netdev() */ +#define HAVE_NETDEV_PRIV /* netdev_priv() */ +#define HAVE_NETIF_QUEUE +#define HAVE_NET_DEVICE_OPS +#endif + + + /* * SET_MODULE_OWNER appeared sometime during 2.3.x. It was setting * dev->owner = THIS_MODULE until 2.5.70, where netdevice refcounting @@ -179,20 +200,12 @@ #endif /* - * All compat_* business is good but when we can we should just provide - * missing implementation to ease upstreaming task. 
+ * In 3.1 merge window feature maros were removed from mainline, + * so let's add back ones we care about. */ -#ifndef HAVE_ALLOC_NETDEV -#define alloc_netdev(sz, name, setup) compat_alloc_netdev(sz, name, setup) -#define alloc_etherdev(sz) compat_alloc_etherdev(sz) -#endif - -#ifndef HAVE_FREE_NETDEV -#define free_netdev(dev) kfree(dev) -#endif - -#ifndef HAVE_NETDEV_PRIV -#define netdev_priv(dev) ((dev)->priv) +#if !defined(HAVE_NET_DEVICE_OPS) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) +# define HAVE_NET_DEVICE_OPS 1 #endif #if defined(NETDEV_TX_OK) @@ -203,55 +216,6 @@ # define COMPAT_NETDEV_TX_BUSY 1 #endif -#ifndef HAVE_NETIF_QUEUE -static inline void -netif_start_queue(struct device *dev) -{ - clear_bit(0, &dev->tbusy); -} - -static inline void -netif_stop_queue(struct device *dev) -{ - set_bit(0, &dev->tbusy); -} - -static inline int -netif_queue_stopped(struct device *dev) -{ - return test_bit(0, &dev->tbusy); -} - -static inline void -netif_wake_queue(struct device *dev) -{ - clear_bit(0, &dev->tbusy); - mark_bh(NET_BH); -} - -static inline int -netif_running(struct device *dev) -{ - return dev->start == 0; -} - -static inline int -netif_carrier_ok(struct device *dev) -{ - return 1; -} - -static inline void -netif_carrier_on(struct device *dev) -{ -} - -static inline void -netif_carrier_off(struct device *dev) -{ -} -#endif - /* Keep compat_* defines for now */ #define compat_netif_start_queue(dev) netif_start_queue(dev) #define compat_netif_stop_queue(dev) netif_stop_queue(dev) @@ -299,27 +263,47 @@ #else -# define compat_netif_napi_add(dev, napi, pollcb, quota) \ - do { \ - (dev)->poll = (pollcb); \ - (dev)->weight = (quota);\ - } while (0) +# define compat_napi_complete(dev, napi) netif_rx_complete(dev) # define compat_napi_schedule(dev, napi) netif_rx_schedule(dev) # define compat_napi_enable(dev, napi) netif_poll_enable(dev) # define compat_napi_disable(dev, napi) netif_poll_disable(dev) /* RedHat ported GRO to 2.6.18 bringing new napi_struct with it */ # if defined NETIF_F_GRO -# define compat_napi_complete(dev, napi) napi_complete(napi) +# define compat_netif_napi_add(netdev, napi, pollcb, quota) \ + do { \ + (netdev)->poll = (pollcb); \ + (netdev)->weight = (quota);\ + (napi)->dev = (netdev); \ + } while (0) + # else -# define compat_napi_complete(dev, napi) netif_rx_complete(dev) struct napi_struct { int dummy; }; +# define compat_netif_napi_add(dev, napi, pollcb, quota) \ + do { \ + (dev)->poll = (pollcb); \ + (dev)->weight = (quota);\ + } while (0) + # endif #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) +# define compat_netif_tx_lock(dev) netif_tx_lock(dev) +# define compat_netif_tx_unlock(dev) netif_tx_unlock(dev) +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) +# define compat_netif_tx_lock(dev) spin_lock(&dev->xmit_lock) +# define compat_netif_tx_unlock(dev) spin_unlock(&dev->xmit_lock) +#else +/* Vendor backporting (SLES 10) has muddled the tx_lock situation. Pick whichever + * of the above works for you. */ +# define compat_netif_tx_lock(dev) do {} while (0) +# define compat_netif_tx_unlock(dev) do {} while (0) +#endif + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) # define COMPAT_VLAN_GROUP_ARRAY_LEN VLAN_N_VID #else --- source-7.1.5/vmnet-only/compat_page.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_page.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,75 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. 
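The compat_netdevice.h hunks do two related things: they re-define the HAVE_* feature macros that recent kernels stopped providing (so the ancient pre-netif_queue fallbacks can simply be deleted), and they add a compat_netif_tx_lock()/compat_netif_tx_unlock() pair. The locking shim amounts to the following; this restates the patch's own scheme, including its deliberate no-op for the muddled 2.6.16/2.6.17 range:

#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Transmit-path locking shim, as added to compat_netdevice.h above. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define compat_netif_tx_lock(dev)   netif_tx_lock(dev)
# define compat_netif_tx_unlock(dev) netif_tx_unlock(dev)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
# define compat_netif_tx_lock(dev)   spin_lock(&(dev)->xmit_lock)
# define compat_netif_tx_unlock(dev) spin_unlock(&(dev)->xmit_lock)
#else
/* 2.6.16/2.6.17 and some vendor backports (SLES 10) are inconsistent;
 * the patch deliberately compiles the lock out here. */
# define compat_netif_tx_lock(dev)   do { } while (0)
# define compat_netif_tx_unlock(dev) do { } while (0)
#endif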
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_PAGE_H__ -# define __COMPAT_PAGE_H__ - - -#include -#include - - -/* The pfn_to_page() API appeared in 2.5.14 and changed to function during 2.6.x */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pfn_to_page) -# define pfn_to_page(_pfn) (mem_map + (_pfn)) -# define page_to_pfn(_page) ((_page) - mem_map) -#endif - - -/* The virt_to_page() API appeared in 2.4.0 --hpreg */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(virt_to_page) -# define virt_to_page(_kvAddr) pfn_to_page(MAP_NR(_kvAddr)) -#endif - - -/* - * The get_order() API appeared at some point in 2.3.x, and was then backported - * in 2.2.17-21mdk and in the stock 2.2.18. Because we can only detect its - * definition through makefile tricks, we provide our own for now --hpreg - */ -static inline int -compat_get_order(unsigned long size) // IN -{ - int order; - - size = (size - 1) >> (PAGE_SHIFT - 1); - order = -1; - do { - size >>= 1; - order++; - } while (size); - - return order; -} - -/* - * BUG() was added to in 2.2.18, and was moved to - * in 2.5.58. - * - * XXX: Technically, this belongs in some sort of "compat_asm_page.h" file, but - * since our compatibility wrappers don't distinguish between and - * , putting it here is reasonable. - */ -#ifndef BUG -#define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ - __asm__ __volatile__(".byte 0x0f,0x0b"); \ -} while (0) -#endif - -#endif /* __COMPAT_PAGE_H__ */ --- source-7.1.5/vmnet-only/compat_pgtable.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_pgtable.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,139 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_PGTABLE_H__ -# define __COMPAT_PGTABLE_H__ - - -#if defined(CONFIG_PARAVIRT) && defined(CONFIG_HIGHPTE) -# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21) -# include -# undef paravirt_map_pt_hook -# define paravirt_map_pt_hook(type, va, pfn) do {} while (0) -# endif -#endif -#include - - -/* pte_page() API modified in 2.3.23 to return a struct page * --hpreg */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 23) -# define compat_pte_page pte_page -#else -# include "compat_page.h" - -# define compat_pte_page(_pte) virt_to_page(pte_page(_pte)) -#endif - - -/* Appeared in 2.5.5 --hpreg */ -#ifndef pte_offset_map -/* Appeared in SuSE 8.0's 2.4.18 --hpreg */ -# ifdef pte_offset_atomic -# define pte_offset_map pte_offset_atomic -# define pte_unmap pte_kunmap -# else -# define pte_offset_map pte_offset -# define pte_unmap(_pte) -# endif -#endif - - -/* Appeared in 2.5.74-mmX --petr */ -#ifndef pmd_offset_map -# define pmd_offset_map(pgd, address) pmd_offset(pgd, address) -# define pmd_unmap(pmd) -#endif - - -/* - * Appeared in 2.6.10-rc2-mm1. Older kernels did L4 page tables as - * part of pgd_offset, or they did not have L4 page tables at all. - * In 2.6.11 pml4 -> pgd -> pmd -> pte hierarchy was replaced by - * pgd -> pud -> pmd -> pte hierarchy. - */ -#ifdef PUD_MASK -# define compat_pgd_offset(mm, address) pgd_offset(mm, address) -# define compat_pgd_present(pgd) pgd_present(pgd) -# define compat_pud_offset(pgd, address) pud_offset(pgd, address) -# define compat_pud_present(pud) pud_present(pud) -typedef pgd_t compat_pgd_t; -typedef pud_t compat_pud_t; -#elif defined(pml4_offset) -# define compat_pgd_offset(mm, address) pml4_offset(mm, address) -# define compat_pgd_present(pml4) pml4_present(pml4) -# define compat_pud_offset(pml4, address) pml4_pgd_offset(pml4, address) -# define compat_pud_present(pgd) pgd_present(pgd) -typedef pml4_t compat_pgd_t; -typedef pgd_t compat_pud_t; -#else -# define compat_pgd_offset(mm, address) pgd_offset(mm, address) -# define compat_pgd_present(pgd) pgd_present(pgd) -# define compat_pud_offset(pgd, address) (pgd) -# define compat_pud_present(pud) (1) -typedef pgd_t compat_pgd_t; -typedef pgd_t compat_pud_t; -#endif - - -#define compat_pgd_offset_k(mm, address) pgd_offset_k(address) - - -/* Introduced somewhere in 2.6.0, + backported to some 2.4 RedHat kernels */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pte_pfn) -# define pte_pfn(pte) page_to_pfn(compat_pte_page(pte)) -#endif - - -/* A page_table_lock field is added to struct mm_struct in 2.3.10 --hpreg */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 10) -# define compat_get_page_table_lock(_mm) (&(_mm)->page_table_lock) -#else -# define compat_get_page_table_lock(_mm) NULL -#endif - - -/* - * Define VM_PAGE_KERNEL_EXEC for vmapping executable pages. - * - * On ia32 PAGE_KERNEL_EXEC was introduced in 2.6.8.1. Unfortunately it accesses - * __PAGE_KERNEL_EXEC which is not exported for modules. So we use - * __PAGE_KERNEL and just cut _PAGE_NX bit from it. - * - * For ia32 kernels before 2.6.8.1 we use PAGE_KERNEL directly, these kernels - * do not have noexec support. 
- * - * On x86-64 situation is a bit better: they always supported noexec, but - * before 2.6.8.1 flag was named PAGE_KERNEL_EXECUTABLE, and it was renamed - * to PAGE_KERNEL_EXEC when ia32 got noexec too (see above). - */ -#ifdef CONFIG_X86 -#ifdef _PAGE_NX -#define VM_PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX) -#else -#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL -#endif -#else -#ifdef PAGE_KERNEL_EXECUTABLE -#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXECUTABLE -#else -#define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC -#endif -#endif - - -#endif /* __COMPAT_PGTABLE_H__ */ --- source-7.1.5/vmnet-only/compat_sched.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_sched.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,305 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_SCHED_H__ -# define __COMPAT_SCHED_H__ - - -#include - -/* CLONE_KERNEL available in 2.5.35 and higher. */ -#ifndef CLONE_KERNEL -#define CLONE_KERNEL CLONE_FILES | CLONE_FS | CLONE_SIGHAND -#endif - -/* TASK_COMM_LEN become available in 2.6.11. */ -#ifndef TASK_COMM_LEN -#define TASK_COMM_LEN 16 -#endif - -/* The capable() API appeared in 2.1.92 --hpreg */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 92) -# define capable(_capability) suser() -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) -# define need_resched() need_resched -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3) -# define need_resched() (current->need_resched) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3) -# define cond_resched() (need_resched() ? schedule() : (void) 0) -#endif - -/* Oh well. We need yield... Happy us! */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20) -# ifdef __x86_64__ -# define compat_yield() there_is_nothing_like_yield() -# else -# include -# include - -/* - * Used by _syscallX macros. Note that this is global variable, so - * do not rely on its contents too much. As exit() is only function - * we use, and we never check return value from exit(), we have - * no problem... - */ -extern int errno; - -/* - * compat_exit() provides an access to the exit() function. It must - * be named compat_exit(), as exit() (with different signature) is - * provided by x86-64, arm and other (but not by i386). - */ -# define __NR_compat_yield __NR_sched_yield -static inline _syscall0(int, compat_yield); -# endif -#else -# define compat_yield() yield() -#endif - - -/* - * Since 2.5.34 there are two methods to enumerate tasks: - * for_each_process(p) { ... } which enumerates only tasks and - * do_each_thread(g,t) { ... } while_each_thread(g,t) which enumerates - * also threads even if they share same pid. 
- */ -#ifndef for_each_process -# define for_each_process(p) for_each_task(p) -#endif - -#ifndef do_each_thread -# define do_each_thread(g, t) for_each_task(g) { t = g; do -# define while_each_thread(g, t) while (0) } -#endif - - -/* - * Lock for signal mask is moving target... - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 40) && defined(CLONE_PID) -/* 2.4.x without NPTL patches or early 2.5.x */ -#define compat_sigmask_lock sigmask_lock -#define compat_dequeue_signal_current(siginfo_ptr) \ - dequeue_signal(¤t->blocked, (siginfo_ptr)) -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 60) && !defined(INIT_SIGHAND) -/* RedHat's 2.4.x with first version of NPTL support, or 2.5.40 to 2.5.59 */ -#define compat_sigmask_lock sig->siglock -#define compat_dequeue_signal_current(siginfo_ptr) \ - dequeue_signal(¤t->blocked, (siginfo_ptr)) -#else -/* RedHat's 2.4.x with second version of NPTL support, or 2.5.60+. */ -#define compat_sigmask_lock sighand->siglock -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -#define compat_dequeue_signal_current(siginfo_ptr) \ - dequeue_signal(¤t->blocked, (siginfo_ptr)) -#else -#define compat_dequeue_signal_current(siginfo_ptr) \ - dequeue_signal(current, ¤t->blocked, (siginfo_ptr)) -#endif -#endif - -/* - * recalc_sigpending() had task argument in the past - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 29) && defined(CLONE_PID) -/* 2.4.x without NPTL patches or early 2.5.x */ -#define compat_recalc_sigpending() recalc_sigpending(current) -#else -/* RedHat's 2.4.x with NPTL support, or 2.5.29+ */ -#define compat_recalc_sigpending() recalc_sigpending() -#endif - - -/* - * reparent_to_init() was introduced in 2.4.8. In 2.5.38 (or possibly - * earlier, but later than 2.5.31) a call to it was added into - * daemonize(), so compat_daemonize no longer needs to call it. - * - * In 2.4.x kernels reparent_to_init() forgets to do correct refcounting - * on current->user. It is better to count one too many than one too few... - */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 38) -#define compat_reparent_to_init() do { \ - reparent_to_init(); \ - atomic_inc(¤t->user->__count); \ - } while (0) -#else -#define compat_reparent_to_init() do {} while (0) -#endif - - -/* - * daemonize appeared in 2.2.18. Except 2.2.17-4-RH7.0, which has it too. - * Fortunately 2.2.17-4-RH7.0 uses versioned symbols, so we can check - * its existence with defined(). - */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) && !defined(daemonize) -static inline void daemonize(void) { - struct fs_struct *fs; - - exit_mm(current); - current->session = 1; - current->pgrp = 1; - exit_fs(current); - fs = init_task.fs; - current->fs = fs; - atomic_inc(&fs->count); -} -#endif - - -/* - * flush_signals acquires sighand->siglock since 2.5.61... Verify RH's kernels! - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) -#define compat_flush_signals(task) do { \ - spin_lock_irq(&task->compat_sigmask_lock); \ - flush_signals(task); \ - spin_unlock_irq(&task->compat_sigmask_lock); \ - } while (0) -#else -#define compat_flush_signals(task) flush_signals(task) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) -#define compat_allow_signal(signr) do { \ - spin_lock_irq(¤t->compat_sigmask_lock); \ - sigdelset(¤t->blocked, signr); \ - compat_recalc_sigpending(); \ - spin_unlock_irq(¤t->compat_sigmask_lock); \ - } while (0) -#else -#define compat_allow_signal(signr) allow_signal(signr) -#endif - -/* - * daemonize can set process name since 2.5.61. 
Prior to 2.5.61, daemonize - * didn't block signals on our behalf. - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) -#define compat_daemonize(x...) \ -({ \ - /* Beware! No snprintf here, so verify arguments! */ \ - sprintf(current->comm, x); \ - \ - /* Block all signals. */ \ - spin_lock_irq(¤t->compat_sigmask_lock); \ - sigfillset(¤t->blocked); \ - compat_recalc_sigpending(); \ - spin_unlock_irq(¤t->compat_sigmask_lock); \ - compat_flush_signals(current); \ - \ - daemonize(); \ - compat_reparent_to_init(); \ -}) -#else -#define compat_daemonize(x...) daemonize(x) -#endif - - -/* - * set priority for specified thread. Exists on 2.6.x kernels and some - * 2.4.x vendor's kernels. - */ -#if defined(VMW_HAVE_SET_USER_NICE) -#define compat_set_user_nice(task, n) set_user_nice((task), (n)) -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) -#define compat_set_user_nice(task, n) do { (task)->priority = 20 - (n); } while (0) -#elif !defined(VMW_HAVE_SET_USER_NICE) -#define compat_set_user_nice(task, n) do { (task)->nice = (n); } while (0) -#endif - -/* - * try to freeze a process. For kernels 2.6.11 or newer, we know how to choose - * the interface. The problem is that the oldest interface, introduced in - * 2.5.18, was backported to 2.4.x kernels. So if we're older than 2.6.11, - * we'll decide what to do based on whether or not swsusp was configured - * for the kernel. For kernels 2.6.20 and newer, we'll also need to include - * freezer.h since the try_to_freeze definition was pulled out of sched.h. - */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) -#include -#endif -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) || defined(VMW_TL10S64_WORKAROUND) -#define compat_try_to_freeze() try_to_freeze() -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) -#define compat_try_to_freeze() try_to_freeze(PF_FREEZE) -#elif defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_SOFTWARE_SUSPEND2) -#include "compat_mm.h" -#include -#include -static inline int compat_try_to_freeze(void) { - if (current->flags & PF_FREEZE) { - refrigerator(PF_FREEZE); - return 1; - } else { - return 0; - } -} -#else -static inline int compat_try_to_freeze(void) { return 0; } -#endif - -/* - * As of 2.6.23-rc1, kernel threads are no longer freezable by - * default. Instead, kernel threads that need to be frozen must opt-in - * by calling set_freezable() as soon as the thread is created. - */ - -#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22) -#define compat_set_freezable() do { set_freezable(); } while (0) -#else -#define compat_set_freezable() do {} while (0) -#endif - -/* - * Around 2.6.27 kernel stopped sending signals to kernel - * threads being frozen, instead threads have to check - * freezing() or use wait_event_freezable(). Unfortunately - * wait_event_freezable() completely hides the fact that - * thread was frozen from calling code and sometimes we do - * want to know that. - */ -#ifdef PF_FREEZER_NOSIG -#define compat_wait_check_freezing() freezing(current) -#else -#define compat_wait_check_freezing() (0) -#endif - -/* - * Since 2.6.27-rc2 kill_proc() is gone... Replacement (GPL-only!) - * API is available since 2.6.19. Use them from 2.6.27-rc1 up. 
- */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) -typedef int compat_pid; -#define compat_find_get_pid(pid) (pid) -#define compat_put_pid(pid) do { } while (0) -#define compat_kill_pid(pid, sig, flag) kill_proc(pid, sig, flag) -#else -typedef struct pid * compat_pid; -#define compat_find_get_pid(pid) find_get_pid(pid) -#define compat_put_pid(pid) put_pid(pid) -#define compat_kill_pid(pid, sig, flag) kill_pid(pid, sig, flag) -#endif - - -#endif /* __COMPAT_SCHED_H__ */ --- source-7.1.5/vmnet-only/compat_semaphore.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_semaphore.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,66 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_SEMAPHORE_H__ -# define __COMPAT_SEMAPHORE_H__ - - -/* <= 2.6.25 have asm only, 2.6.26 has both, and 2.6.27-rc2+ has linux only. */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) -# include -#else -# include -#endif - - -#if defined CONFIG_PREEMPT_RT && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) - /* - * The -rt patch series changes the name of semaphore/mutex initialization - * routines (across the entire kernel). Probably to identify locations that - * need to be audited for spinlock vs. true semaphore. We always assumed - * true semaphore, so just apply the rename. - * - * The -rt patchset added the rename between 2.6.29-rt and 2.6.31-rt. 
- */ - #ifndef DECLARE_MUTEX - #define DECLARE_MUTEX(_m) DEFINE_SEMAPHORE(_m) - #endif - #ifndef init_MUTEX - #define init_MUTEX(_m) semaphore_init(_m) - #endif -#endif - -/* -* The init_MUTEX_LOCKED() API appeared in 2.2.18, and is also in -* 2.2.17-21mdk --hpreg -*/ - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) - #ifndef init_MUTEX_LOCKED - #define init_MUTEX_LOCKED(_sem) *(_sem) = MUTEX_LOCKED - #endif - #ifndef DECLARE_MUTEX - #define DECLARE_MUTEX(name) struct semaphore name = MUTEX - #endif - #ifndef DECLARE_MUTEX_LOCKED - #define DECLARE_MUTEX_LOCKED(name) struct semaphore name = MUTEX_LOCKED - #endif -#endif - - -#endif /* __COMPAT_SEMAPHORE_H__ */ --- source-7.1.5/vmnet-only/compat_skbuff.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_skbuff.h 2011-11-14 02:16:55.000000000 -0500 @@ -36,8 +36,11 @@ #define compat_skb_tail_pointer(skb) skb_tail_pointer(skb) #define compat_skb_end_pointer(skb) skb_end_pointer(skb) #define compat_skb_ip_header(skb) ((struct iphdr *)skb_network_header(skb)) +#define compat_skb_ipv6_header(skb) ((struct ipv6hdr *)skb_network_header(skb)) #define compat_skb_tcp_header(skb) ((struct tcphdr *)skb_transport_header(skb)) -#define compat_skb_reset_mac_header(skb) skb_reset_mac_header(skb) +#define compat_skb_reset_mac_header(skb) skb_reset_mac_header(skb) +#define compat_skb_reset_network_header(skb) skb_reset_network_header(skb) +#define compat_skb_reset_transport_header(skb) skb_reset_transport_header(skb) #define compat_skb_set_network_header(skb, off) skb_set_network_header(skb, off) #define compat_skb_set_transport_header(skb, off) skb_set_transport_header(skb, off) #else @@ -50,8 +53,11 @@ #define compat_skb_tail_pointer(skb) (skb)->tail #define compat_skb_end_pointer(skb) (skb)->end #define compat_skb_ip_header(skb) (skb)->nh.iph +#define compat_skb_ipv6_header(skb) (skb)->nh.ipv6h #define compat_skb_tcp_header(skb) (skb)->h.th #define compat_skb_reset_mac_header(skb) ((skb)->mac.raw = (skb)->data) +#define compat_skb_reset_network_header(skb) ((skb)->nh.raw = (skb)->data) +#define compat_skb_reset_transport_header(skb) ((skb)->h.raw = (skb)->data) #define compat_skb_set_network_header(skb, off) ((skb)->nh.raw = (skb)->data + (off)) #define compat_skb_set_transport_header(skb, off) ((skb)->h.raw = (skb)->data + (off)) #endif --- source-7.1.5/vmnet-only/compat_slab.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_slab.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,85 +0,0 @@ -/********************************************************* - * Copyright (C) 2005 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_SLAB_H__ -# define __COMPAT_SLAB_H__ - - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) -# include -#else -# include -#endif - -/* - * Before 2.6.20, kmem_cache_t was the accepted way to refer to a kmem_cache - * structure. Prior to 2.6.15, this structure was called kmem_cache_s, and - * afterwards it was renamed to kmem_cache. Here we keep things simple and use - * the accepted typedef until it became deprecated, at which point we switch - * over to the kmem_cache name. - */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) -# define compat_kmem_cache struct kmem_cache -#else -# define compat_kmem_cache kmem_cache_t -#endif - -/* - * Up to 2.6.22 kmem_cache_create has 6 arguments - name, size, alignment, flags, - * constructor, and destructor. Then for some time kernel was asserting that - * destructor is NULL, and since 2.6.23-pre1 kmem_cache_create takes only 5 - * arguments - destructor is gone. - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) || defined(VMW_KMEMCR_HAS_DTOR) -#define compat_kmem_cache_create(name, size, align, flags, ctor) \ - kmem_cache_create(name, size, align, flags, ctor, NULL) -#else -#define compat_kmem_cache_create(name, size, align, flags, ctor) \ - kmem_cache_create(name, size, align, flags, ctor) -#endif - -/* - * Up to 2.6.23 kmem_cache constructor has three arguments - pointer to block to - * prepare (aka "this"), from which cache it came, and some unused flags. After - * 2.6.23 flags were removed, and order of "this" and cache parameters was swapped... - * Since 2.6.27-rc2 everything is different again, and ctor has only one argument. - * - * HAS_3_ARGS has precedence over HAS_2_ARGS if both are defined. - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23) && !defined(VMW_KMEMCR_CTOR_HAS_3_ARGS) -# define VMW_KMEMCR_CTOR_HAS_3_ARGS -#endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(VMW_KMEMCR_CTOR_HAS_2_ARGS) -# define VMW_KMEMCR_CTOR_HAS_2_ARGS -#endif - -#if defined(VMW_KMEMCR_CTOR_HAS_3_ARGS) -typedef void compat_kmem_cache_ctor(void *, compat_kmem_cache *, unsigned long); -#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg, \ - compat_kmem_cache *cache, \ - unsigned long flags -#elif defined(VMW_KMEMCR_CTOR_HAS_2_ARGS) -typedef void compat_kmem_cache_ctor(compat_kmem_cache *, void *); -#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) compat_kmem_cache *cache, \ - void *arg -#else -typedef void compat_kmem_cache_ctor(void *); -#define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg -#endif - -#endif /* __COMPAT_SLAB_H__ */ --- source-7.1.5/vmnet-only/compat_sock.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_sock.h 2011-11-14 02:16:55.000000000 -0500 @@ -22,79 +22,13 @@ #include /* for NULL */ #include -/* - * Between 2.5.70 and 2.5.71 all sock members were renamed from XXX to sk_XXX. 
- */ - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 71) -# define compat_sk_backlog_rcv backlog_rcv -# define compat_sk_destruct destruct -# define compat_sk_shutdown shutdown -# define compat_sk_receive_queue receive_queue -# define compat_sk_sleep sleep -# define compat_sk_err err -# define compat_sk_state_change state_change -# define compat_sk_data_ready data_ready -# define compat_sk_write_space write_space -# define compat_sk_error_report error_report -# define compat_sk_type type -# define compat_sk_refcnt refcnt -# define compat_sk_state state -# define compat_sk_error_report error_report -# define compat_sk_socket socket -# define compat_sk_ack_backlog ack_backlog -# define compat_sk_max_ack_backlog max_ack_backlog -# define compat_sk_user_data user_data -# define compat_sk_rcvtimeo rcvtimeo -#else -# define compat_sk_backlog_rcv sk_backlog_rcv -# define compat_sk_destruct sk_destruct -# define compat_sk_shutdown sk_shutdown -# define compat_sk_receive_queue sk_receive_queue -# define compat_sk_sleep sk_sleep -# define compat_sk_err sk_err -# define compat_sk_state_change sk_state_change -# define compat_sk_data_ready sk_data_ready -# define compat_sk_write_space sk_write_space -# define compat_sk_error_report sk_error_report -# define compat_sk_type sk_type -# define compat_sk_refcnt sk_refcnt -# define compat_sk_state sk_state -# define compat_sk_error_report sk_error_report -# define compat_sk_socket sk_socket -# define compat_sk_ack_backlog sk_ack_backlog -# define compat_sk_max_ack_backlog sk_max_ack_backlog -# define compat_sk_user_data sk_user_data -# define compat_sk_rcvtimeo sk_rcvtimeo -#endif - #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) static inline wait_queue_head_t *sk_sleep(struct sock *sk) { - return sk->compat_sk_sleep; + return sk->sk_sleep; } #endif -/* - * Prior to 2.5.65, struct sock contained individual fields for certain - * socket flags including SOCK_DONE. Between 2.5.65 and 2.5.71 these were - * replaced with a bitmask but the generic bit test functions were used. - * In 2.5.71, these were replaced with socket specific functions. - */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 71) -# define compat_sock_test_done(sk) sock_flag(sk, SOCK_DONE) -# define compat_sock_set_done(sk) sock_set_flag(sk, SOCK_DONE) -# define compat_sock_reset_done(sk) sock_reset_flag(sk, SOCK_DONE) -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 65) -# define compat_sock_test_done(sk) test_bit(SOCK_DONE, &(sk)->flags) -# define compat_sock_set_done(sk) __set_bit(SOCK_DONE, &(sk)->flags) -# define compat_sock_reset_done(sk) __clear_bit(SOCK_DONE, &(sk)->flags) -#else -# define compat_sock_test_done(sk) (sk)->done -# define compat_sock_set_done(sk) ((sk)->done = 1) -# define compat_sock_reset_done(sk) ((sk)->done = 0) -#endif - /* * Prior to 2.6.24, there was no sock network namespace member. In 2.6.26, it @@ -107,73 +41,31 @@ # define compat_sock_net(sk) sk->sk_net #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 42) -# define compat_sock_owned_by_user(sk) ((sk)->lock.users != 0) -#else -# define compat_sock_owned_by_user(sk) sock_owned_by_user(sk) -#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) -/* - * Up until 2.4.21 for the 2.4 series and 2.5.60 for the 2.5 series, - * sk_filter() calls were protected with CONFIG_FILTER. Wrapping our compat - * definition in a similar check allows us to build on those kernels. 
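The compat_sock.h rewrite assumes every supported kernel already uses the sk_-prefixed socket fields, so the renaming layer goes away and only the pre-2.6.16 receive helper survives. Written out as a function, the macro that remains behaves as below (the function name is illustrative; on kernels without CONFIG_FILTER the header defines sk_filter() to 0):

#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>   /* NET_RX_SUCCESS */

static inline int
example_sk_receive_skb(struct sock *sk,     // IN: receiving socket
                       struct sk_buff *skb) // IN: frame to deliver
{
   int rc = NET_RX_SUCCESS;

   if (sk_filter(sk, skb, 0)) {
      kfree_skb(skb);                       /* filter rejected the frame */
   } else {
      skb->dev = NULL;
      bh_lock_sock(sk);
      if (!sock_owned_by_user(sk)) {
         rc = sk->sk_backlog_rcv(sk, skb);
      } else {
         sk_add_backlog(sk, skb);
      }
      bh_unlock_sock(sk);
   }
   sock_put(sk);                            /* exactly one reference drop */
   return rc;
}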
- * - */ -#ifdef CONFIG_FILTER -/* - * Unfortunately backports for certain kernels require the use of an autoconf - * program to check the interface for sk_filter(). - */ -# ifndef VMW_HAVE_NEW_SKFILTER -/* - * Up until 2.4.21 for the 2.4 series and 2.5.60 for the 2.5 series, - * callers to sk->filter were responsible for ensuring that the filter - * was not NULL. - * Additionally, the new version of sk_filter returns 0 or -EPERM on error - * while the old function returned 0 or 1. Return -EPERM here as well to - * be consistent. - */ -# define compat_sk_filter(sk, skb, needlock) \ - ({ \ - int rc = 0; \ - \ - if ((sk)->filter) { \ - rc = sk_filter(skb, (sk)->filter); \ - if (rc) { \ - rc = -EPERM; \ - } \ - } \ - \ - rc; \ - }) -# else -# define compat_sk_filter(sk, skb, needlock) sk_filter(sk, skb, needlock) -# endif -#else -# define compat_sk_filter(sk, skb, needlock) 0 +#ifndef CONFIG_FILTER +# define sk_filter(sk, skb, needlock) 0 #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) /* Taken from 2.6.16's sock.h and modified for macro. */ # define compat_sk_receive_skb(sk, skb, nested) \ ({ \ int rc = NET_RX_SUCCESS; \ \ - if (compat_sk_filter(sk, skb, 0)) { \ + if (sk_filter(sk, skb, 0)) { \ kfree_skb(skb); \ - sock_put(sk); \ } else { \ skb->dev = NULL; \ bh_lock_sock(sk); \ - if (!compat_sock_owned_by_user(sk)) { \ - rc = (sk)->compat_sk_backlog_rcv(sk, skb); \ + if (!sock_owned_by_user(sk)) { \ + rc = (sk)->sk_backlog_rcv(sk, skb); \ } else { \ sk_add_backlog(sk, skb); \ } \ bh_unlock_sock(sk); \ - sock_put(sk); \ } \ \ + sock_put(sk); \ rc; \ }) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) @@ -182,33 +74,4 @@ # define compat_sk_receive_skb(sk, skb, nested) sk_receive_skb(sk, skb, nested) #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 72) -/* - * Before 2.5.72, the helper socket functions for hlist management did not - * exist, so we use the sklist_ functions instead. These are not ideal since - * they grab a system-wide sklist lock despite not needing it since we provide - * our own list. - */ -#define compat_sk_next next /* for when we find out it became sk_next */ -# define compat_sklist_table struct sock * -/* This isn't really used in the iterator, but we need something. */ -# define compat_sklist_table_entry struct sock -# define compat_sk_for_each(sk, node, list) \ - for (sk = *(list), node = NULL; sk != NULL; sk = (sk)->compat_sk_next) -# define compat_sk_add_node(sk, list) sklist_insert_socket(list, sk) -# define compat_sk_del_node_init(sk, list) sklist_remove_socket(list, sk) -#else -# define compat_sklist_table struct hlist_head -# define compat_sklist_table_entry struct hlist_node -# define compat_sk_for_each(sk, node, list) sk_for_each(sk, node, list) -# define compat_sk_add_node(sk, list) sk_add_node(sk, list) -# define compat_sk_del_node_init(sk, list) sk_del_node_init(sk) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6) -# define compat_sock_create_kern sock_create -#else -# define compat_sock_create_kern sock_create_kern -#endif - #endif /* __COMPAT_SOCK_H__ */ --- source-7.1.5/vmnet-only/compat_spinlock.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_spinlock.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,68 +0,0 @@ -/********************************************************* - * Copyright (C) 2005 VMware, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_SPINLOCK_H__ -# define __COMPAT_SPINLOCK_H__ - - -/* - * The spin_lock() API appeared in 2.1.25 in asm/smp_lock.h - * It moved in 2.1.30 to asm/spinlock.h - * It moved again in 2.3.18 to linux/spinlock.h - * - * --hpreg - */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 18) -# include -#else -# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 30) -# include -# else -typedef struct {} spinlock_t; -# define spin_lock_init(lock) -# define spin_lock(lock) -# define spin_unlock(lock) -# define spin_lock_irqsave(lock, flags) do { \ - save_flags(flags); \ - cli(); \ - spin_lock(lock); \ - } while (0) -# define spin_unlock_irqrestore(lock, flags) do { \ - spin_unlock(lock); \ - restore_flags(flags); \ - } while (0) -# endif -#endif - - -/* - * Preempt support was added during 2.5.x development cycle, and later - * it was backported to 2.4.x. In 2.4.x backport these definitions - * live in linux/spinlock.h, that's why we put them here (in 2.6.x they - * are defined in linux/preempt.h which is included by linux/spinlock.h). - */ -#ifdef CONFIG_PREEMPT -#define compat_preempt_disable() preempt_disable() -#define compat_preempt_enable() preempt_enable() -#else -#define compat_preempt_disable() do { } while (0) -#define compat_preempt_enable() do { } while (0) -#endif - - -#endif /* __COMPAT_SPINLOCK_H__ */ --- source-7.1.5/vmnet-only/compat_uaccess.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_uaccess.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,79 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_UACCESS_H__ -# define __COMPAT_UACCESS_H__ - - -/* User space access functions moved in 2.1.7 to asm/uaccess.h --hpreg */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 7) -# include -#else -# include -#endif - - -/* get_user() API modified in 2.1.4 to take 2 arguments --hpreg */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 4) -# define compat_get_user get_user -#else -/* - * We assign 0 to the variable in case of failure to prevent "`_var' might be - * used uninitialized in this function" compiler warnings. I think it is OK, - * because the hardware-based version in newer kernels probably has the same - * semantics and does not guarantee that the value of _var will not be - * modified, should the access fail --hpreg - */ -# define compat_get_user(_var, _uvAddr) ({ \ - int _status; \ - \ - _status = verify_area(VERIFY_READ, _uvAddr, sizeof(*(_uvAddr))); \ - if (_status == 0) { \ - (_var) = get_user(_uvAddr); \ - } else { \ - (_var) = 0; \ - } \ - _status; \ -}) -#endif - - -/* - * The copy_from_user() API appeared in 2.1.4 - * - * The emulation is not perfect here, but it is conservative: on failure, we - * always return the total size, instead of the potentially smaller faulty - * size --hpreg - * - * Since 2.5.55 copy_from_user() is no longer macro. - */ -#if !defined(copy_from_user) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) -# define copy_from_user(_to, _from, _size) ( \ - verify_area(VERIFY_READ, _from, _size) \ - ? (_size) \ - : (memcpy_fromfs(_to, _from, _size), 0) \ -) -# define copy_to_user(_to, _from, _size) ( \ - verify_area(VERIFY_WRITE, _to, _size) \ - ? (_size) \ - : (memcpy_tofs(_to, _from, _size), 0) \ -) -#endif - - -#endif /* __COMPAT_UACCESS_H__ */ --- source-7.1.5/vmnet-only/compat_version.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_version.h 2011-11-14 02:16:55.000000000 -0500 @@ -22,7 +22,6 @@ #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMCORE -#define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" @@ -34,9 +33,8 @@ #include -/* Appeared in 2.1.90 --hpreg */ #ifndef KERNEL_VERSION -# define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +# error KERNEL_VERSION macro is not defined, environment is busted #endif @@ -102,11 +100,6 @@ # define KERNEL_2_4_8 #endif -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22) -/* New vmap() */ -# define KERNEL_2_4_22 -#endif - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 2) /* New kdev_t, major()/minor() API --hpreg */ # define KERNEL_2_5_2 --- source-7.1.5/vmnet-only/compat_wait.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/compat_wait.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,234 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __COMPAT_WAIT_H__ -# define __COMPAT_WAIT_H__ - - -#include -#include -#include - -#include "compat_file.h" - - -/* - * The DECLARE_WAITQUEUE() API appeared in 2.3.1 - * It was back ported in 2.2.18 - * - * --hpreg - */ - -#ifndef DECLARE_WAITQUEUE - -typedef struct wait_queue *wait_queue_head_t; -# define init_waitqueue_head(_headPtr) *(_headPtr) = NULL -# define DECLARE_WAITQUEUE(_var, _task) \ - struct wait_queue _var = {_task, NULL, } - -typedef struct wait_queue wait_queue_t; -# define init_waitqueue_entry(_wait, _task) ((_wait)->task = (_task)) - -#endif - -/* - * The 'struct poll_wqueues' appeared in 2.5.48, when global - * /dev/epoll interface was added. It was backported to the - * 2.4.20-wolk4.0s. - */ - -#ifdef VMW_HAVE_EPOLL // { -#define compat_poll_wqueues struct poll_wqueues -#else // } { -#define compat_poll_wqueues poll_table -#endif // } - -#ifdef VMW_HAVE_EPOLL // { - -/* If prototype does not match, build will abort here */ -extern void poll_initwait(compat_poll_wqueues *); - -#define compat_poll_initwait(wait, table) ( \ - poll_initwait((table)), \ - (wait) = &(table)->pt \ -) - -#define compat_poll_freewait(wait, table) ( \ - poll_freewait((table)) \ -) - -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) // { - -/* If prototype does not match, build will abort here */ -extern void poll_initwait(compat_poll_wqueues *); - -#define compat_poll_initwait(wait, table) ( \ - (wait) = (table), \ - poll_initwait(wait) \ -) - -#define compat_poll_freewait(wait, table) ( \ - poll_freewait((table)) \ -) - -#else // } { - -#define compat_poll_initwait(wait, table) ( \ - (wait) = (table), /* confuse compiler */ \ - (wait) = (poll_table *) __get_free_page(GFP_KERNEL), \ - (wait)->nr = 0, \ - (wait)->entry = (struct poll_table_entry *)((wait) + 1), \ - (wait)->next = NULL \ -) - -static inline void -poll_freewait(poll_table *wait) -{ - while (wait) { - struct poll_table_entry * entry; - poll_table *old; - - entry = wait->entry + wait->nr; - while (wait->nr > 0) { - wait->nr--; - entry--; - remove_wait_queue(entry->wait_address, &entry->wait); - compat_fput(entry->filp); - } - old = wait; - wait = wait->next; - free_page((unsigned long) old); - } -} - -#define compat_poll_freewait(wait, table) ( \ - poll_freewait((wait)) \ -) - -#endif // } - -/* - * The wait_event_interruptible_timeout() interface is not - * defined in pre-2.6 kernels. 
- */ -#ifndef wait_event_interruptible_timeout -#define __wait_event_interruptible_timeout(wq, condition, ret) \ -do { \ - wait_queue_t __wait; \ - init_waitqueue_entry(&__wait, current); \ - \ - add_wait_queue(&wq, &__wait); \ - for (;;) { \ - set_current_state(TASK_INTERRUPTIBLE); \ - if (condition) \ - break; \ - if (!signal_pending(current)) { \ - ret = schedule_timeout(ret); \ - if (!ret) \ - break; \ - continue; \ - } \ - ret = -ERESTARTSYS; \ - break; \ - } \ - set_current_state(TASK_RUNNING); \ - remove_wait_queue(&wq, &__wait); \ -} while (0) - -#define wait_event_interruptible_timeout(wq, condition, timeout) \ -({ \ - long __ret = timeout; \ - if (!(condition)) \ - __wait_event_interruptible_timeout(wq, condition, __ret); \ - __ret; \ -}) -#endif - -/* - * The wait_event_timeout() interface is not - * defined in pre-2.6 kernels. - */ -#ifndef wait_event_timeout -#define __wait_event_timeout(wq, condition, ret) \ -do { \ - wait_queue_t __wait; \ - init_waitqueue_entry(&__wait, current); \ - \ - add_wait_queue(&wq, &__wait); \ - for (;;) { \ - set_current_state(TASK_UNINTERRUPTIBLE); \ - if (condition) \ - break; \ - ret = schedule_timeout(ret); \ - if (!ret) \ - break; \ - } \ - set_current_state(TASK_RUNNING); \ - remove_wait_queue(&wq, &__wait); \ -} while (0) - -#define wait_event_timeout(wq, condition, timeout) \ -({ \ - long __ret = timeout; \ - if (!(condition)) \ - __wait_event_timeout(wq, condition, __ret); \ - __ret; \ -}) -#endif - -/* - * DEFINE_WAIT() and friends were added in 2.5.39 and backported to 2.4.28. - * - * Unfortunately it is not true. While some distros may have done it the - * change has never made it into vanilla 2.4 kernel. Instead of testing - * particular kernel versions let's just test for presence of DEFINE_WAIT - * when figuring out whether we need to provide replacement implementation - * or simply alias existing one. 
- *
- */
-
-#ifndef DEFINE_WAIT
-
-# define COMPAT_DEFINE_WAIT(_wait)                              \
-   DECLARE_WAITQUEUE(_wait, current)
-# define compat_init_prepare_to_wait(_sleep, _wait, _state)     \
-   do {                                                         \
-      __set_current_state(_state);                              \
-      add_wait_queue(_sleep, _wait);                            \
-   } while (0)
-# define compat_cont_prepare_to_wait(_sleep, _wait, _state)     \
-   set_current_state(_state)
-# define compat_finish_wait(_sleep, _wait, _state)              \
-   do {                                                         \
-      __set_current_state(_state);                              \
-      remove_wait_queue(_sleep, _wait);                         \
-   } while (0)
-
-#else
-
-# define COMPAT_DEFINE_WAIT(_wait)                              \
-   DEFINE_WAIT(_wait)
-# define compat_init_prepare_to_wait(_sleep, _wait, _state)     \
-   prepare_to_wait(_sleep, _wait, _state)
-# define compat_cont_prepare_to_wait(_sleep, _wait, _state)     \
-   prepare_to_wait(_sleep, _wait, _state)
-# define compat_finish_wait(_sleep, _wait, _state)              \
-   finish_wait(_sleep, _wait)
-
-#endif /* #ifndef DEFINE_WAIT */
-
-#endif /* __COMPAT_WAIT_H__ */
--- source-7.1.5/vmnet-only/driver-config.h 2011-09-23 22:05:45.000000000 -0400
+++ patched/vmnet-only/driver-config.h 2011-11-14 02:16:55.000000000 -0500
@@ -29,7 +29,6 @@
 #define INCLUDE_ALLOW_VMCORE
 #define INCLUDE_ALLOW_VMMON
 #define INCLUDE_ALLOW_MODULE
-#define INCLUDE_ALLOW_VMNIXMOD
 #include "includeCheck.h"
 
 #include "compat_version.h"
--- source-7.1.5/vmnet-only/driver.c 2011-09-23 22:05:45.000000000 -0400
+++ patched/vmnet-only/driver.c 2011-11-21 10:17:57.000000000 -0500
@@ -28,7 +28,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
@@ -37,6 +37,8 @@
 #include
 #include
 #include "compat_sock.h"
+#include
+#include
 
 #define __KERNEL_SYSCALLS__
 #include
@@ -50,10 +52,6 @@
 #include "vnetInt.h"
 #include "vnetFilter.h"
-#include "compat_uaccess.h"
-#include "compat_kdev_t.h"
-#include "compat_sched.h"
-#include "compat_mutex.h"
 #include "vmnetInt.h"
 
 /*
@@ -105,7 +103,7 @@
  * not have vnetStructureMutex already acquired,
  * it is most certainly a bug.
  */
-static rwlock_t vnetPeerLock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(vnetPeerLock);
 
 /*
  * All concurrent changes to the network structure are
@@ -114,7 +112,13 @@
  * For change to peer field you must own both
  * vnetStructureMutex and vnetPeerLock for write.
  */
-compat_define_mutex(vnetStructureMutex);
+DEFINE_MUTEX(vnetStructureMutex);
+
+/*
+ * The following mutex ensures that only one thread at a time executes
+ * ioctl code, in the absence of the BKL on newer kernels.
+ */
+static DEFINE_MUTEX(vnetIoctlMutex);
 
 #if defined(VM_X86_64) && !defined(HAVE_COMPAT_IOCTL)
 /*
@@ -126,7 +130,7 @@
  */
 static const unsigned int ioctl32_cmds[] = {
    SIOCGBRSTATUS, SIOCSPEER, SIOCSPEER2, SIOCSBIND, SIOCGETAPIVERSION2,
-   SIOCSFILTERRULES, SIOCSUSERLISTENER, SIOCSPEER3, 0,
+   SIOCSFILTERRULES, SIOCSUSERLISTENER, SIOCSMCASTFILTER, SIOCSPEER3, 0,
 };
 #endif
 
@@ -137,13 +141,11 @@
 static VNetPort *vnetAllPorts = NULL;
 
-#ifdef VMW_HAVE_SK_ALLOC_WITH_PROTO
 struct proto vmnet_proto = {
    .name = "VMNET",
    .owner = THIS_MODULE,
    .obj_size = sizeof(struct sock),
 };
-#endif
 
 /*
  * Device driver interface.
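The DEFINE_MUTEX(vnetIoctlMutex) introduced above stands in for the Big Kernel Lock that lock_kernel()/unlock_kernel() used to provide around the ioctl paths. A minimal sketch of the same pattern for a generic character driver is shown here; it is not part of the patch, and every name in it (example_ioctl_mutex, ExampleIoctl, ExampleUnlockedIoctl, example_fops) is hypothetical.

/*
 * Sketch: serializing ioctl handling with a driver-private mutex once
 * .unlocked_ioctl is used instead of the BKL-protected .ioctl entry point.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_ioctl_mutex);

/* Existing, non-reentrant ioctl logic (hypothetical stub). */
static long
ExampleIoctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
   return -ENOTTY;
}

static long
ExampleUnlockedIoctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
   long ret;

   mutex_lock(&example_ioctl_mutex);     /* was: lock_kernel()   */
   ret = ExampleIoctl(filp, cmd, arg);
   mutex_unlock(&example_ioctl_mutex);   /* was: unlock_kernel() */
   return ret;
}

static const struct file_operations example_fops = {
   .owner          = THIS_MODULE,
   .unlocked_ioctl = ExampleUnlockedIoctl,
};

The driver-local mutex keeps the serialization the old code relied on while remaining usable on kernels where the BKL no longer exists.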
@@ -159,7 +161,7 @@ loff_t *ppos); static int VNetFileOpIoctl(struct inode *inode, struct file *filp, unsigned int iocmd, unsigned long ioarg); -#if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COPAT_IOCTL) +#if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL) static long VNetFileOpUnlockedIoctl(struct file * filp, unsigned int iocmd, unsigned long ioarg); #endif @@ -264,11 +266,12 @@ struct file * filp) // IN: { int ret = -ENOTTY; - lock_kernel(); + + compat_mutex_lock(&vnetMutex); if (filp && filp->f_op && filp->f_op->ioctl == VNetFileOpIoctl) { ret = VNetFileOpIoctl(filp->f_dentry->d_inode, filp, iocmd, ioarg); } - unlock_kernel(); + compat_mutex_unlock(&vnetMutex); return ret; } @@ -589,7 +592,7 @@ * Sanity check the hub number. */ - hubNum = minor(inode->i_rdev); + hubNum = MINOR(inode->i_rdev); if (hubNum < 0 || hubNum >= VNET_NUM_VNETS) { return -ENODEV; } @@ -613,17 +616,17 @@ return -EBUSY; } - compat_mutex_lock(&vnetStructureMutex); + mutex_lock(&vnetStructureMutex); retval = VNetConnect(&port->jack, hubJack); if (retval) { - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); VNetFree(&port->jack); VNetFree(hubJack); return retval; } VNetAddPortToList(port); - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); /* * Store away jack in file pointer private field for later use. @@ -666,10 +669,10 @@ return -EBADF; } - compat_mutex_lock(&vnetStructureMutex); + mutex_lock(&vnetStructureMutex); peer = VNetDisconnect(&port->jack); VNetRemovePortFromList(port); - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); VNetFree(&port->jack); VNetFree(peer); @@ -884,13 +887,18 @@ if (!capable(CAP_NET_RAW)) { return -EACCES; } + + mutex_lock(&vnetIoctlMutex); retval = VNetBridge_Create(bridgeParams.name, bridgeParams.flags, port->jack.peer, &new); + if (!retval) { + retval = VNetSwitchToDifferentPeer(&port->jack, &new->jack, TRUE, + filp, port, new); + } + mutex_unlock(&vnetIoctlMutex); - return retval ? retval : VNetSwitchToDifferentPeer(&port->jack, - &new->jack, TRUE, - filp, port, new); break; + case SIOCSUSERLISTENER: { VNet_SetUserListener param; @@ -905,74 +913,97 @@ return -EINVAL; } + mutex_lock(&vnetIoctlMutex); /* create user listener */ retval = VNetUserListener_Create(param.classMask, port->jack.peer, &new); - if (retval != 0) { - return retval; + if (retval == 0) { + /* replace current port with user listener */ + retval = VNetSwitchToDifferentPeer(&port->jack, &new->jack, TRUE, + filp, port, new); } - - /* replace current port with user listener */ - retval = VNetSwitchToDifferentPeer(&port->jack, &new->jack, TRUE, - filp, port, new); + mutex_unlock(&vnetIoctlMutex); } break; + case SIOCPORT: + mutex_lock(&vnetIoctlMutex); retval = VNetUserIf_Create(&new); - - return retval ? retval : VNetSwitchToDifferentPeer(&port->jack, &new->jack, - TRUE, filp, port, new); + if (retval == 0) { + retval = VNetSwitchToDifferentPeer(&port->jack, &new->jack, + TRUE, filp, port, new); + } + mutex_unlock(&vnetIoctlMutex); break; + case SIOCNETIF: if (copy_from_user(name, (void *)ioarg, 8)) { - return -EFAULT; + return -EFAULT; } name[8] = '\0'; /* allow 8-char unterminated string */ - retval = VNetNetIf_Create(name, &new, minor(inode->i_rdev)); - - return retval ? 
retval : VNetSwitchToDifferentPeer(&port->jack, &new->jack, - TRUE, filp, port, new); + mutex_lock(&vnetIoctlMutex); + retval = VNetNetIf_Create(name, &new, MINOR(inode->i_rdev)); + if (retval == 0) { + retval = VNetSwitchToDifferentPeer(&port->jack, &new->jack, + TRUE, filp, port, new); + } + mutex_unlock(&vnetIoctlMutex); break; case SIOCSBIND: if (copy_from_user(&newNetwork, (void *)ioarg, sizeof newNetwork)) { - return -EFAULT; + return -EFAULT; } + if (newNetwork.version != VNET_BIND_VERSION) { - LOG(1, (KERN_NOTICE "/dev/vmnet: bad bind version: %u %u\n", - newNetwork.version, VNET_BIND_VERSION)); - return -EINVAL; + LOG(1, (KERN_NOTICE "/dev/vmnet: bad bind version: %u %u\n", + newNetwork.version, VNET_BIND_VERSION)); + return -EINVAL; } + + mutex_lock(&vnetIoctlMutex); + switch (newNetwork.bindType) { case VNET_BIND_TO_VNET: - if (newNetwork.number < 0 || newNetwork.number >= VNET_NUM_VNETS) { - LOG(1, (KERN_NOTICE "/dev/vmnet: invalid bind to vnet %d\n", - newNetwork.number)); - return -EINVAL; - } - hubJack = VNetHub_AllocVnet(newNetwork.number); - break; + if (newNetwork.number < 0 || newNetwork.number >= VNET_NUM_VNETS) { + LOG(1, (KERN_NOTICE "/dev/vmnet: invalid bind to vnet %d\n", + newNetwork.number)); + retval = -EINVAL; + } else { + hubJack = VNetHub_AllocVnet(newNetwork.number); + retval = 0; + } + break; + case VNET_BIND_TO_PVN: - { - uint8 id[VNET_PVN_ID_LEN] = {0}; + { + uint8 id[VNET_PVN_ID_LEN] = {0}; + + if (memcmp(id, newNetwork.id, + min(sizeof id, sizeof newNetwork.id)) == 0) { + LOG(0, (KERN_NOTICE "/dev/vmnet: invalid bind to pvn\n")); + retval = -EINVAL; + } else { + memcpy(id, newNetwork.id, + min(sizeof id, sizeof newNetwork.id)); + hubJack = VNetHub_AllocPvn(id); + retval = 0; + } + } + break; - if (memcmp(id, newNetwork.id, sizeof id < sizeof newNetwork.id ? - sizeof id : sizeof newNetwork.id) == 0) { - LOG(0, (KERN_NOTICE "/dev/vmnet: invalid bind to pvn\n")); - return -EINVAL; - } - memcpy(id, newNetwork.id, sizeof id < sizeof newNetwork.id ? - sizeof id : sizeof newNetwork.id); - hubJack = VNetHub_AllocPvn(id); - } - break; default: - LOG(1, (KERN_NOTICE "/dev/vmnet: bad bind type: %u\n", - newNetwork.bindType)); - return -EINVAL; + LOG(1, (KERN_NOTICE "/dev/vmnet: bad bind type: %u\n", + newNetwork.bindType)); + retval = -EINVAL; } - return VNetSwitchToDifferentPeer(&port->jack, hubJack, FALSE, NULL, NULL, NULL); + if (retval == 0) { + retval = VNetSwitchToDifferentPeer(&port->jack, hubJack, + FALSE, NULL, NULL, NULL); + } + + mutex_unlock(&vnetIoctlMutex); break; case SIOCSFILTERRULES: @@ -1002,30 +1033,36 @@ /* * Dispatch the sub-command. */ - return VNetFilter_HandleUserCall(&ruleHeader, ioarg); + mutex_lock(&vnetIoctlMutex); + retval = VNetFilter_HandleUserCall(&ruleHeader, ioarg); + mutex_unlock(&vnetIoctlMutex); + break; #else LOG(0, (KERN_NOTICE "/dev/vmnet: kernel doesn't support netfilter\n")); return -EINVAL; - break; #endif case SIOCGBRSTATUS: { uint32 flags; - read_lock(&vnetPeerLock); + read_lock(&vnetPeerLock); flags = VNetIsBridged(&port->jack); - read_unlock(&vnetPeerLock); + read_unlock(&vnetPeerLock); - if (copy_to_user((void *)ioarg, &flags, sizeof flags)) { - return -EFAULT; - } + retval = put_user(flags, (uint32 *)ioarg) ? 
-EFAULT : 0; } break; case SIOCGIFADDR: - if (copy_to_user((void *)ioarg, port->paddr, ETH_ALEN)) { - return -EFAULT; + { + uint8 paddr[ETH_ALEN]; + + mutex_lock(&vnetIoctlMutex); + memcpy(paddr, port->paddr, sizeof paddr); + mutex_unlock(&vnetIoctlMutex); + + retval = copy_to_user((void *)ioarg, paddr, ETH_ALEN) ? -EFAULT : 0; } break; @@ -1033,27 +1070,50 @@ return -EFAULT; case SIOCSLADRF: - if (copy_from_user(port->ladrf, (void *)ioarg, sizeof port->ladrf)) { - return -EFAULT; + { + uint8 ladrf[VNET_LADRF_LEN]; + + if (copy_from_user(ladrf, (void *)ioarg, sizeof ladrf)) { + return -EFAULT; + } + + mutex_lock(&vnetIoctlMutex); + memcpy(port->ladrf, ladrf, sizeof port->ladrf); + port->exactFilterLen = 0; + mutex_unlock(&vnetIoctlMutex); + + retval = 0; } break; case SIOCSIFFLAGS: - if (copy_from_user(&port->flags, (void *)ioarg, sizeof port->flags)) { - return -EFAULT; - } - port->flags = ((port->flags - & (IFF_UP|IFF_BROADCAST|IFF_DEBUG - |IFF_PROMISC|IFF_MULTICAST|IFF_ALLMULTI)) - | IFF_RUNNING); - if (port->fileOpIoctl) { - - /* - * Userif ports have some postprocessing when the IFF_UP flags is - * changed. - */ - port->fileOpIoctl(port, filp, iocmd, ioarg); + { + uint32 flags; + + if (get_user(flags, (uint32 *)ioarg)) { + return -EFAULT; + } + + /* Sanitize */ + flags &= IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_PROMISC | + IFF_MULTICAST | IFF_ALLMULTI; + flags |= IFF_RUNNING; + + mutex_lock(&vnetIoctlMutex); + port->flags = flags; + if (port->fileOpIoctl) { + + /* + * Userif ports have some postprocessing when the IFF_UP flags is + * changed. + */ + port->fileOpIoctl(port, filp, iocmd, ioarg); + } + mutex_unlock(&vnetIoctlMutex); + + retval = 0; } + break; case SIOCSETMACADDR: @@ -1064,44 +1124,81 @@ switch (macAddr.version) { case 1: if (macAddr.flags & VNET_SETMACADDRF_UNIQUE) { - if (VMX86_IS_VIRT_ADAPTER_MAC(macAddr.addr)) { - return -EBUSY; - } - return VNetSetMACUnique(port, macAddr.addr); + if (VMX86_IS_VIRT_ADAPTER_MAC(macAddr.addr)) { + return -EBUSY; + } + mutex_lock(&vnetIoctlMutex); + retval = VNetSetMACUnique(port, macAddr.addr); + mutex_unlock(&vnetIoctlMutex); + + } else { + mutex_lock(&vnetIoctlMutex); + memcpy(port->paddr, macAddr.addr, ETH_ALEN); + mutex_unlock(&vnetIoctlMutex); + + retval = 0; } - memcpy(port->paddr, macAddr.addr, ETH_ALEN); + break; + default: - return -EINVAL; + retval = -EINVAL; break; } break; + + case SIOCSMCASTFILTER: + { + VNetMcastFilter vnetMcastFilter; + if (copy_from_user(&vnetMcastFilter, (void *)ioarg, + sizeof vnetMcastFilter)) { + return -EFAULT; + } + if (vnetMcastFilter.exactFilterLen > VNET_MAX_EXACT_FILTER_LEN) { + return -EFAULT; + } + + mutex_lock(&vnetIoctlMutex); + + memcpy(port->ladrf, vnetMcastFilter.ladrf, sizeof port->ladrf); + memcpy(port->exactFilter, vnetMcastFilter.exactFilter, + vnetMcastFilter.exactFilterLen * ETHER_ADDR_LEN); + port->exactFilterLen = vnetMcastFilter.exactFilterLen; + + mutex_unlock(&vnetIoctlMutex); + + retval = 0; + break; + } + case SIOCGETAPIVERSION2: { - uint32 verFromUser; - if (copy_from_user(&verFromUser, (void *)ioarg, sizeof verFromUser)) { - return -EFAULT; - } - /* Should we require verFromUser == VNET_API_VERSION? */ + uint32 verFromUser; + + if (get_user(verFromUser, (uint32 *)ioarg)) { + return -EFAULT; + } + /* Should we require verFromUser == VNET_API_VERSION? 
*/ } /* fall thru */ + case SIOCGETAPIVERSION: - { - uint32 verToUser = VNET_API_VERSION; - if (copy_to_user((void*)ioarg, &verToUser, sizeof verToUser)) { - return -EFAULT; - } - } + retval = put_user(VNET_API_VERSION, (uint32 *)ioarg) ? -EFAULT : 0; break; + default: if (!port->fileOpIoctl) { return -ENOIOCTLCMD; } - return port->fileOpIoctl(port, filp, iocmd, ioarg); + + mutex_lock(&vnetIoctlMutex); + retval = port->fileOpIoctl(port, filp, iocmd, ioarg); + mutex_unlock(&vnetIoctlMutex); + break; } - return 0; + return retval; } @@ -1134,9 +1231,9 @@ if (filp && filp->f_dentry) { inode = filp->f_dentry->d_inode; } - lock_kernel(); + //mutex_lock(&vnetMutex); err = VNetFileOpIoctl(inode, filp, iocmd, ioarg); - unlock_kernel(); + //mutex_unlock(&vnetMutex); return err; } #endif @@ -1196,7 +1293,7 @@ * old peer if a cycle is detected. */ - compat_mutex_lock(&vnetStructureMutex); + mutex_lock(&vnetStructureMutex); /* Disconnect from the old peer */ oldPeer = VNetDisconnect(jack); @@ -1211,7 +1308,7 @@ /* Connect failed, so reconnect back to old peer */ int retval2 = VNetConnect(jack, oldPeer); - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); /* Free the new peer */ VNetFree(newPeer); @@ -1233,7 +1330,7 @@ VNetRemovePortFromList(jackPort); } - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); /* Connected to new peer, so dealloc the old peer */ if (connectNewToPeerOfJack) { @@ -1275,8 +1372,10 @@ #define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */ static INLINE_SINGLE_CALLER Bool -VNetMulticastFilter(const uint8 *destAddr, // IN: multicast MAC - const uint8 *ladrf) // IN: multicast filter +VNetMulticastFilter(const uint8 *destAddr, // IN: multicast MAC + const uint8 *exactFilter, // IN: exact mc filter + const uint32 exactFilterLen, // IN: length of exact mc filter + const uint8 *ladrf) // IN: logical addr filter { uint16 hashcode; int32 crc; @@ -1285,6 +1384,23 @@ int bit; int byte; + /* first check exact multicast filter */ + if (exactFilterLen) { + uint32 i; + for (i = 0; i < exactFilterLen; i++) { + if (MAC_EQ(destAddr, exactFilter + i * ETHER_ADDR_LEN)){ + return TRUE; + } + } + /* + * Do not need to further compute and check ladrf if no match + * in exact multicast filter, since only one of them is + * used at a time. 
+ */ + return FALSE; + } + + crc = 0xffffffff; /* init CRC for each address */ for (byte = 0; byte < ETH_ALEN; byte++) { /* for each address byte */ /* process each address bit */ @@ -1326,10 +1442,12 @@ */ Bool -VNetPacketMatch(const uint8 *destAddr, // IN: destination MAC - const uint8 *ifAddr, // IN: MAC of interface - const uint8 *ladrf, // IN: multicast filter - uint32 flags) // IN: filter flags +VNetPacketMatch(const uint8 *destAddr, // IN: destination MAC + const uint8 *ifAddr, // IN: MAC of interface + const uint8 *exactFilter, // IN: exact mc filter + const uint32 exactFilterLen, // IN: size of exact mc filter + const uint8 *ladrf, // IN: multicast filter + uint32 flags) // IN: filter flags { /* * Return TRUE if promiscuous requested, or unicast destined @@ -1342,7 +1460,7 @@ ((flags & IFF_BROADCAST) && MAC_EQ(destAddr, broadcast)) || ((destAddr[0] & 0x1) && (flags & IFF_ALLMULTI || (flags & IFF_MULTICAST && - VNetMulticastFilter(destAddr, ladrf))))); + VNetMulticastFilter(destAddr, exactFilter, exactFilterLen, ladrf))))); } @@ -1633,15 +1751,15 @@ { VNetPort *p; - compat_mutex_lock(&vnetStructureMutex); + mutex_lock(&vnetStructureMutex); for (p = vnetAllPorts; p != NULL; p = p->next) { if (p != port && MAC_EQ(p->paddr, mac)) { - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); return -EBUSY; } } memcpy(port->paddr, mac, ETH_ALEN); - compat_mutex_unlock(&vnetStructureMutex); + mutex_unlock(&vnetStructureMutex); return 0; } --- source-7.1.5/vmnet-only/epoll.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/epoll.c 1969-12-31 19:00:00.000000000 -0500 @@ -1,36 +0,0 @@ -/********************************************************* - * Copyright (C) 2004 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -/* - * Detect whether we have 'struct poll_wqueues' - * 2.6.x kernels always had this struct. Stock 2.4.x kernels - * never had it, but some distros backported epoll patch. - */ - -#include "compat_version.h" -#include "compat_autoconf.h" - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -#include - -void poll_test(void) { - struct poll_wqueues test; - - return poll_initwait(&test); -} -#endif --- source-7.1.5/vmnet-only/filter.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/filter.c 2011-12-11 19:07:31.000000000 -0500 @@ -24,8 +24,7 @@ #include #include #include "compat_skbuff.h" -#include "compat_mutex.h" -#include "compat_semaphore.h" +#include #include /* * All this makes sense only if NETFILTER support is configured in our kernel. 
@@ -41,6 +40,10 @@ #include "vnetInt.h" #include "vmnetInt.h" +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) +#include /* for THIS_MODULE */ +#endif + // VNet_FilterLogPacket.action for dropped packets #define VNET_FILTER_ACTION_DRP (1) #define VNET_FILTER_ACTION_DRP_SHORT (2) @@ -58,12 +61,12 @@ static struct nf_hook_ops vmnet_nf_ops[] = { { .hook = VNetFilterHookFn, - compat_nf_hook_owner + .owner = THIS_MODULE, .pf = PF_INET, .hooknum = VMW_NF_INET_LOCAL_IN, .priority = NF_IP_PRI_FILTER - 1, }, { .hook = VNetFilterHookFn, - compat_nf_hook_owner + .owner = THIS_MODULE, .pf = PF_INET, .hooknum = VMW_NF_INET_POST_ROUTING, .priority = NF_IP_PRI_FILTER - 1, } @@ -78,14 +81,14 @@ RuleSet *activeRule = NULL; /* actual rule set for filter callback to use */ /* locks to protect against concurrent accesses. */ -static compat_define_mutex(filterIoctlMutex); /* serialize ioctl()s from user space. */ +static DEFINE_MUTEX(filterIoctlMutex); /* serialize ioctl()s from user space. */ /* * user/netfilter hook concurrency lock. * This spinlock doesn't scale well if/when in the future the netfilter * callbacks can be concurrently executing on multiple threads on multiple * CPUs, so we should revisit locking for allowing for that in the future. */ -spinlock_t activeRuleLock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(activeRuleLock); /* * Logging. @@ -1084,7 +1087,7 @@ int retval = 0; /* Serialize all ioctl()s. */ - retval = compat_mutex_lock_interruptible(&filterIoctlMutex); + retval = mutex_lock_interruptible(&filterIoctlMutex); if (retval != 0) { return retval; } @@ -1439,7 +1442,7 @@ goto out_unlock; } out_unlock: - compat_mutex_unlock(&filterIoctlMutex); + mutex_unlock(&filterIoctlMutex); return retval; } --- source-7.1.5/vmnet-only/hub.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/hub.c 2011-11-14 02:16:55.000000000 -0500 @@ -74,14 +74,8 @@ static int VNetHubProcRead(char *page, char **start, off_t off, int count, int *eof, void *data); -static VNetHub *vnetHub = NULL; - -/* - * UP spin_lock_irqsave() doesn't actually use the lock variable, - * so we use __attribute__((unused)) to quiet the compiler. - */ - -static spinlock_t vnetHubLock __attribute__((unused)) = SPIN_LOCK_UNLOCKED; +static VNetHub *vnetHub; +static DEFINE_SPINLOCK(vnetHubLock); /* --- source-7.1.5/vmnet-only/monitorAction_exported.h 1969-12-31 19:00:00.000000000 -0500 +++ patched/vmnet-only/monitorAction_exported.h 2011-11-14 02:16:55.000000000 -0500 @@ -0,0 +1,139 @@ +/********************************************************* + * Copyright (C) 2010 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *********************************************************/
+
+#ifndef _MONITORACTION_EXPORTED_H_
+#define _MONITORACTION_EXPORTED_H_
+
+#define INCLUDE_ALLOW_VMX
+#define INCLUDE_ALLOW_VMCORE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_VMMON
+#define INCLUDE_ALLOW_VMKERNEL
+#define INCLUDE_ALLOW_DISTRIBUTE
+#include "includeCheck.h"
+
+#include "vm_assert.h"
+#include "vm_atomic.h"
+#include "vm_basic_types.h"
+
+/*
+ * Please bump the version number if your change will break
+ * compatibility with the drivers.
+ */
+#define ACTION_EXPORTED_VERSION 2
+
+#define ACTION_WORD_SIZE (sizeof(uint64) * 8)
+#define ACTION_NUM_WORDS (2)
+#define ACTION_NUM_IDS (ACTION_NUM_WORDS * ACTION_WORD_SIZE)
+
+#define MONACTION_INVALID MAX_UINT32
+
+typedef uint32 MonitorIdemAction;
+
+/*
+ * Representation of a set of actions.
+ */
+typedef struct MonitorActionSet {
+   volatile uint64 word[ACTION_NUM_WORDS];
+} MonitorActionSet;
+
+/*
+ * Summary of action and interrupt states.
+ */
+typedef struct MonitorActionIntr {
+   MonitorActionSet pendingSet;
+   volatile Bool action;
+   Bool intr;
+   Bool nmi;
+   Bool db;
+   uint32 _pad;
+} MonitorActionIntr;
+
+/*
+ *------------------------------------------------------------------------
+ * MonitorActionSet_AtomicInclude --
+ *
+ *      This function atomically adds an action to an action set.
+ *
+ *      Results:
+ *         TRUE if the action being added did not exist in the action set.
+ *         FALSE otherwise.
+ *
+ *      Side effects:
+ *         The given action set will be updated.
+ *------------------------------------------------------------------------
+ */
+static INLINE Bool
+MonitorActionSet_AtomicInclude(MonitorActionSet *set, const uint32 actionID)
+{
+   Atomic_uint64 *atomicSet =
+      Atomic_VolatileToAtomic64(&set->word[actionID / ACTION_WORD_SIZE]);
+   uint64 mask = (uint64)1 << (actionID % ACTION_WORD_SIZE);
+   uint64 oldWord;
+   uint64 newWord;
+
+   ASSERT_ON_COMPILE((ACTION_WORD_SIZE & (ACTION_WORD_SIZE - 1)) == 0);
+#ifdef VMX86_DEBUG
+   /* If ASSERT is not desirable, do an explicit check. Please see PR 567811. */
+#ifdef MODULE
+   if (UNLIKELY(actionID / ACTION_WORD_SIZE >= ACTION_NUM_WORDS)) {
+      return FALSE;
+   }
+#else
+   ASSERT(actionID / ACTION_WORD_SIZE < ACTION_NUM_WORDS);
+#endif // MODULE
+#endif // VMX86_DEBUG
+   do {
+      oldWord = Atomic_Read64(atomicSet);
+      newWord = oldWord | mask;
+   } while (!Atomic_CMPXCHG64(atomicSet, &oldWord, &newWord));
+   return (oldWord & mask) == 0;
+}
+
+
+/*
+ *----------------------------------------------------------------------------
+ * MonitorAction_SetBits --
+ *
+ *      The core logic for posting an action. Update the set of pending
+ *      actions of the target VCPU in the shared area to mark the action
+ *      as present. Make sure the bit is set in the pendingSet first to
+ *      avoid a race with the drain loop.
+ *
+ *      Results:
+ *         TRUE if the action being posted was not pending before.
+ *         FALSE otherwise (other threads could have posted the same action).
+ *
+ *      Side effects:
+ *         None.
+ *----------------------------------------------------------------------------
+ */
+static INLINE Bool
+MonitorAction_SetBits(MonitorActionIntr *actionIntr, MonitorIdemAction actionID)
+{
+   /* Careful if optimizing this: see PR70016.
*/ + Bool newAction = + MonitorActionSet_AtomicInclude(&actionIntr->pendingSet, actionID); + actionIntr->action = TRUE; + /* Ensure that actIntr->action is visible before any IPI is sent. */ + Atomic_MFence(); + return newAction; +} + +#endif // _MONITORACTION_EXPORTED_H_ --- source-7.1.5/vmnet-only/net.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/net.h 2011-11-14 02:16:55.000000000 -0500 @@ -32,12 +32,18 @@ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_VMMEXT +#define INCLUDE_ALLOW_VMCORE + #include "includeCheck.h" #include "vm_device_version.h" +#ifdef VMCORE +#include "config.h" +#include "str.h" +#include "strutil.h" +#endif + #define ETHERNET_MTU 1518 -#define ETH_MIN_FRAME_LEN 60 #ifndef ETHER_ADDR_LEN #define ETHER_ADDR_LEN 6 /* length of MAC address */ @@ -71,6 +77,15 @@ #define MORPH_PORT_SIZE 4 +#ifdef VMCORE +typedef struct Net_AdapterCount { + uint8 vlance; + uint8 vmxnet2; + uint8 vmxnet3; + uint8 e1000; + uint8 e1000e; +} Net_AdapterCount; +#endif #ifdef USERLEVEL @@ -130,7 +145,61 @@ ladrf[hashcode >> 3] |= 1 << (hashcode & 0x07); } - #endif // USERLEVEL +#ifdef VMCORE +/* + *---------------------------------------------------------------------- + * + * Net_GetNumAdapters -- + * + * Returns the number of each type of network adapter configured in this + * VM. + * + * Results: + * None. + * + * Side effects: + * None. + * + *---------------------------------------------------------------------- + */ + +static INLINE void +Net_GetNumAdapters(Net_AdapterCount *counts) +{ + uint32 i; + + counts->vlance = 0; + counts->vmxnet2 = 0; + counts->vmxnet3 = 0; + counts->e1000 = 0; + counts->e1000e = 0; + + for (i = 0; i < MAX_ETHERNET_CARDS; i++) { + char* adapterStr; + + if (!Config_GetBool(FALSE, "ethernet%d.present", i)) { + continue; + } + adapterStr = Config_GetString("vlance", "ethernet%d.virtualDev", i); + if (Str_Strcasecmp(adapterStr, "vmxnet3") == 0) { + counts->vmxnet3++; + } else if (Str_Strcasecmp(adapterStr, "vlance") == 0) { + counts->vlance++; + } else if (Str_Strcasecmp(adapterStr, "vmxnet") == 0) { + counts->vmxnet2++; + } else if (Str_Strcasecmp(adapterStr, "e1000") == 0) { + counts->e1000++; + } else if (Str_Strcasecmp(adapterStr, "e1000e") == 0) { + counts->e1000e++; + } else { + LOG_ONCE(("%s: unknown adapter: %s\n", __FUNCTION__, adapterStr)); + } + free(adapterStr); + } +} + +#endif // VMCORE + #endif // VMWARE_DEVICES_NET_H --- source-7.1.5/vmnet-only/netif.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/netif.c 2011-12-11 19:05:00.000000000 -0500 @@ -43,6 +43,7 @@ #include "compat_netdevice.h" #include "vmnetInt.h" +#include typedef struct VNetNetIF { VNetPort port; @@ -131,7 +132,11 @@ .ndo_stop = VNetNetifClose, .ndo_get_stats = VNetNetifGetStats, .ndo_set_mac_address = VNetNetifSetMAC, +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) .ndo_set_multicast_list = VNetNetifSetMulticast, +#else + .ndo_set_rx_mode = VNetNetifSetMulticast, +#endif /* * We cannot stuck... If someone will report problems under * low memory conditions or some such, we should enable it. @@ -188,13 +193,9 @@ static INLINE VNetNetIF * VNetNetIfNetDeviceToNetIf(struct net_device *dev) { -#ifdef HAVE_NETDEV_PRIV VNetNetIF** devPriv = netdev_priv(dev); return *devPriv; -#else - return dev->priv; -#endif } @@ -251,6 +252,7 @@ netIf->port.jack.cycleDetect = VNetNetIfCycleDetect; netIf->port.jack.portsChanged = NULL; netIf->port.jack.isBridged = NULL; + netIf->port.exactFilterLen = 0; /* * Make proc entry for this jack. 
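The netif.c hunk above switches the multicast callback from .ndo_set_multicast_list to .ndo_set_rx_mode, keyed on kernel 3.2 where the old field went away. A standalone sketch of that compatibility guard follows; the callback body and all names (ExampleSetRxMode, example_netdev_ops) are hypothetical and only illustrate the version check, not this driver's actual filter logic.

#include <linux/netdevice.h>
#include <linux/version.h>

/* Reprogram the device's RX filters from dev->mc (hypothetical stub). */
static void
ExampleSetRxMode(struct net_device *dev)
{
}

static const struct net_device_ops example_netdev_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)
   .ndo_set_multicast_list = ExampleSetRxMode,
#else
   .ndo_set_rx_mode        = ExampleSetRxMode,
#endif
};

Both callbacks take only the net_device pointer, so a single handler can be wired to whichever field the running kernel provides, as the hunk above does.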
@@ -278,6 +280,7 @@ memset(netIf->port.paddr, 0, sizeof netIf->port.paddr); memset(netIf->port.ladrf, 0, sizeof netIf->port.ladrf); + memset(netIf->port.exactFilter, 0, sizeof netIf->port.exactFilter); /* This will generate the reserved MAC address c0:00:?? where ?? == hubNum. */ VMX86_BUILD_MAC(netIf->port.paddr, hubNum); @@ -298,21 +301,12 @@ memcpy(netIf->devName, devName, sizeof netIf->devName); NULL_TERMINATE_STRING(netIf->devName); -#ifdef HAVE_NETDEV_PRIV dev = compat_alloc_netdev(sizeof(VNetNetIF *), netIf->devName, VNetNetIfSetup); if (!dev) { retval = -ENOMEM; goto out; } *(VNetNetIF**)netdev_priv(dev) = netIf; -#else - dev = compat_alloc_netdev(0, netIf->devName, VNetNetIfSetup); - if (!dev) { - retval = -ENOMEM; - goto out; - } - dev->priv = netIf; -#endif netIf->dev = dev; memcpy(dev->dev_addr, netIf->port.paddr, sizeof netIf->port.paddr); @@ -391,14 +385,16 @@ { VNetNetIF *netIf = (VNetNetIF*)this->private; uint8 *dest = SKB_2_DESTMAC(skb); - + if (!NETDEV_UP_AND_RUNNING(netIf->dev)) { goto drop_packet; } if (!VNetPacketMatch(dest, netIf->dev->dev_addr, - allMultiFilter, + NULL, + 0, + allMultiFilter, netIf->dev->flags)) { goto drop_packet; } --- source-7.1.5/vmnet-only/pgtbl.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/pgtbl.h 1969-12-31 19:00:00.000000000 -0500 @@ -1,385 +0,0 @@ -/********************************************************* - * Copyright (C) 2002 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -#ifndef __PGTBL_H__ -# define __PGTBL_H__ - - -#include "compat_highmem.h" -#include "compat_pgtable.h" -#include "compat_spinlock.h" -#include "compat_page.h" - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 11) -# define compat_active_mm mm -#else -# define compat_active_mm active_mm -#endif - - -/* - *----------------------------------------------------------------------------- - * - * PgtblPte2MPN -- - * - * Returns the page structure associated to a Page Table Entry. - * - * This function is not allowed to schedule() because it can be called while - * holding a spinlock --hpreg - * - * Results: - * INVALID_MPN on failure - * mpn on success - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE MPN -PgtblPte2MPN(pte_t *pte) // IN -{ - if (pte_present(*pte) == 0) { - return INVALID_MPN; - } - return pte_pfn(*pte); -} - - -/* - *----------------------------------------------------------------------------- - * - * PgtblPte2Page -- - * - * Returns the page structure associated to a Page Table Entry. 
- * - * This function is not allowed to schedule() because it can be called while - * holding a spinlock --hpreg - * - * Results: - * The page structure if the page table entry points to a physical page - * NULL if the page table entry does not point to a physical page - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE struct page * -PgtblPte2Page(pte_t *pte) // IN -{ - if (pte_present(*pte) == 0) { - return NULL; - } - - return compat_pte_page(*pte); -} - - -/* - *----------------------------------------------------------------------------- - * - * PgtblPGD2PTELocked -- - * - * Walks through the hardware page tables to try to find the pte - * associated to a virtual address. - * - * Results: - * pte. Caller must call pte_unmap if valid pte returned. - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE pte_t * -PgtblPGD2PTELocked(compat_pgd_t *pgd, // IN: PGD to start with - VA addr) // IN: Address in the virtual address - // space of that process -{ - compat_pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - if (compat_pgd_present(*pgd) == 0) { - return NULL; - } - - pud = compat_pud_offset(pgd, addr); - if (compat_pud_present(*pud) == 0) { - return NULL; - } - - pmd = pmd_offset_map(pud, addr); - if (pmd_present(*pmd) == 0) { - pmd_unmap(pmd); - return NULL; - } - - pte = pte_offset_map(pmd, addr); - pmd_unmap(pmd); - return pte; -} - - -/* - *----------------------------------------------------------------------------- - * - * PgtblVa2PTELocked -- - * - * Walks through the hardware page tables to try to find the pte - * associated to a virtual address. - * - * Results: - * pte. Caller must call pte_unmap if valid pte returned. - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE pte_t * -PgtblVa2PTELocked(struct mm_struct *mm, // IN: Mm structure of a process - VA addr) // IN: Address in the virtual address - // space of that process -{ - return PgtblPGD2PTELocked(compat_pgd_offset(mm, addr), addr); -} - - -/* - *----------------------------------------------------------------------------- - * - * PgtblVa2MPNLocked -- - * - * Retrieve MPN for a given va. - * - * Caller must call pte_unmap if valid pte returned. The mm->page_table_lock - * must be held, so this function is not allowed to schedule() --hpreg - * - * Results: - * INVALID_MPN on failure - * mpn on success - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE MPN -PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process - VA addr) // IN: Address in the virtual address -{ - pte_t *pte; - - pte = PgtblVa2PTELocked(mm, addr); - if (pte != NULL) { - MPN mpn = PgtblPte2MPN(pte); - pte_unmap(pte); - return mpn; - } - return INVALID_MPN; -} - - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -/* - *----------------------------------------------------------------------------- - * - * PgtblKVa2MPNLocked -- - * - * Retrieve MPN for a given kernel va. - * - * Caller must call pte_unmap if valid pte returned. 
The mm->page_table_lock - * must be held, so this function is not allowed to schedule() --hpreg - * - * Results: - * INVALID_MPN on failure - * mpn on success - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE MPN -PgtblKVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a caller - VA addr) // IN: Address in the virtual address -{ - pte_t *pte; - - pte = PgtblPGD2PTELocked(compat_pgd_offset_k(mm, addr), addr); - if (pte != NULL) { - MPN mpn = PgtblPte2MPN(pte); - pte_unmap(pte); - return mpn; - } - return INVALID_MPN; -} -#endif - - -/* - *----------------------------------------------------------------------------- - * - * PgtblVa2PageLocked -- - * - * Return the "page" struct for a given va. - * - * Results: - * struct page or NULL. The mm->page_table_lock must be held, so this - * function is not allowed to schedule() --hpreg - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE struct page * -PgtblVa2PageLocked(struct mm_struct *mm, // IN: Mm structure of a process - VA addr) // IN: Address in the virtual address -{ - pte_t *pte; - - pte = PgtblVa2PTELocked(mm, addr); - if (pte != NULL) { - struct page *page = PgtblPte2Page(pte); - pte_unmap(pte); - return page; - } else { - return NULL; - } -} - - -/* - *----------------------------------------------------------------------------- - * - * PgtblVa2MPN -- - * - * Walks through the hardware page tables of the current process to try to - * find the page structure associated to a virtual address. - * - * Results: - * Same as PgtblVa2MPNLocked() - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE int -PgtblVa2MPN(VA addr) // IN -{ - struct mm_struct *mm; - MPN mpn; - - /* current->mm is NULL for kernel threads, so use active_mm. */ - mm = current->compat_active_mm; - if (compat_get_page_table_lock(mm)) { - spin_lock(compat_get_page_table_lock(mm)); - } - mpn = PgtblVa2MPNLocked(mm, addr); - if (compat_get_page_table_lock(mm)) { - spin_unlock(compat_get_page_table_lock(mm)); - } - return mpn; -} - - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -/* - *----------------------------------------------------------------------------- - * - * PgtblKVa2MPN -- - * - * Walks through the hardware page tables of the current process to try to - * find the page structure associated to a virtual address. - * - * Results: - * Same as PgtblVa2MPNLocked() - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE int -PgtblKVa2MPN(VA addr) // IN -{ - struct mm_struct *mm; - MPN mpn; - - mm = current->compat_active_mm; - if (compat_get_page_table_lock(mm)) { - spin_lock(compat_get_page_table_lock(mm)); - } - mpn = PgtblKVa2MPNLocked(mm, addr); - if (compat_get_page_table_lock(mm)) { - spin_unlock(compat_get_page_table_lock(mm)); - } - return mpn; -} -#endif - - -/* - *----------------------------------------------------------------------------- - * - * PgtblVa2Page -- - * - * Walks through the hardware page tables of the current process to try to - * find the page structure associated to a virtual address. 
- * - * Results: - * Same as PgtblVa2PageLocked() - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE struct page * -PgtblVa2Page(VA addr) // IN -{ - struct mm_struct *mm; - struct page *page; - - mm = current->compat_active_mm; - if (compat_get_page_table_lock(mm)) { - spin_lock(compat_get_page_table_lock(mm)); - } - page = PgtblVa2PageLocked(mm, addr); - if (compat_get_page_table_lock(mm)) { - spin_unlock(compat_get_page_table_lock(mm)); - } - return page; -} - - -#endif /* __PGTBL_H__ */ --- source-7.1.5/vmnet-only/setnice.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/setnice.c 1969-12-31 19:00:00.000000000 -0500 @@ -1,32 +0,0 @@ -/********************************************************* - * Copyright (C) 2005 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -/* - * set_user_nice appeared in 2.4.21. But some distros - * backported it to older kernels. - */ -#include "compat_version.h" -#include "compat_autoconf.h" - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 21) -#include - -void test(void) { - set_user_nice(current, -20); -} -#endif --- source-7.1.5/vmnet-only/sk_alloc.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/sk_alloc.c 1969-12-31 19:00:00.000000000 -0500 @@ -1,39 +0,0 @@ -/********************************************************* - * Copyright (C) 2005 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - *********************************************************/ - -/* - * Detect whether sk_alloc takes a struct proto * as third parameter. - * This API change was introduced between 2.6.12-rc1 and 2.6.12-rc2. 
- */ - -#include "compat_version.h" -#include "compat_autoconf.h" - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) -#include - -static struct proto test_proto = { - .name = "TEST", -}; - -struct sock * -vmware_sk_alloc(void) -{ - return sk_alloc(PF_NETLINK, 0, &test_proto, 1); -} -#endif --- source-7.1.5/vmnet-only/smac.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/smac.c 2011-11-14 02:16:55.000000000 -0500 @@ -327,7 +327,8 @@ EthClassIPv4, // IPv4 type EthClassARP, // one of the various ARP protocols EthClassVLAN, // VLAN type - EthClassIPv6 // IPv6 type + EthClassIPv6, // IPv6 type + EthClassEAPOL, // 802.1x type (EAPOL) } EthClass; /* @@ -1579,11 +1580,11 @@ #endif /* 0 */ /* - * DEBUG: if not IP & not ARP + * DEBUG: if not IP & not ARP & not EAPOL */ if (typeClass != EthClassIPv4 && typeClass != EthClassIPv6 && - typeClass != EthClassARP) { + typeClass != EthClassARP && typeClass != EthClassEAPOL) { /* * If not a common/known media type, then print a status @@ -1851,7 +1852,7 @@ * dest ethernet MAC (and potentially ARP MAC) with that of the lookup table entry */ - else { // if ARP packet: typeClass == EthClassARP + else if (typeClass == EthClassARP) { // ARP packet uint32 arpHeaderWord1; // first word in ARP header uint32 arpHeaderWord2; // second word in ARP header @@ -2284,6 +2285,19 @@ VNETKdPrint((MODULE_NAME "FromHostARP: unrecognized ARP type %08x\n", arpHeaderWord2)); return PacketStatusDropPacket; + } else { // if EAPOL packet: typeClass == EthClassEAPOL + + /* + * Allow incoming EAPOL packets to proceed unmolested provided the + * destination address matches the hardware address. + */ + + if (!MAC_EQ(state->macAddress, eh.destAddr)) { + VNETKdPrint((MODULE_NAME "FromHostEAPOL: incoming request using " + "non-wireless-hardware-addr eth dest MAC, dropping\n")); + return PacketStatusDropPacket; + } + return PacketStatusForwardPacket; } } @@ -2369,13 +2383,13 @@ } /* - * If the packet is not type IP or ARP, then drop the packet + * If the packet is not type IP, ARP or EAPOL, then drop the packet * unless it is a broadcast packet (broadcast packets don't * require much manipulating, so they should be safe to let thru). */ if (typeClass != EthClassIPv4 && typeClass != EthClassIPv6 && - typeClass != EthClassARP) { + typeClass != EthClassARP && typeClass != EthClassEAPOL) { /* * DEBUG: if not a common/known media type, then print a status message @@ -2616,7 +2630,7 @@ * ARP MAC, if appropriate) with that of the wireless hardware */ - else { // typeClass == EthClassARP + else if (typeClass == EthClassARP) { // ARP packet uint32 arpHeaderWord1; // first word of ARP header uint32 arpHeaderWord2; // second word of ARP header @@ -2810,12 +2824,14 @@ */ { + uint32 offset = ethHeaderLen + ARP_SENDER_MAC_OFFSET; + if (!ClonePacket(packets)) { VNETKdPrint((MODULE_NAME " ToHostARP: couldn't " "clone packet\n")); return PacketStatusDropPacket; } - + /* * Substitute sender ethernet MAC with the wireless hardware's MAC */ @@ -2837,31 +2853,57 @@ * Modify ARP source MAC */ - // substitute sender MAC with the wireless hardware's MAC +#if defined(_WIN32) && NDIS_SUPPORT_NDIS6 + /* + * In normal case, we will substitute ARP source MAC with the + * wireless hardware's MAC in the ARP payload. + * + * However if the target IP address is host IP address, + * the arp protocol payload should not be touched. Otherwise + * the Windows TCP/IP stack will not respond for the request. + * (This behavior is seen in Vista and after.) 
+ * Since the ARP payload is not touched, We should change the + * destination MAC in the MAC header to wireless hardware MAC + * so that no other host will receive and process the ARP packet + */ + { + ULONG ipAddr; + if (!GetPacketData(packet, ethHeaderLen + ARP_TARGET_IP_OFFSET, + sizeof ipAddr, &ipAddr)) { + VNETKdPrint((MODULE_NAME " ToHostARP: couldn't get target " + "IP address\n")); + return PacketStatusTooShort; + } + if (BridgeIPv4MatchAddrMAC(ipAddr, state->macAddress)) { + offset = 0; + } + } +#endif + + #ifdef DBG - { - uint8 packetMac[ETH_ALEN]; - if (!GetPacketData(packet, ethHeaderLen + ARP_SENDER_MAC_OFFSET, - sizeof packetMac, packetMac)) { + { + uint8 macAddr[ETH_ALEN]; + if (!GetPacketData(packet, offset, sizeof macAddr, macAddr)) { VNETKdPrint((MODULE_NAME " ToHostARP: " - "couldn't read MAC\n")); + "couldn't read data at offset %u\n", offset)); return PacketStatusTooShort; } - W_VNETKdPrint((MODULE_NAME " ToHostARP: modifying ETH ARP " - "%02x.%02x.%02x.%02x.%02x.%02x source address to match " - "wireless hardware %02x.%02x.%02x.%02x.%02x.%02x \n", - packetMac[0], packetMac[1], packetMac[2], - packetMac[3], packetMac[4], packetMac[5], - state->macAddress[0], state->macAddress[1], - state->macAddress[2], state->macAddress[3], - state->macAddress[4], state->macAddress[5])); - } + W_VNETKdPrint((MODULE_NAME " ToHostARP: modifying %s from " + "%02x.%02x.%02x.%02x.%02x.%02x to " + "wireless hardware address " + "%02x.%02x.%02x.%02x.%02x.%02x \n", + (offset == 0) ? "destination address of MAC header" + : "ARP payload source MAC address", + macAddr[0], macAddr[1], macAddr[2], + macAddr[3], macAddr[4], macAddr[5], + state->macAddress[0], state->macAddress[1], + state->macAddress[2], state->macAddress[3], + state->macAddress[4], state->macAddress[5])); + } #endif - - CopyDataToClonedPacket(packets, state->macAddress, - ethHeaderLen + ARP_SENDER_MAC_OFFSET /* offset */, - ETH_ALEN /* length */); + CopyDataToClonedPacket(packets, state->macAddress, offset, ETH_ALEN); return PacketStatusForwardPacket; } } @@ -2869,6 +2911,19 @@ VNETKdPrint((MODULE_NAME " ToHostARP: unrecognized ARP type %08x\n", arpHeaderWord2)); return PacketStatusDropPacket; + } else { // if EAPOL packet: typeClass == EthClassEAPOL + + /* + * Allow outgoing EAPOL packets to proceed unmolested provided the + * source address matches the hardware address. + */ + + if (!MAC_EQ(state->macAddress, eh->srcAddr)) { + VNETKdPrint((MODULE_NAME "ToHostEAPOL: outgoing request using " + "non-wireless-hardware-addr eth source MAC, dropping\n")); + return PacketStatusDropPacket; + } + return PacketStatusForwardPacket; } } @@ -3408,7 +3463,7 @@ if (!SetPacketByte(packet, checksumOffset, newChecksum >> 8) || !SetPacketByte(packet, checksumOffset + 1, newChecksum & 0xff)) { - VNETKdPrint((MODULE_NAME, "%s: couldn't set new checksum in packet.\n", + VNETKdPrint((MODULE_NAME "%s: couldn't set new checksum in packet.\n", logPrefix)); return FALSE; } @@ -3417,7 +3472,7 @@ } -#if defined(_WIN32) && (NTDDI_VERSION >= NTDDI_LONGHORN) +#if defined(_WIN32) && NDIS_SUPPORT_NDIS6 /* @@ -3592,7 +3647,7 @@ uint8 nextHeader = 0; /* Silence compiler warnings. */ uint32 nextHeaderOffset = 0; /* Silence compiler warnings. 
*/ const uint32 packetLen = GetPacketLength(packet); -#if defined(_WIN32) && (NTDDI_VERSION >= NTDDI_LONGHORN) +#if defined(_WIN32) && NDIS_SUPPORT_NDIS6 uint8 dstMACAddr[ETH_ALEN]; #endif @@ -3605,7 +3660,7 @@ ASSERT(packetLen >= IPv6_HEADER_LEN + ethHeaderLen); ASSERT(toHost && !*toHost); -#if defined(_WIN32) && (NTDDI_VERSION >= NTDDI_LONGHORN) +#if defined(_WIN32) && NDIS_SUPPORT_NDIS6 { IPv6Addr dstAddr; @@ -3675,7 +3730,7 @@ return TRUE; } -#if defined(_WIN32) && (NTDDI_VERSION >= NTDDI_LONGHORN) +#if defined(_WIN32) && NDIS_SUPPORT_NDIS6 if (type == ICMPv6_NDP_NBR_SOLICITATION) { IPv6Addr targetAddr; @@ -4246,6 +4301,9 @@ return EthClassUncommon; case 0x886f: // Microsoft 886f return EthClassCommon; + case 0x888e: // 802.1x (aka EAPOL) + case 0x88c7: // 802.11i pre-authentication (treated as EAPOL) + return EthClassEAPOL; default: return EthClassUnknown; } @@ -4372,6 +4430,16 @@ STRCPY(type, "Microsoft 886f"); } return EthClassCommon; + case 0x888e: + if (type) { + STRCPY(type, "EAPOL"); + } + return EthClassEAPOL; + case 0x88c7: + if (type) { + STRCPY(type, "802.11i pre-auth"); + } + return EthClassEAPOL; default: if (type) { SNPRINTF((type, 40, "unknown type 0x%04x", typeValue)); --- source-7.1.5/vmnet-only/smac.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/smac.h 2011-11-14 02:16:55.000000000 -0500 @@ -65,8 +65,9 @@ struct SMACState; -#if defined(_WIN32) && (NTDDI_VERSION >= NTDDI_LONGHORN) +#if defined(_WIN32) && NDIS_SUPPORT_NDIS6 Bool BridgeIPv6MatchAddrMAC(const IPv6Addr *addr, const uint8 *mac); +Bool BridgeIPv4MatchAddrMAC(const ULONG ipAddr, const uint8 *mac); #endif void SMACINT SMAC_InitState(struct SMACState **ptr); // IN: state to alloc/init --- source-7.1.5/vmnet-only/smac_compat.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/smac_compat.c 2011-11-14 02:16:55.000000000 -0500 @@ -294,19 +294,14 @@ SMACL_Print(const char * msg, // IN: format message ...) 
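
The SMACL_Print change that begins here drops the pre-2.4.8 vsprintf fallback and formats into a smaller, explicitly terminated stack buffer. A sketch of the resulting pattern, assuming nothing beyond the in-kernel vsnprintf and printk; the helper name is made up and the sketch is not part of the patch:

    #include <linux/kernel.h>
    #include <stdarg.h>

    static void
    BoundedDebugPrint(const char *fmt, ...)
    {
        char buf[512];
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(buf, sizeof buf, fmt, ap);   /* truncates rather than overflowing */
        va_end(ap);

        buf[sizeof buf - 1] = '\0';            /* defensive; vsnprintf already terminates */
        printk(KERN_DEBUG "%s", buf);          /* pass as data so '%' in buf is harmless */
    }
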
// IN: params (currently ignored) { - char buf[1024]; + char buf[512]; int len; va_list ap; - va_start(ap, msg); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) + va_start(ap, msg); len = vsnprintf(buf, sizeof buf, msg, ap); -#else - len = vsprintf(buf, msg, ap); -#endif - va_end(ap); - buf[1023] = '\0'; + buf[sizeof buf - 1] = '\0'; printk(KERN_DEBUG "%s", buf); } --- source-7.1.5/vmnet-only/userif.c 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/userif.c 2011-12-11 18:55:19.000000000 -0500 @@ -20,38 +20,34 @@ #define EXPORT_SYMTAB -#include -#include -#include -#include -#include +#define __KERNEL_SYSCALLS__ +#include +#include +#include +#include #include #include -#include "compat_skbuff.h" -#include -#include -#include "compat_sock.h" - -#define __KERNEL_SYSCALLS__ -#include - +#include +#include #include -#include +#include +#include +#include +#include +#include -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4) #include -#endif +#include -#include "vnetInt.h" +#include -#include "compat_uaccess.h" -#include "compat_highmem.h" -#include "compat_mm.h" -#include "pgtbl.h" -#include "compat_wait.h" +#include "vnetInt.h" +#include "compat_skbuff.h" #include "vmnetInt.h" #include "vm_atomic.h" +#include "vm_assert.h" +#include "monitorAction_exported.h" typedef struct VNetUserIFStats { unsigned read; @@ -67,19 +63,21 @@ VNetPort port; struct sk_buff_head packetQueue; uint32* pollPtr; - Atomic_uint32* actPtr; + MonitorActionIntr *actionIntr; uint32 pollMask; - uint32 actMask; + MonitorIdemAction actionID; uint32* recvClusterCount; wait_queue_head_t waitQueue; struct page* actPage; struct page* pollPage; struct page* recvClusterPage; VNetUserIFStats stats; + VNetEvent_Sender *eventSender; } VNetUserIF; static void VNetUserIfUnsetupNotify(VNetUserIF *userIf); static int VNetUserIfSetupNotify(VNetUserIF *userIf, VNet_Notify *vn); +static int VNetUserIfSetUplinkState(VNetPort *port, uint8 linkUp); /* *----------------------------------------------------------------------------- @@ -87,15 +85,14 @@ * UserifLockPage -- * * Lock in core the physical page associated to a valid virtual - * address --hpreg - * + * address. + * * Results: * The page structure on success * NULL on failure: memory pressure. Retry later * * Side effects: - * Loads page into memory - * Pre-2.4.19 version may temporarily lock another physical page + * Loads page into memory * *----------------------------------------------------------------------------- */ @@ -103,7 +100,6 @@ static INLINE struct page * UserifLockPage(VA addr) // IN { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19) struct page *page = NULL; int retval; @@ -117,74 +113,8 @@ } return page; -#else - struct page *page; - struct page *check; - volatile int c; - - /* - * Establish a virtual to physical mapping by touching the physical - * page. 
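
The notify-pointer plumbing being reworked here (UserifLockPage and the new VNetUserIfMapPtr) follows a common shape: validate the user pointer, require that the mapped object not straddle a page, pin the backing page, and kmap it. The sketch below shows that shape for a kernel of this era; it is not the patch's code, the helper name is made up, and the exact locking around get_user_pages is an assumption.

    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include <linux/uaccess.h>
    #include <linux/sched.h>

    static int
    MapUserWord(unsigned long uaddr,      // IN:  user virtual address
                struct page **pagep,      // OUT: pinned page
                void **kaddr)             // OUT: kernel mapping of uaddr
    {
        int got;

        /* Reject bad pointers and objects that cross a page boundary. */
        if (!access_ok(VERIFY_WRITE, (void __user *)uaddr, sizeof(u32)) ||
            ((uaddr + sizeof(u32) - 1) & PAGE_MASK) != (uaddr & PAGE_MASK)) {
            return -EINVAL;
        }

        down_read(&current->mm->mmap_sem);
        got = get_user_pages(current, current->mm, uaddr, 1,
                             1 /* write */, 0 /* force */, pagep, NULL);
        up_read(&current->mm->mmap_sem);
        if (got != 1) {
            return -EAGAIN;               /* not resident; caller may retry later */
        }

        *kaddr = (u8 *)kmap(*pagep) + (uaddr & ~PAGE_MASK);
        return 0;
    }
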
Because the address is valid, there is no need to check the return - * value here --hpreg - */ - compat_get_user(c, (char *)addr); - - page = PgtblVa2Page(addr); - if (page == NULL) { - /* The mapping went away --hpreg */ - return NULL; - } - - /* Lock the physical page --hpreg */ - get_page(page); - - check = PgtblVa2Page(addr); - if (check != page) { - /* - * The mapping went away or was modified, so we didn't lock the right - * physical page --hpreg - */ - - /* Unlock the physical page --hpreg */ - put_page(page); - - return NULL; - } - - /* We locked the right physical page --hpreg */ - return page; -#endif } -/* - *----------------------------------------------------------------------------- - * - * VNetUserIfInvalidPointer -- - * - * Reports if pointer provided by user is definitely wrong, - * or only potentially wrong. - * - * Results: - * non-zero if pointer is definitely wrong, otherwise returns - * 0 if the pointer might be okay. - * - * Side effects: - * Might sleep. - * - *----------------------------------------------------------------------------- - */ - -static INLINE int -VNetUserIfInvalidPointer(VA uAddr, // IN: user-provided pointer - size_t size) // IN: anticipated size of data -{ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) - return !access_ok(VERIFY_WRITE, (void *)uAddr, size); -#else - return verify_area(VERIFY_READ, (void *)uAddr, size) || - verify_area(VERIFY_WRITE, (void *)uAddr, size); -#endif -} /* *----------------------------------------------------------------------------- @@ -204,12 +134,13 @@ */ static INLINE int -VNetUserIfMapUint32Ptr(VA uAddr, // IN: pointer to user memory - struct page **p, // OUT: locked page - uint32 **ptr) // OUT: kernel mapped pointer +VNetUserIfMapPtr(VA uAddr, // IN: pointer to user memory + size_t size, // IN: size of data + struct page **p, // OUT: locked page + void **ptr) // OUT: kernel mapped pointer { - if (VNetUserIfInvalidPointer(uAddr, sizeof (uint32)) || - (((uAddr + sizeof(uint32) - 1) & ~(PAGE_SIZE - 1)) != + if (!access_ok(VERIFY_WRITE, (void *)uAddr, size) || + (((uAddr + size - 1) & ~(PAGE_SIZE - 1)) != (uAddr & ~(PAGE_SIZE - 1)))) { return -EINVAL; } @@ -219,10 +150,18 @@ return -EAGAIN; } - *ptr = (uint32 *)((char *)kmap(*p) + (uAddr & (PAGE_SIZE - 1))); + *ptr = (uint8 *)kmap(*p) + (uAddr & (PAGE_SIZE - 1)); return 0; } +static INLINE int +VNetUserIfMapUint32Ptr(VA uAddr, // IN: pointer to user memory + struct page **p, // OUT: locked page + uint32 **ptr) // OUT: kernel mapped pointer +{ + return VNetUserIfMapPtr(uAddr, sizeof **ptr, p, (void **)ptr); +} + /* *----------------------------------------------------------------------------- * @@ -244,35 +183,36 @@ static INLINE int VNetUserIfSetupNotify(VNetUserIF *userIf, // IN - VNet_Notify *vn) // IN + VNet_Notify *vn) // IN { int retval; - if (userIf->pollPtr || userIf->actPtr || userIf->recvClusterCount) { + if (userIf->pollPtr || userIf->actionIntr || userIf->recvClusterCount) { LOG(0, (KERN_DEBUG "vmnet: Notification mechanism already active\n")); return -EBUSY; } - if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &userIf->pollPage, - &userIf->pollPtr)) < 0) { + if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &userIf->pollPage, + &userIf->pollPtr)) < 0) { return retval; } - - if ((retval = VNetUserIfMapUint32Ptr((VA)vn->actPtr, &userIf->actPage, - (uint32 **)&userIf->actPtr)) < 0) { + + if ((retval = VNetUserIfMapPtr((VA)vn->actPtr, sizeof *userIf->actionIntr, + &userIf->actPage, + (void **)&userIf->actionIntr)) < 0) { 
VNetUserIfUnsetupNotify(userIf); return retval; } if ((retval = VNetUserIfMapUint32Ptr((VA)vn->recvClusterPtr, - &userIf->recvClusterPage, - &userIf->recvClusterCount)) < 0) { + &userIf->recvClusterPage, + &userIf->recvClusterCount)) < 0) { VNetUserIfUnsetupNotify(userIf); return retval; } userIf->pollMask = vn->pollMask; - userIf->actMask = vn->actMask; + userIf->actionID = vn->actionID; return 0; } @@ -316,12 +256,12 @@ } userIf->pollPtr = NULL; userIf->pollPage = NULL; - userIf->actPtr = NULL; + userIf->actionIntr = NULL; userIf->actPage = NULL; userIf->recvClusterCount = NULL; userIf->recvClusterPage = NULL; userIf->pollMask = 0; - userIf->actMask = 0; + userIf->actionID = -1; } @@ -359,6 +299,10 @@ VNetUserIfUnsetupNotify(userIf); } + if (userIf->eventSender) { + VNetEvent_DestroySender(userIf->eventSender); + } + if (this->procEntry) { VNetProc_RemoveEntry(this->procEntry); } @@ -397,6 +341,8 @@ if (!VNetPacketMatch(dest, userIf->port.paddr, + (const uint8 *)userIf->port.exactFilter, + userIf->port.exactFilterLen, userIf->port.ladrf, userIf->port.flags)) { userIf->stats.droppedMismatch++; @@ -419,7 +365,7 @@ if (userIf->pollPtr) { *userIf->pollPtr |= userIf->pollMask; if (skb_queue_len(&userIf->packetQueue) >= (*userIf->recvClusterCount)) { - Atomic_Or(userIf->actPtr, userIf->actMask); + MonitorAction_SetBits(userIf->actionIntr, userIf->actionID); } } wake_up(&userIf->waitQueue); @@ -486,7 +432,6 @@ } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4) /* *---------------------------------------------------------------------- * @@ -572,10 +517,18 @@ unsigned int tmpCsum; const void *vaddr; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) vaddr = kmap(frag->page); +#else + vaddr = kmap(frag->page.p); +#endif tmpCsum = csum_and_copy_to_user(vaddr + frag->page_offset, curr, frag->size, 0, &err); - kunmap(frag->page); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) + kunmap(frag->page); +#else + kunmap(frag->page.p); +#endif if (err) { return err; } @@ -597,7 +550,6 @@ } return csum_fold(csum); } -#endif /* @@ -625,11 +577,6 @@ if (count > skb->len) { count = skb->len; } -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 4) - if (copy_to_user(buf, skb->data, count)) { - return -EFAULT; - } -#else /* * If truncation occurs, we do not bother with checksumming - caller cannot * verify checksum anyway in such case, and copy without checksum is @@ -661,7 +608,6 @@ return -EFAULT; } } -#endif return count; } @@ -712,18 +658,6 @@ if (skb_queue_empty(&userIf->packetQueue)) { *userIf->pollPtr &= ~userIf->pollMask; } -#if 0 - /* - * Disable this for now since the monitor likes to assert that - * actions are present and thus can't cope with them disappearing - * out from under it. See bug 47760. -Jeremy. 
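
The csum_and_copy_to_user hunk above has to repeat a version check because Linux 3.2 changed skb_frag_t so the fragment's page pointer lives at frag->page.p instead of frag->page. One hedged alternative is to confine that check to a single accessor, as sketched below; the helper name is hypothetical (3.2 itself added skb_frag_page() for the same purpose), and the sketch is not part of the patch.

    #include <linux/version.h>
    #include <linux/skbuff.h>

    static inline struct page *
    VNetSkbFragPage(const skb_frag_t *frag)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)
        return frag->page;        /* old layout: plain struct page * member */
    #else
        return frag->page.p;      /* 3.2+: page pointer wrapped in a struct */
    #endif
    }

With such a helper the kmap()/kunmap() pair in the datagram-checksum path needs no conditional compilation of its own.
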
22 July 2004 - */ - - if (skb_queue_len(&userIf->packetQueue) < (*userIf->recvClusterCount) && - (Atomic_Read(userIf->actPtr) & userIf->actMask) != 0) { - Atomic_And(userIf->actPtr, ~userIf->actMask); - } -#endif } if (skb != NULL || filp->f_flags & O_NONBLOCK) { @@ -870,8 +804,12 @@ return -EFAULT; } - if (vn.version != 3) { - return -EINVAL; + ASSERT_ON_COMPILE(VNET_NOTIFY_VERSION == 5); + ASSERT_ON_COMPILE(ACTION_EXPORTED_VERSION == 2); + if (vn.version != VNET_NOTIFY_VERSION || + vn.actionVersion != ACTION_EXPORTED_VERSION || + vn.actionID / ACTION_WORD_SIZE >= ACTION_NUM_WORDS) { + return -ENOTTY; } retval = VNetUserIfSetupNotify(userIf, &vn); @@ -909,7 +847,21 @@ } } break; + case SIOCINJECTLINKSTATE: + { + uint8 linkUpFromUser; + if (copy_from_user(&linkUpFromUser, (void *)ioarg, + sizeof linkUpFromUser)) { + return -EFAULT; + } + + if (linkUpFromUser != 0 && linkUpFromUser != 1) { + return -EINVAL; + } + return VNetUserIfSetUplinkState(port, linkUpFromUser); + } + break; default: return -ENOIOCTLCMD; break; @@ -951,6 +903,72 @@ return 0; } +/* + *---------------------------------------------------------------------- + * + * VNetUserIfSetUplinkState -- + * + * Sends link state change event. + * + * Results: + * 0 on success, errno on failure. + * + * Side effects: + * Link state event is sent to all the event listeners + * + *---------------------------------------------------------------------- + */ + +int +VNetUserIfSetUplinkState(VNetPort *port, uint8 linkUp) +{ + VNetUserIF *userIf; + VNetJack *hubJack; + VNet_LinkStateEvent event; + int retval; + + userIf = (VNetUserIF *)port->jack.private; + hubJack = port->jack.peer; + + if (hubJack == NULL) { + return -EINVAL; + } + + if (userIf->eventSender == NULL) { + /* create event sender */ + retval = VNetHub_CreateSender(hubJack, &userIf->eventSender); + if (retval != 0) { + return retval; + } + } + + event.header.size = sizeof event; + retval = VNetEvent_GetSenderId(userIf->eventSender, &event.header.senderId); + if (retval != 0) { + LOG(1, (KERN_NOTICE "userif-%d: can't send link state event, " + "getSenderId failed (%d)\n", userIf->port.id, retval)); + return retval; + } + event.header.eventId = 0; + event.header.classSet = VNET_EVENT_CLASS_UPLINK; + event.header.type = VNET_EVENT_TYPE_LINK_STATE; + /* + * XXX kind of a hack, vmx will coalesce linkup/down if they come from the + * same adapter. + */ + event.adapter = linkUp; + event.up = linkUp; + retval = VNetEvent_Send(userIf->eventSender, &event.header); + if (retval != 0) { + LOG(1, (KERN_NOTICE "userif-%d: can't send link state event, send " + "failed (%d)\n", userIf->port.id, retval)); + } + + LOG(0, (KERN_NOTICE "userif-%d: sent link %s event.", + userIf->port.id, linkUp?"up":"down")); + + return retval; +} /* *---------------------------------------------------------------------- @@ -1001,12 +1019,15 @@ userIf->port.jack.portsChanged = NULL; userIf->port.jack.isBridged = NULL; userIf->pollPtr = NULL; - userIf->actPtr = NULL; + userIf->actionIntr = NULL; userIf->recvClusterCount = NULL; userIf->pollPage = NULL; userIf->actPage = NULL; userIf->recvClusterPage = NULL; - userIf->pollMask = userIf->actMask = 0; + userIf->pollMask = 0; + userIf->actionID = -1; + userIf->port.exactFilterLen = 0; + userIf->eventSender = NULL; /* * Make proc entry for this jack. 
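
The new SIOCINJECTLINKSTATE ioctl takes a pointer to a single byte that must be exactly 0 (link down) or 1 (link up); anything else is rejected with -EINVAL, and the handler forwards the state as a VNET_EVENT_TYPE_LINK_STATE event to listeners on the hub. Below is a sketch of the user-level side, not part of the patch; the device path is left to the caller and error handling is minimal. It assumes only that the ioctl number comes from the vmnet headers.

    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    static int
    inject_link_state(const char *vmnet_dev,   /* e.g. a /dev/vmnetN node */
                      int up)                  /* nonzero = link up */
    {
        uint8_t state = up ? 1 : 0;
        int fd = open(vmnet_dev, O_RDWR);

        if (fd < 0) {
            perror("open");
            return -1;
        }
        /* SIOCINJECTLINKSTATE is defined in the vmnet ioctl header. */
        if (ioctl(fd, SIOCINJECTLINKSTATE, &state) < 0) {
            perror("SIOCINJECTLINKSTATE");
            close(fd);
            return -1;
        }
        close(fd);
        return 0;
    }
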
@@ -1034,6 +1055,7 @@ memset(userIf->port.paddr, 0, sizeof userIf->port.paddr); memset(userIf->port.ladrf, 0, sizeof userIf->port.ladrf); + memset(userIf->port.exactFilter, 0, sizeof userIf->port.exactFilter); VNet_MakeMACAddress(&userIf->port); --- source-7.1.5/vmnet-only/vm_assert.h 1969-12-31 19:00:00.000000000 -0500 +++ patched/vmnet-only/vm_assert.h 2011-11-14 02:16:55.000000000 -0500 @@ -0,0 +1,342 @@ +/********************************************************* + * Copyright (C) 1998-2004 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + *********************************************************/ + +/* + * vm_assert.h -- + * + * The basic assertion facility for all VMware code. + * + * For proper use, see + * http://vmweb.vmware.com/~mts/WebSite/guide/programming/asserts.html + */ + +#ifndef _VM_ASSERT_H_ +#define _VM_ASSERT_H_ + +#define INCLUDE_ALLOW_USERLEVEL + +#define INCLUDE_ALLOW_MODULE +#define INCLUDE_ALLOW_VMMON +#define INCLUDE_ALLOW_VMKERNEL +#define INCLUDE_ALLOW_VMKDRIVERS +#define INCLUDE_ALLOW_VMK_MODULE +#define INCLUDE_ALLOW_DISTRIBUTE +#define INCLUDE_ALLOW_VMCORE +#define INCLUDE_ALLOW_VMIROM +#include "includeCheck.h" + +// XXX not necessary except some places include vm_assert.h improperly +#include "vm_basic_types.h" +#include "vm_basic_defs.h" + + +/* + * XXX old file code + */ + +#ifdef FILECODEINT +#error "Don't define FILECODEINT. It is obsolete." +#endif +#ifdef FILECODE +#error "Don't define FILECODE. It is obsolete." +#endif + + +/* + * Panic and log functions + */ + +EXTERN void Log(const char *fmt, ...) PRINTF_DECL(1, 2); +EXTERN void Warning(const char *fmt, ...) PRINTF_DECL(1, 2); +EXTERN NORETURN void Panic(const char *fmt, ...) PRINTF_DECL(1, 2); + +EXTERN void LogThrottled(uint32 *count, const char *fmt, ...) + PRINTF_DECL(2, 3); +EXTERN void WarningThrottled(uint32 *count, const char *fmt, ...) + PRINTF_DECL(2, 3); + +/* DB family: messages which are parsed by logfile database system */ +#define WarningDB Warning +#define LogDB Log +#define WarningThrottledDB WarningThrottled +#define LogThrottledDB LogThrottled + + +/* + * Stress testing: redefine ASSERT_IFNOT() to taste + */ + +#ifndef ASSERT_IFNOT + /* + * PR 271512: When compiling with gcc, catch assignments inside an ASSERT. + * + * 'UNLIKELY' is defined with __builtin_expect, which does not warn when + * passed an assignment (gcc bug 36050). To get around this, we put 'cond' + * in an 'if' statement and make sure it never gets executed by putting + * that inside of 'if (0)'. We use gcc's statement expression syntax to + * make ASSERT an expression because some code uses it that way. + * + * Since statement expression syntax is a gcc extension and since it's + * not clear if this is a problem with other compilers, the ASSERT + * definition was not changed for them. 
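
The PR 271512 comment above, which continues just below, is easier to see with a concrete case. Under the plain ternary form of ASSERT_IFNOT, gcc's __builtin_expect hides the "assignment used as truth value" warning (gcc bug 36050), so the typo in this made-up fragment would compile silently; the statement-expression form re-evaluates the condition in a dead 'if' purely so the warning comes back, at no run-time cost. The names here are inventions for the example.

    /* Made-up example; 'limit' and 'count' are not from the patch. */
    static int limit = 8;

    static void
    CheckCount(int count)
    {
        ASSERT(count = limit);   /* typo: '=' instead of '=='.  With the gcc    */
                                 /* statement-expression ASSERT_IFNOT, the dead */
                                 /* 'if (cond)' makes gcc warn about this.      */
    }
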
Using a bare 'cond' with the + * ternary operator may provide a solution. + */ + + #ifdef __GNUC__ + #define ASSERT_IFNOT(cond, panic) \ + ({if (UNLIKELY(!(cond))) { panic; if (0) { if (cond) { ; } } } (void)0;}) + #else + #define ASSERT_IFNOT(cond, panic) \ + (UNLIKELY(!(cond)) ? (panic) : (void)0) + #endif +#endif + + +/* + * Assert, panic, and log macros + * + * Some of these are redefined below undef !VMX86_DEBUG. + * ASSERT() is special cased because of interaction with Windows DDK. + */ + +#if defined VMX86_DEBUG || defined ASSERT_ALWAYS_AVAILABLE +#undef ASSERT +#define ASSERT(cond) \ + ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertAssert)) +#endif +#define ASSERT_BUG(bug, cond) \ + ASSERT_IFNOT(cond, _ASSERT_PANIC_BUG(bug, AssertAssert)) +#define ASSERT_BUG_DEBUGONLY(bug, cond) ASSERT_BUG(bug, cond) + +#define PANIC() _ASSERT_PANIC(AssertPanic) +#define PANIC_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertPanic) + +#define ASSERT_NOT_IMPLEMENTED(cond) \ + ASSERT_IFNOT(cond, NOT_IMPLEMENTED()) +#define ASSERT_NOT_IMPLEMENTED_BUG(bug, cond) \ + ASSERT_IFNOT(cond, NOT_IMPLEMENTED_BUG(bug)) + +#define NOT_IMPLEMENTED() _ASSERT_PANIC(AssertNotImplemented) +#define NOT_IMPLEMENTED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotImplemented) + +#define NOT_REACHED() _ASSERT_PANIC(AssertNotReached) +#define NOT_REACHED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotReached) + +#define ASSERT_MEM_ALLOC(cond) \ + ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertMemAlloc)) + +#ifdef VMX86_DEVEL + #define ASSERT_LENGTH(real, expected) \ + ASSERT_IFNOT((real) == (expected), \ + Panic(AssertLengthFmt, __FILE__, __LINE__, real, expected)) +#else + #define ASSERT_LENGTH(real, expected) ASSERT((real) == (expected)) +#endif + +#ifdef VMX86_DEVEL + #define ASSERT_DEVEL(cond) ASSERT(cond) +#else + #define ASSERT_DEVEL(cond) ((void) 0) +#endif + +#define ASSERT_NO_INTERRUPTS() ASSERT(!INTERRUPTS_ENABLED()) +#define ASSERT_HAS_INTERRUPTS() ASSERT(INTERRUPTS_ENABLED()) + +#define ASSERT_LOG_UNEXPECTED(bug, cond) \ + (UNLIKELY(!(cond)) ? LOG_UNEXPECTED(bug) : 0) +#ifdef VMX86_DEVEL + #define LOG_UNEXPECTED(bug) \ + Warning(AssertUnexpectedFmt, __FILE__, __LINE__, bug) +#else + #define LOG_UNEXPECTED(bug) \ + Log(AssertUnexpectedFmt, __FILE__, __LINE__, bug) +#endif + +#define ASSERT_NOT_TESTED(cond) (UNLIKELY(!(cond)) ? NOT_TESTED() : 0) +#ifdef VMX86_DEVEL + #define NOT_TESTED() Warning(AssertNotTestedFmt, __FILE__, __LINE__) +#else + #define NOT_TESTED() Log(AssertNotTestedFmt, __FILE__, __LINE__) +#endif + +#define NOT_TESTED_ONCE() \ + do { \ + static Bool alreadyPrinted = FALSE; \ + if (UNLIKELY(!alreadyPrinted)) { \ + alreadyPrinted = TRUE; \ + NOT_TESTED(); \ + } \ + } while (0) + +#define NOT_TESTED_1024() \ + do { \ + static uint16 count = 0; \ + if (UNLIKELY(count == 0)) { NOT_TESTED(); } \ + count = (count + 1) & 1023; \ + } while (0) + +#define LOG_ONCE(_s) \ + do { \ + static Bool logged = FALSE; \ + if (!logged) { \ + Log _s; \ + logged = TRUE; \ + } \ + } while (0) + + +/* + * Redefine macros that are only in debug versions + */ + +#if !defined VMX86_DEBUG && !defined ASSERT_ALWAYS_AVAILABLE // { + +#undef ASSERT +#define ASSERT(cond) ((void) 0) + +#undef ASSERT_BUG_DEBUGONLY +#define ASSERT_BUG_DEBUGONLY(bug, cond) ((void) 0) + +#undef ASSERT_LENGTH +#define ASSERT_LENGTH(real, expected) ((void) 0) + +/* + * Expand NOT_REACHED() as appropriate for each situation. + * + * Mainly, we want the compiler to infer the same control-flow + * information as it would from Panic(). 
Otherwise, different + * compilation options will lead to different control-flow-derived + * errors, causing some make targets to fail while others succeed. + * + * VC++ has the __assume() built-in function which we don't trust + * (see bug 43485); gcc has no such construct; we just panic in + * userlevel code. The monitor doesn't want to pay the size penalty + * (measured at 212 bytes for the release vmm for a minimal infinite + * loop; panic would cost even more) so it does without and lives + * with the inconsistency. + */ + +#ifdef VMM +#undef NOT_REACHED +#define NOT_REACHED() ((void) 0) +#else +// keep debug definition +#endif + +#undef ASSERT_LOG_UNEXPECTED +#define ASSERT_LOG_UNEXPECTED(bug, cond) ((void) 0) + +#undef LOG_UNEXPECTED +#define LOG_UNEXPECTED(bug) ((void) 0) + +#undef ASSERT_NOT_TESTED +#define ASSERT_NOT_TESTED(cond) ((void) 0) +#undef NOT_TESTED +#define NOT_TESTED() ((void) 0) +#undef NOT_TESTED_ONCE +#define NOT_TESTED_ONCE() ((void) 0) +#undef NOT_TESTED_1024 +#define NOT_TESTED_1024() ((void) 0) + +#endif // !VMX86_DEBUG } + + +/* + * Compile-time assertions. + * + * ASSERT_ON_COMPILE does not use the common + * switch (0) { case 0: case (e): ; } trick because some compilers (e.g. MSVC) + * generate code for it. + * + * The implementation uses both enum and typedef because the typedef alone is + * insufficient; gcc allows arrays to be declared with non-constant expressions + * (even in typedefs, where it makes no sense). + */ + +#define ASSERT_ON_COMPILE(e) \ + do { \ + enum { AssertOnCompileMisused = ((e) ? 1 : -1) }; \ + typedef char AssertOnCompileFailed[AssertOnCompileMisused]; \ + } while (0) + + +/* + * To put an ASSERT_ON_COMPILE() outside a function, wrap it + * in MY_ASSERTS(). The first parameter must be unique in + * each .c file where it appears. For example, + * + * MY_ASSERTS(FS3_INT, + * ASSERT_ON_COMPILE(sizeof(FS3_DiskLock) == 128); + * ASSERT_ON_COMPILE(sizeof(FS3_DiskLockReserved) == DISK_BLOCK_SIZE); + * ASSERT_ON_COMPILE(sizeof(FS3_DiskBlock) == DISK_BLOCK_SIZE); + * ASSERT_ON_COMPILE(sizeof(Hardware_DMIUUID) == 16); + * ) + * + * Caution: ASSERT() within MY_ASSERTS() is silently ignored. + * The same goes for anything else not evaluated at compile time. + */ + +#define MY_ASSERTS(name, assertions) \ + static INLINE void name(void) { \ + assertions \ + } + + +/* + * Internal macros, functions, and strings + * + * The monitor wants to save space at call sites, so it has specialized + * functions for each situation. User level wants to save on implementation + * so it uses generic functions. + */ + +#if !defined VMM || defined MONITOR_APP // { + +#if defined VMKERNEL +// vmkernel Panic() function does not want a trailing newline. 
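
ASSERT_ON_COMPILE, defined just above, costs nothing at run time: when the condition is false the enum member is -1, the array typedef gets a negative length, and compilation fails at the point of use. A made-up usage sketch (the structure and its expected size are inventions for the example, not part of the patch):

    typedef struct OnWireHeader {
        uint32 magic;
        uint32 length;
    } OnWireHeader;

    static INLINE void
    ValidateWireFormat(void)
    {
        /* Compilation fails here if the header ever grows or gains padding. */
        ASSERT_ON_COMPILE(sizeof(OnWireHeader) == 8);
    }
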
+#define _ASSERT_PANIC(name) \ + Panic(_##name##Fmt, __FILE__, __LINE__) +#define _ASSERT_PANIC_BUG(bug, name) \ + Panic(_##name##Fmt " bugNr=%d", __FILE__, __LINE__, bug) + +#else /* !VMKERNEL */ +#define _ASSERT_PANIC(name) \ + Panic(_##name##Fmt "\n", __FILE__, __LINE__) +#define _ASSERT_PANIC_BUG(bug, name) \ + Panic(_##name##Fmt " bugNr=%d\n", __FILE__, __LINE__, bug) +#endif /* VMKERNEL */ + +#define AssertLengthFmt _AssertLengthFmt +#define AssertUnexpectedFmt _AssertUnexpectedFmt +#define AssertNotTestedFmt _AssertNotTestedFmt + +#endif // } + +// these don't have newline so a bug can be tacked on +#define _AssertPanicFmt "PANIC %s:%d" +#define _AssertAssertFmt "ASSERT %s:%d" +#define _AssertNotImplementedFmt "NOT_IMPLEMENTED %s:%d" +#define _AssertNotReachedFmt "NOT_REACHED %s:%d" +#define _AssertMemAllocFmt "MEM_ALLOC %s:%d" + +// these are complete formats with newline +#define _AssertLengthFmt "LENGTH %s:%d r=%#x e=%#x\n" +#define _AssertUnexpectedFmt "UNEXPECTED %s:%d bugNr=%d\n" +#define _AssertNotTestedFmt "NOT_TESTED %s:%d\n" + +#endif /* ifndef _VM_ASSERT_H_ */ --- source-7.1.5/vmnet-only/vm_atomic.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/vm_atomic.h 2011-11-14 02:16:55.000000000 -0500 @@ -40,10 +40,9 @@ //#define FAKE_ATOMIC /* defined if true atomic not needed */ #define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMMEXT + #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON -#define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL @@ -66,35 +65,6 @@ volatile uint64 value; } Atomic_uint64 ALIGNED(8); -#ifdef __arm__ -#ifndef NOT_IMPLEMENTED -#error NOT_IMPLEMENTED undefined -#endif -#ifdef __GNUC__ -EXTERN Atomic_uint32 atomicLocked64bit; -#ifndef FAKE_ATOMIC - /* - * Definitions for kernel function call which attempts an - * atomic exchange, returning 0 only upon success. - * The code actually called is put in memory by the kernel, - * and is in fact what the kernel uses for this atomic - * instruction. This does not work for Linux versions - * before 2.6 or (obviously) for non-Linux implementations. - * For other implementations on ARMv6 and up, use - * LDREX/SUBS/STREXEQ/LDRNE/ADDS/BNE spin-lock; for pre-ARMv6, - * use SWP-based spin-lock. - */ -#if !defined(__linux__) -#define __kernel_cmpxchg(x, y, z) NOT_IMPLEMENTED() -#else - typedef int (__kernel_cmpxchg_t)(uint32 oldVal, - uint32 newVal, - volatile uint32 *mem); -#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0) -#endif -#endif // FAKE_ATOMIC -#endif // __GNUC__ -#endif // __arm__ /* * Prototypes for msft atomics. These are defined & inlined by the @@ -103,11 +73,11 @@ * have to use these. Unfortunately, we still have to use some inline asm * for the 32 bit code since the and/or/xor implementations didn't show up * untill xp or 2k3. - * + * * The declarations for the intrinsic functions were taken from ntddk.h * in the DDK. The declarations must match otherwise the 64-bit c++ * compiler will complain about second linkage of the intrinsic functions. - * We define the intrinsic using the basic types corresponding to the + * We define the intrinsic using the basic types corresponding to the * Windows typedefs. This avoids having to include windows header files * to get to the windows types. 
*/ @@ -151,14 +121,37 @@ #endif #endif /* _MSC_VER */ +#ifdef __arm__ +# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \ + defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__) +# define VM_ARM_V7 +# else +# error Only ARMv7 extends the synchronization primitives ldrex/strex. \ + For the lower ARM version, please implement the atomic functions \ + by kernel APIs. +# endif +#endif + +/* Data Memory Barrier */ +#ifdef VM_ARM_V7 +#define dmb() __asm__ __volatile__("dmb" : : : "memory") +#endif + -/* Convert a volatile int to Atomic_uint32. */ +/* Convert a volatile uint32 to Atomic_uint32. */ static INLINE Atomic_uint32 * Atomic_VolatileToAtomic(volatile uint32 *var) { return (Atomic_uint32 *)var; } +/* Convert a volatile uint64 to Atomic_uint64. */ +static INLINE Atomic_uint64 * +Atomic_VolatileToAtomic64(volatile uint64 *var) +{ + return (Atomic_uint64 *)var; +} + /* *----------------------------------------------------------------------------- * @@ -175,7 +168,7 @@ * * Atomic_SetFence sets AtomicUseFence to the given value. * - * Atomic_Init computes and sets AtomicUseFence. + * Atomic_Init computes and sets AtomicUseFence for x86. * It does not take into account the number of processors. * * The rationale for all this complexity is that Atomic_Init @@ -247,15 +240,8 @@ "lfence\n\t" "2:\n\t" ".pushsection .patchtext\n\t" -#ifdef VMM32 - ".long 1b\n\t" - ".long 0\n\t" - ".long 2b\n\t" - ".long 0\n\t" -#else ".quad 1b\n\t" ".quad 2b\n\t" -#endif ".popsection\n\t" ::: "memory"); #else if (UNLIKELY(AtomicUseFence)) { @@ -371,37 +357,37 @@ Atomic_ReadWrite(Atomic_uint32 *var, // IN uint32 val) // IN { -#ifdef FAKE_ATOMIC - uint32 retval = var->value; - var->value = val; - return retval; -#elif defined(__GNUC__) -#ifdef __arm__ - register uint32 retval; - register volatile uint32 *mem = &(var->value); - /* XXX - ARMv5 only: for ARMv6, use LDREX/STREX/CMP/BEQ spin-lock */ - __asm__ __volatile__("swp %0, %1, [%2]" - : "=&r,&r" (retval) - : "r,0" (val), "r,r" (mem) : "memory"); - return retval; -#else // __arm__ (assume x86*) +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register volatile uint32 retVal; + register volatile uint32 res; + + dmb(); + + __asm__ __volatile__( + "1: ldrex %[retVal], [%[var]] \n\t" + "strex %[res], %[val], [%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [retVal] "=&r" (retVal), [res] "=&r" (res) + : [var] "r" (&var->value), [val] "r" (val) + : "memory", "cc" + ); + + dmb(); + + return retVal; +#else // __VM_ARM_V7 (assume x86*) /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "xchgl %0, %1" -# if VM_ASM_PLUS : "=r" (val), "+m" (var->value) : "0" (val) -# else - : "=r" (val), - "=m" (var->value) - : "0" (val), - "1" (var->value) -# endif ); AtomicEpilogue(); return val; -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if _MSC_VER >= 1310 return _InterlockedExchange((long *)&var->value, (long)val); @@ -415,10 +401,10 @@ // eax is the return value, this is documented to work - edward } #pragma warning(pop) -#endif +#endif // _MSC_VER >= 1310 #else #error No compiler defined for Atomic_ReadWrite -#endif +#endif // __GNUC__ } #define Atomic_ReadWrite32 Atomic_ReadWrite @@ -444,57 +430,43 @@ uint32 oldVal, // IN uint32 newVal) // IN { -#ifdef FAKE_ATOMIC - uint32 readVal = var->value; +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register uint32 retVal; + register uint32 res; - if (oldVal == readVal) { - var->value = newVal; - } - return oldVal; -#elif defined(__GNUC__) -#ifdef __arm__ - uint32 readVal; - register volatile uint32 *mem = 
&(var->value); + dmb(); - // loop until var not oldVal or var successfully replaced when var oldVal - do { - readVal = Atomic_Read(var); - if (oldVal != readVal) { - return readVal; - } - } while (__kernel_cmpxchg(oldVal, newVal, mem) != 0); - return oldVal; // success -#else // __arm__ (assume x86*) + __asm__ __volatile__( + "1: ldrex %[retVal], [%[var]] \n\t" + "mov %[res], #1 \n\t" + "teq %[retVal], %[oldVal] \n\t" + "strexeq %[res], %[newVal], [%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [retVal] "=&r" (retVal), [res] "=&r" (res) + : [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal) + : "memory", "cc" + ); + + dmb(); + + return retVal; +#else // VM_ARM_V7 (assume x86*) uint32 val; /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; cmpxchgl %2, %1" -# if VM_ASM_PLUS : "=a" (val), "+m" (var->value) : "r" (newVal), "0" (oldVal) -# else - : "=a" (val), - "=m" (var->value) - : "r" (newVal), - "0" (oldVal) - /* - * "1" (var->value): results in inconsistent constraints on gcc 2.7.2.3 - * when compiling enterprise-2.2.17-14-RH7.0-update. - * The constraint has been commented out for now. We may consider doing - * this systematically, but we need to be sure it is the right thing to - * do. However, it is also possible that the offending use of this asm - * function will be removed in the near future in which case we may - * decide to reintroduce the constraint instead. hpreg & agesen. - */ -# endif : "cc" ); AtomicEpilogue(); return val; -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if _MSC_VER >= 1310 return _InterlockedCompareExchange((long *)&var->value, @@ -586,33 +558,35 @@ Atomic_And(Atomic_uint32 *var, // IN uint32 val) // IN { -#ifdef FAKE_ATOMIC - var->value &= val; -#elif defined(__GNUC__) -#ifdef __arm__ - /* same as Atomic_FetchAndAnd without return value */ - uint32 res; - register volatile uint32 *mem = &(var->value); +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register volatile uint32 res; + register volatile uint32 tmp; - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res & val, mem) != 0); -#else // __arm__ + dmb(); + + __asm__ __volatile__( + "1: ldrex %[tmp], [%[var]] \n\t" + "and %[tmp], %[val] \n\t" + "strex %[res], %[tmp], [%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [res] "=&r" (res), [tmp] "=&r" (tmp) + : [var] "r" (&var->value), [val] "r" (val) + : "memory", "cc" + ); + + dmb(); +#else /* VM_ARM_V7 */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; andl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if defined(__x86_64__) _InterlockedAnd((long *)&var->value, (long)val); @@ -628,45 +602,6 @@ #define Atomic_And32 Atomic_And -#if defined(__x86_64__) -/* - *----------------------------------------------------------------------------- - * - * Atomic_And64 -- - * - * Atomic read, bitwise AND with a value, write. 
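
Atomic_ReadIfEqualWrite returns the value it actually observed, which is what lets the fetch-and-modify helpers elsewhere in this header be written as compare-and-swap retry loops on targets without a dedicated instruction. A made-up "store maximum" operation as a sketch of the pattern (not part of the patch):

    static INLINE uint32
    Atomic_StoreMax(Atomic_uint32 *var,   // IN/OUT
                    uint32 candidate)     // IN
    {
        uint32 old;

        do {
            old = Atomic_Read(var);
            if (candidate <= old) {
                return old;                /* already at least as large */
            }
            /* Retry if another CPU changed *var between the read and the CAS. */
        } while (Atomic_ReadIfEqualWrite(var, old, candidate) != old);

        return candidate;
    }
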
- * - * Results: - * None - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE void -Atomic_And64(Atomic_uint64 *var, // IN - uint64 val) // IN -{ -#if defined(__GNUC__) - /* Checked against the AMD manual and GCC --hpreg */ - __asm__ __volatile__( - "lock; andq %1, %0" - : "+m" (var->value) - : "ri" (val) - : "cc" - ); - AtomicEpilogue(); -#elif defined _MSC_VER - _InterlockedAnd64((__int64 *)&var->value, (__int64)val); -#else -#error No compiler defined for Atomic_And64 -#endif -} -#endif - - /* *----------------------------------------------------------------------------- * @@ -687,33 +622,35 @@ Atomic_Or(Atomic_uint32 *var, // IN uint32 val) // IN { -#ifdef FAKE_ATOMIC - var->value |= val; -#elif defined(__GNUC__) -#ifdef __arm__ - /* same as Atomic_FetchAndOr without return value */ - uint32 res; - register volatile uint32 *mem = &(var->value); +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register volatile uint32 res; + register volatile uint32 tmp; - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res | val, mem) != 0); -#else // __arm__ + dmb(); + + __asm__ __volatile__( + "1: ldrex %[tmp], [%[var]] \n\t" + "orr %[tmp], %[val] \n\t" + "strex %[res], %[tmp], [%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [res] "=&r" (res), [tmp] "=&r" (tmp) + : [var] "r" (&var->value), [val] "r" (val) + : "memory", "cc" + ); + + dmb(); +#else // VM_ARM_V7 /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; orl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if defined(__x86_64__) _InterlockedOr((long *)&var->value, (long)val); @@ -729,45 +666,6 @@ #define Atomic_Or32 Atomic_Or -#if defined(__x86_64__) -/* - *----------------------------------------------------------------------------- - * - * Atomic_Or64 -- - * - * Atomic read, bitwise OR with a value, write. 
- * - * Results: - * None - * - * Side effects: - * None - * - *----------------------------------------------------------------------------- - */ - -static INLINE void -Atomic_Or64(Atomic_uint64 *var, // IN - uint64 val) // IN -{ -#if defined(__GNUC__) - /* Checked against the AMD manual and GCC --hpreg */ - __asm__ __volatile__( - "lock; orq %1, %0" - : "+m" (var->value) - : "ri" (val) - : "cc" - ); - AtomicEpilogue(); -#elif defined _MSC_VER - _InterlockedOr64((__int64 *)&var->value, (__int64)val); -#else -#error No compiler defined for Atomic_Or64 -#endif -} -#endif - - /* *----------------------------------------------------------------------------- * @@ -788,32 +686,35 @@ Atomic_Xor(Atomic_uint32 *var, // IN uint32 val) // IN { -#ifdef FAKE_ATOMIC - var->value ^= val; -#elif defined(__GNUC__) -#ifdef __arm__ - uint32 res; - register volatile uint32 *mem = &(var->value); +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register volatile uint32 res; + register volatile uint32 tmp; - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res ^ val, mem) != 0); -#else // __arm__ + dmb(); + + __asm__ __volatile__( + "1: ldrex %[tmp], [%[var]] \n\t" + "eor %[tmp], %[val] \n\t" + "strex %[res], %[tmp], [%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [res] "=&r" (res), [tmp] "=&r" (tmp) + : [var] "r" (&var->value), [val] "r" (val) + : "memory", "cc" + ); + + dmb(); +#else // VM_ARM_V7 /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; xorl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if defined(__x86_64__) _InterlockedXor((long *)&var->value, (long)val); @@ -888,33 +789,35 @@ Atomic_Add(Atomic_uint32 *var, // IN uint32 val) // IN { -#ifdef FAKE_ATOMIC - var->value += val; -#elif defined(__GNUC__) -#ifdef __arm__ - /* same as Atomic_FetchAndAddUnfenced without return value */ - uint32 res; - register volatile uint32 *mem = &(var->value); +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register volatile uint32 res; + register volatile uint32 tmp; - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res + val, mem) != 0); -#else // __arm__ + dmb(); + + __asm__ __volatile__( + "1: ldrex %[tmp], [%[var]] \n\t" + "add %[tmp], %[val] \n\t" + "strex %[res], %[tmp], [%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [res] "=&r" (res), [tmp] "=&r" (tmp) + : [var] "r" (&var->value), [val] "r" (val) + : "memory", "cc" + ); + + dmb(); +#else // VM_ARM_V7 /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; addl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if _MSC_VER >= 1310 _InterlockedExchangeAdd((long *)&var->value, (long)val); @@ -989,32 +892,35 @@ Atomic_Sub(Atomic_uint32 *var, // IN uint32 val) // IN { -#ifdef FAKE_ATOMIC - var->value -= val; -#elif defined(__GNUC__) -#ifdef __arm__ - uint32 res; - register volatile uint32 *mem = &(var->value); +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register volatile uint32 res; + register volatile uint32 tmp; - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res - val, mem) != 0); -#else // __arm__ + dmb(); + + __asm__ __volatile__( + "1: ldrex %[tmp], [%[var]] \n\t" + "sub %[tmp], %[val] \n\t" + "strex %[res], %[tmp], 
[%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [res] "=&r" (res), [tmp] "=&r" (tmp) + : [var] "r" (&var->value), [val] "r" (val) + : "memory", "cc" + ); + + dmb(); +#else // VM_ARM_V7 /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; subl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if _MSC_VER >= 1310 _InterlockedExchangeAdd((long *)&var->value, (long)-val); @@ -1089,24 +995,18 @@ Atomic_Inc(Atomic_uint32 *var) // IN { #ifdef __GNUC__ -#ifdef __arm__ - /* just use Atomic_Add */ +#ifdef VM_ARM_V7 Atomic_Add(var, 1); -#else // __arm__ +#else // VM_ARM_V7 /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; incl %0" -# if VM_ASM_PLUS : "+m" (var->value) : -# else - : "=m" (var->value) - : "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if _MSC_VER >= 1310 _InterlockedIncrement((long *)&var->value); @@ -1141,24 +1041,18 @@ Atomic_Dec(Atomic_uint32 *var) // IN { #ifdef __GNUC__ -#ifdef __arm__ - /* just use Atomic_Sub */ +#ifdef VM_ARM_V7 Atomic_Sub(var, 1); -#else // __arm__ +#else // VM_ARM_V7 /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; decl %0" -# if VM_ASM_PLUS : "+m" (var->value) : -# else - : "=m" (var->value) - : "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if _MSC_VER >= 1310 _InterlockedDecrement((long *)&var->value); @@ -1201,16 +1095,10 @@ { uint32 res; -#if defined(__arm__) && !defined(FAKE_ATOMIC) - register volatile uint32 *mem = &(var->value); - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res | val, mem) != 0); -#else do { res = Atomic_Read(var); } while (res != Atomic_ReadIfEqualWrite(var, res, res | val)); -#endif + return res; } @@ -1237,16 +1125,10 @@ { uint32 res; -#if defined(__arm__) && !defined(FAKE_ATOMIC) - register volatile uint32 *mem = &(var->value); - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res & val, mem) != 0); -#else do { res = Atomic_Read(var); } while (res != Atomic_ReadIfEqualWrite(var, res, res & val)); -#endif + return res; } #define Atomic_ReadOr32 Atomic_FetchAndOr @@ -1281,7 +1163,37 @@ return res; } -#endif + + +/* + *----------------------------------------------------------------------------- + * + * Atomic_ReadAnd64 -- + * + * Atomic read (returned), bitwise AND with a value, write. + * + * Results: + * The value of the variable before the operation. 
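
Because Atomic_FetchAndOr hands back the pre-OR value, it doubles as a "set a flag exactly once" primitive: only the caller that actually flips the bit from 0 to 1 sees it clear in the return value. A made-up sketch, not part of the patch (the flag name is an invention):

    #define WORK_SCHEDULED 0x1

    static INLINE Bool
    ScheduleOnce(Atomic_uint32 *flags)    // IN/OUT
    {
        /* TRUE for exactly one caller per 0 -> 1 transition of the bit. */
        return (Atomic_FetchAndOr(flags, WORK_SCHEDULED) & WORK_SCHEDULED) == 0;
    }
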
+ * + * Side effects: + * None + * + *----------------------------------------------------------------------------- + */ + +static INLINE uint64 +Atomic_ReadAnd64(Atomic_uint64 *var, // IN + uint64 val) // IN +{ + uint64 res; + + do { + res = var->value; + } while (res != Atomic_ReadIfEqualWrite64(var, res, res & val)); + + return res; +} +#endif // __x86_64__ /* @@ -1311,39 +1223,38 @@ Atomic_FetchAndAddUnfenced(Atomic_uint32 *var, // IN uint32 val) // IN { -#ifdef FAKE_ATOMIC - uint32 res = var->value; - var->value = res + val; - return res; -#elif defined(__GNUC__) -#ifdef __arm__ - uint32 res; +#ifdef __GNUC__ +#ifdef VM_ARM_V7 + register volatile uint32 res; + register volatile uint32 retVal; - register volatile uint32 *mem = &(var->value); - do { - res = Atomic_Read(var); - } while (__kernel_cmpxchg(res, res + val, mem) != 0); + dmb(); - return res; -#else // __arm__ + __asm__ __volatile__( + "1: ldrex %[retVal], [%[var]] \n\t" + "add %[val], %[retVal] \n\t" + "strex %[res], %[val], [%[var]] \n\t" + "teq %[res], #0 \n\t" + "bne 1b" + : [res] "=&r" (res), [retVal] "=&r" (retVal) + : [var] "r" (&var->value), [val] "r" (val) + : "memory", "cc" + ); + + dmb(); + + return retVal; +#else // VM_ARM_V7 /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( -# if VM_ASM_PLUS "lock; xaddl %0, %1" : "=r" (val), "+m" (var->value) : "0" (val) : "cc" -# else - "lock; xaddl %0, (%1)" - : "=r" (val) - : "r" (&var->value), - "0" (val) - : "cc", "memory" -# endif ); return val; -#endif // __arm__ +#endif // VM_ARM_V7 #elif defined _MSC_VER #if _MSC_VER >= 1310 return _InterlockedExchangeAdd((long *)&var->value, (long)val); @@ -1391,7 +1302,7 @@ Atomic_FetchAndAdd(Atomic_uint32 *var, // IN uint32 val) // IN { -#if defined(__GNUC__) && !defined(__arm__) +#if defined(__GNUC__) && !defined(VM_ARM_V7) val = Atomic_FetchAndAddUnfenced(var, val); AtomicEpilogue(); return val; @@ -1605,9 +1516,6 @@ *----------------------------------------------------------------------------- */ -#ifdef __arm__ -#define Atomic_CMPXCHG64(x, y, z) NOT_IMPLEMENTED() -#else // __arm__ #if defined(__GNUC__) && __GNUC__ < 3 static Bool #else @@ -1617,18 +1525,11 @@ uint64 const *oldVal, // IN uint64 const *newVal) // IN { -#ifdef FAKE_ATOMIC - uint64 readVal = var->value; - - if (*oldVal == readVal) { - var->value = *newVal; - } - return (*oldVal == readVal); -#elif defined(__GNUC__) +#ifdef __GNUC__ Bool equal; /* Checked against the Intel manual and GCC --walken */ -#ifdef VMM64 +#if defined(__x86_64__) uint64 dummy; __asm__ __volatile__( "lock; cmpxchgq %3, %0" "\n\t" @@ -1640,9 +1541,9 @@ "2" (*oldVal) : "cc" ); -#else /* 32-bit version */ +#elif !defined(VM_ARM_V7) /* 32-bit version for non-ARM */ int dummy1, dummy2; -# if defined __PIC__ && !vm_x86_64 +# if defined __PIC__ /* * Rules for __asm__ statements in __PIC__ code * -------------------------------------------- @@ -1710,11 +1611,7 @@ __asm__ __volatile__( "lock; cmpxchg8b %0" "\n\t" "sete %1" -# if VM_ASM_PLUS : "+m" (*var), -# else - : "=m" (*var), -# endif "=qm" (equal), "=a" (dummy1), "=d" (dummy2) @@ -1725,14 +1622,35 @@ : "cc" ); # endif -#endif /* 32-bit version */ +#elif defined(VM_ARM_V7) + volatile uint64 tmp; + + dmb(); + + __asm__ __volatile__( + "ldrexd %[tmp], %H[tmp], [%[var]] \n\t" + "mov %[equal], #1 \n\t" + "teq %[tmp], %[oldVal] \n\t" + "teqeq %H[tmp], %H[oldVal] \n\t" + "strexdeq %[equal], %[newVal], %H[newVal], [%[var]]" + : [equal] "=&r" (equal), [tmp] "=&r" (tmp) + : [var] "r" (&var->value), [oldVal] "r" (*oldVal), [newVal] 
"r" (*newVal) + : "memory", "cc" + ); + + dmb(); + + return !equal; +#endif +#ifndef VM_ARM_V7 AtomicEpilogue(); return equal; +#endif // VM_ARM_V7 #elif defined _MSC_VER #if defined(__x86_64__) - return *oldVal == _InterlockedCompareExchange64((__int64 *)&var->value, - (__int64)*newVal, - (__int64)*oldVal); + return (__int64)*oldVal == _InterlockedCompareExchange64((__int64 *)&var->value, + (__int64)*newVal, + (__int64)*oldVal); #else #pragma warning(push) #pragma warning(disable : 4035) // disable no-return warning @@ -1753,9 +1671,8 @@ #endif #else #error No compiler defined for Atomic_CMPXCHG64 -#endif +#endif // !GNUC } -#endif // __arm__ /* @@ -1779,63 +1696,48 @@ uint32 oldVal, // IN uint32 newVal) // IN { -#ifdef FAKE_ATOMIC - uint32 readVal = var->value; +#ifdef __GNUC__ + Bool equal; +#ifdef VM_ARM_V7 + volatile uint64 tmp; - if (oldVal == readVal) { - var->value = newVal; - } - return (oldVal == readVal); -#elif defined(__GNUC__) -#ifdef __arm__ - register volatile uint32 *mem = &(var->value); + dmb(); - return !__kernel_cmpxchg(oldVal, newVal, mem); -#else // __arm__ - Bool equal; + __asm__ __volatile__( + "ldrex %[tmp], [%[var]] \n\t" + "mov %[equal], #1 \n\t" + "teq %[tmp], %[oldVal] \n\t" + "strexeq %[equal], %[newVal], [%[var]]" + : [equal] "=&r" (equal), [tmp] "=&r" (tmp) + : [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal) + : "memory", "cc" + ); + + dmb(); + return !equal; +#else // VM_ARM_V7 uint32 dummy; + __asm__ __volatile__( "lock; cmpxchgl %3, %0" "\n\t" "sete %1" -# if VM_ASM_PLUS : "+m" (*var), "=qm" (equal), "=a" (dummy) : "r" (newVal), "2" (oldVal) -# else - : "=m" (*var), - "=qm" (equal), - "=a" (dummy) - : /*"0" (*var), */ - "r" (newVal), - "2" (oldVal) -# endif : "cc" ); AtomicEpilogue(); return equal; -#endif // __arm__ +#endif // VM_ARM_V7 #else // defined(__GNUC__) return (Atomic_ReadIfEqualWrite(var, oldVal, newVal) == oldVal); -#endif // defined(__GNUC__) +#endif // !defined(__GNUC__) } -#ifdef __arm__ - -#define Atomic_Read64(x) NOT_IMPLEMENTED() -#define Atomic_FetchAndAdd64(x,y) NOT_IMPLEMENTED() -#define Atomic_FetchAndInc64(x) NOT_IMPLEMENTED() -#define Atomic_FetchAndDec64(x) NOT_IMPLEMENTED() -#define Atomic_Inc64(x) NOT_IMPLEMENTED() -#define Atomic_Dec64(x) NOT_IMPLEMENTED() -#define Atomic_ReadWrite64(x,y) NOT_IMPLEMENTED() -#define Atomic_Write64(x,y) NOT_IMPLEMENTED() - -#else // __arm__ - /* *----------------------------------------------------------------------------- * @@ -1855,11 +1757,22 @@ static INLINE uint64 Atomic_Read64(Atomic_uint64 const *var) // IN { -#if defined(FAKE_ATOMIC) - return var->value; -#elif defined(__x86_64__) - return var->value; -#elif defined(__GNUC__) && defined(__i386__) /* GCC on x86 */ +#if defined(__GNUC__) && defined(__x86_64__) + uint64 value; + +#ifdef VMM + ASSERT((uintptr_t)var % 8 == 0); +#endif + /* + * Use asm to ensure we emit a single load. + */ + __asm__ __volatile__( + "movq %1, %0" + : "=r" (value) + : "m" (var->value) + ); + return value; +#elif defined(__GNUC__) && defined(__i386__) uint64 value; /* * Since cmpxchg8b will replace the contents of EDX:EAX with the @@ -1880,7 +1793,16 @@ ); AtomicEpilogue(); return value; -#elif defined _MSC_VER /* MSC (assume on x86 for now) */ +#elif defined (_MSC_VER) && defined(__x86_64__) + /* + * Microsoft docs guarantee "Simple reads and writes to properly + * aligned 64-bit variables are atomic on 64-bit Windows." + * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx + * + * XXX Verify that value is properly aligned. 
Bug 61315. + */ + return var->value; +#elif defined (_MSC_VER) && defined(__i386__) # pragma warning(push) # pragma warning(disable : 4035) // disable no-return warning { @@ -1891,8 +1813,16 @@ // edx:eax is the return value; this is documented to work. --mann } # pragma warning(pop) -#else -# error No compiler defined for Atomic_Read64 +#elif defined(__GNUC__) && defined (VM_ARM_V7) + uint64 value; + + __asm__ __volatile__( + "ldrexd %[value], %H[value], [%[var]] \n\t" + : [value] "=&r" (value) + : [var] "r" (&var->value) + ); + + return value; #endif } @@ -1900,6 +1830,35 @@ /* *---------------------------------------------------------------------- * + * Atomic_ReadUnaligned64 -- + * + * Atomically read a 64 bit integer, possibly misaligned. + * This function can be *very* expensive, costing over 50 kcycles + * on Nehalem. + * + * Note that "var" needs to be writable, even though it will not + * be modified. + * + * Results: + * The value of the atomic variable. + * + * Side effects: + * None + * + *---------------------------------------------------------------------- + */ +#if defined(__x86_64__) +static INLINE uint64 +Atomic_ReadUnaligned64(Atomic_uint64 const *var) +{ + return Atomic_ReadIfEqualWrite64((Atomic_uint64*)var, 0, 0); +} +#endif + + +/* + *---------------------------------------------------------------------- + * * Atomic_FetchAndAdd64 -- * * Atomically adds a 64-bit integer to another @@ -2129,6 +2088,10 @@ { #if defined(__x86_64__) #if defined(__GNUC__) + +#ifdef VMM + ASSERT((uintptr_t)var % 8 == 0); +#endif /* * There is no move instruction for 64-bit immediate to memory, so unless * the immediate value fits in 32-bit (i.e. can be sign-extended), GCC @@ -2146,6 +2109,8 @@ * Microsoft docs guarantee "Simple reads and writes to properly aligned * 64-bit variables are atomic on 64-bit Windows." * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx + * + * XXX Verify that value is properly aligned. Bug 61315. */ var->value = val; @@ -2156,7 +2121,98 @@ (void)Atomic_ReadWrite64(var, val); #endif } -#endif // __arm__ + + +/* + *----------------------------------------------------------------------------- + * + * Atomic_Or64 -- + * + * Atomic read, bitwise OR with a 64-bit value, write. + * + * Results: + * None + * + * Side effects: + * None + * + *----------------------------------------------------------------------------- + */ + +static INLINE void +Atomic_Or64(Atomic_uint64 *var, // IN + uint64 val) // IN +{ +#if defined(__x86_64__) +#if defined(__GNUC__) + /* Checked against the AMD manual and GCC --hpreg */ + __asm__ __volatile__( + "lock; orq %1, %0" + : "+m" (var->value) + : "ri" (val) + : "cc" + ); + AtomicEpilogue(); +#elif defined _MSC_VER + _InterlockedOr64((__int64 *)&var->value, (__int64)val); +#else +#error No compiler defined for Atomic_Or64 +#endif +#else // __x86_64__ + uint64 oldVal; + uint64 newVal; + do { + oldVal = var->value; + newVal = oldVal | val; + } while (!Atomic_CMPXCHG64(var, &oldVal, &newVal)); +#endif +} + + +/* + *----------------------------------------------------------------------------- + * + * Atomic_And64 -- + * + * Atomic read, bitwise AND with a 64-bit value, write. 
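
The 64-bit Or/And operations defined in this area compile to a single locked instruction on x86-64 and otherwise fall back to an Atomic_CMPXCHG64 retry loop. A made-up bitmask client as a sketch, not part of the patch:

    static Atomic_uint64 activePorts;     /* one bit per port, 0..63 */

    static INLINE void
    MarkPortActive(unsigned int portId)   // IN: 0..63
    {
        Atomic_Or64(&activePorts, (uint64)1 << portId);
    }

    static INLINE void
    MarkPortIdle(unsigned int portId)     // IN: 0..63
    {
        Atomic_And64(&activePorts, ~((uint64)1 << portId));
    }
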
+ * + * Results: + * None + * + * Side effects: + * None + * + *----------------------------------------------------------------------------- + */ + +static INLINE void +Atomic_And64(Atomic_uint64 *var, // IN + uint64 val) // IN +{ +#if defined(__x86_64__) +#if defined(__GNUC__) + /* Checked against the AMD manual and GCC --hpreg */ + __asm__ __volatile__( + "lock; andq %1, %0" + : "+m" (var->value) + : "ri" (val) + : "cc" + ); + AtomicEpilogue(); +#elif defined _MSC_VER + _InterlockedAnd64((__int64 *)&var->value, (__int64)val); +#else +#error No compiler defined for Atomic_And64 +#endif +#else // __x86_64__ + uint64 oldVal; + uint64 newVal; + do { + oldVal = var->value; + newVal = oldVal & val; + } while (!Atomic_CMPXCHG64(var, &oldVal, &newVal)); +#endif +} /* --- source-7.1.5/vmnet-only/vm_basic_asm.h 2011-09-23 22:05:45.000000000 -0400 +++ patched/vmnet-only/vm_basic_asm.h 2011-11-14 02:16:55.000000000 -0500 @@ -1,5 +1,5 @@ /********************************************************* - * Copyright (C) 2003 VMware, Inc. All rights reserved. + * Copyright (C) 2003-2011 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -28,10 +28,9 @@ #define _VM_BASIC_ASM_H_ #define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMMEXT + #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON -#define INCLUDE_ALLOW_VMNIXMOD #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE @@ -40,7 +39,6 @@ #include "includeCheck.h" #include "vm_basic_types.h" -#include "x86cpuid.h" #if defined VM_X86_64 #include "vm_basic_asm_x86_64.h" @@ -48,7 +46,6 @@ #include "vm_basic_asm_x86.h" #endif - /* * x86-64 windows doesn't support inline asm so we have to use these * intrinsic functions defined in the compiler. Not all of these are well @@ -70,7 +67,7 @@ /* * It seems x86 & x86-64 windows still implements these intrinsic * functions. The documentation for the x86-64 suggest the - * __inbyte/__outbyte intrinsics eventhough the _in/_out work fine and + * __inbyte/__outbyte intrinsics even though the _in/_out work fine and * __inbyte/__outbyte aren't supported on x86. */ int _inp(unsigned short); @@ -81,28 +78,52 @@ unsigned short _outpw(unsigned short, unsigned short); unsigned long _outpd(uint16, unsigned long); #pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpw, _outpd) + +/* + * Prevents compiler from re-ordering reads, writes and reads&writes. + * These functions do not add any instructions thus only affect + * the compiler ordering. + * + * See: + * `Lockless Programming Considerations for Xbox 360 and Microsoft Windows' + * http://msdn.microsoft.com/en-us/library/bb310595(VS.85).aspx + */ +void _ReadBarrier(void); +void _WriteBarrier(void); void _ReadWriteBarrier(void); -#pragma intrinsic(_ReadWriteBarrier) +#pragma intrinsic(_ReadBarrier, _WriteBarrier, _ReadWriteBarrier) + +void _mm_mfence(void); +void _mm_lfence(void); +#pragma intrinsic(_mm_mfence, _mm_lfence) #ifdef VM_X86_64 /* * intrinsic functions only supported by x86-64 windows as of 2k3sp1 */ -void __cpuid(unsigned int*, unsigned int); unsigned __int64 __rdtsc(void); -void __stosw(unsigned short*, unsigned short, size_t); -void __stosd(unsigned long*, unsigned long, size_t); -#pragma intrinsic(__cpuid, __rdtsc, __stosw, __stosd) - -/* - * intrinsic functions supported by x86-64 windows and newer x86 - * compilers (13.01.2035 for _BitScanForward). 
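
The newly declared MSVC intrinsics fall into two groups: _ReadBarrier/_WriteBarrier/_ReadWriteBarrier only constrain the compiler's ordering, while _mm_lfence/_mm_mfence also order the accesses at the processor. On x86, where stores are not reordered with other stores, a compiler barrier alone is enough for the classic publish pattern. A made-up, MSVC-only sketch (the variable names are inventions, not part of the patch):

    static int payload;
    static volatile int payloadReady;

    static void
    PublishPayload(int value)
    {
        payload = value;
        _WriteBarrier();     /* keep the compiler from sinking the store below the flag */
        payloadReady = 1;    /* x86 store-store ordering preserves this order at the CPU */
    }
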
- */ -unsigned char _BitScanForward(unsigned long*, unsigned long); -void _mm_pause(void); -#pragma intrinsic(_BitScanForward, _mm_pause) +void __stosw(unsigned short *, unsigned short, size_t); +void __stosd(unsigned long *, unsigned long, size_t); +void _mm_pause(void); +#pragma intrinsic(__rdtsc, __stosw, __stosd, _mm_pause) + +unsigned char _BitScanForward64(unsigned long *, unsigned __int64); +unsigned char _BitScanReverse64(unsigned long *, unsigned __int64); +#pragma intrinsic(_BitScanForward64, _BitScanReverse64) #endif /* VM_X86_64 */ +unsigned char _BitScanForward(unsigned long *, unsigned long); +unsigned char _BitScanReverse(unsigned long *, unsigned long); +#pragma intrinsic(_BitScanForward, _BitScanReverse) + +unsigned char _bittestandset(long *, long); +unsigned char _bittestandreset(long *, long); +#pragma intrinsic(_bittestandset, _bittestandreset) +#ifdef VM_X86_64 +unsigned char _bittestandset64(__int64 *, __int64); +unsigned char _bittestandreset64(__int64 *, __int64); +#pragma intrinsic(_bittestandset64, _bittestandreset64) +#endif /* VM_X86_64 */ #ifdef __cplusplus } #endif @@ -161,155 +182,9 @@ #define OUTW(port, val) __GCC_OUT(w, w, port, val) #define OUT32(port, val) __GCC_OUT(l, , port, val) - #define GET_CURRENT_EIP(_eip) \ __asm__ __volatile("call 0\n\tpopl %0" : "=r" (_eip): ); - -/* - * Checked against the Intel manual and GCC --hpreg - * - * Need __volatile__ and "memory" since CPUID has a synchronizing effect. - * The CPUID may also change at runtime (APIC flag, etc). - * - */ - -/* - * %ebx is reserved on i386 PIC. Apple's gcc-5493 (gcc 4.0) compiling - * for x86_64 incorrectly errors out saying %ebx is reserved. This is - * Apple bug 7304232. - */ -#if vm_x86_64 ? (defined __APPLE_CC__ && __APPLE_CC__ == 5493) : defined __PIC__ -#if vm_x86_64 -/* - * Note that this generates movq %rbx,%rbx; cpuid; xchgq %rbx,%rbx ... - * Unfortunately Apple's assembler does not have .ifnes, and I cannot - * figure out how to do that with .if. 
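
The _BitScanForward/_BitScanReverse declarations added above exist so that the lowest/most-significant-bit helpers introduced further down can use the compiler intrinsic on MSVC and a builtin on gcc. A hedged sketch of the lowest-set-bit case, not part of the patch (the helper name is made up; the real header defines its own naming pattern):

    static INLINE int
    LowestSetBitIndex(uint32 value)       // IN
    {
        if (value == 0) {
            return -1;                    /* no bit set */
        }
    #ifdef _MSC_VER
        {
            unsigned long index;
            _BitScanForward(&index, value);   /* value != 0, so this always succeeds */
            return (int)index;
        }
    #else
        return __builtin_ctz(value);          /* count trailing zero bits */
    #endif
    }
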
If we ever enable this code - * on other 64bit systems, both movq & xchgq should be surrounded by - * .ifnes \"%%rbx\", \"%q1\" & .endif - */ -#define VM_CPUID_BLOCK "movq %%rbx, %q1\n\t" \ - "cpuid\n\t" \ - "xchgq %%rbx, %q1\n\t" -#define VM_EBX_OUT(reg) "=&r"(reg) -#else -#define VM_CPUID_BLOCK "movl %%ebx, %1\n\t" \ - "cpuid\n\t" \ - "xchgl %%ebx, %1\n\t" -#define VM_EBX_OUT(reg) "=&rm"(reg) -#endif -#else -#define VM_CPUID_BLOCK "cpuid" -#define VM_EBX_OUT(reg) "=b"(reg) -#endif - -static INLINE void -__GET_CPUID(int eax, // IN - CPUIDRegs *regs) // OUT -{ - __asm__ __volatile__( - VM_CPUID_BLOCK - : "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx) - : "a" (eax) - : "memory" - ); -} - -static INLINE void -__GET_CPUID2(int eax, // IN - int ecx, // IN - CPUIDRegs *regs) // OUT -{ - __asm__ __volatile__( - VM_CPUID_BLOCK - : "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx) - : "a" (eax), "c" (ecx) - : "memory" - ); -} - -static INLINE uint32 -__GET_EAX_FROM_CPUID(int eax) // IN -{ - uint32 ebx; - - __asm__ __volatile__( - VM_CPUID_BLOCK - : "=a" (eax), VM_EBX_OUT(ebx) - : "a" (eax) - : "memory", "%ecx", "%edx" - ); - - return eax; -} - -static INLINE uint32 -__GET_EBX_FROM_CPUID(int eax) // IN -{ - uint32 ebx; - - __asm__ __volatile__( - VM_CPUID_BLOCK - : "=a" (eax), VM_EBX_OUT(ebx) - : "a" (eax) - : "memory", "%ecx", "%edx" - ); - - return ebx; -} - -static INLINE uint32 -__GET_ECX_FROM_CPUID(int eax) // IN -{ - uint32 ecx; - uint32 ebx; - - __asm__ __volatile__( - VM_CPUID_BLOCK - : "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx) - : "a" (eax) - : "memory", "%edx" - ); - - return ecx; -} - -static INLINE uint32 -__GET_EDX_FROM_CPUID(int eax) // IN -{ - uint32 edx; - uint32 ebx; - - __asm__ __volatile__( - VM_CPUID_BLOCK - : "=a" (eax), VM_EBX_OUT(ebx), "=d" (edx) - : "a" (eax) - : "memory", "%ecx" - ); - - return edx; -} - - -static INLINE uint32 -__GET_EAX_FROM_CPUID4(int ecx) // IN -{ - uint32 eax; - uint32 ebx; - - __asm__ __volatile__( - VM_CPUID_BLOCK - : "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx) - : "a" (4), "c" (ecx) - : "memory", "%edx" - ); - - return eax; -} - -#undef VM_CPUID_BLOCK -#undef VM_EBX_OUT - #endif // x86* #elif defined(_MSC_VER) // } { @@ -354,266 +229,290 @@ __asm pop eax \ __asm mov _eip, eax \ } while (0) -#endif +#endif // VM_X86_64 -static INLINE void -__GET_CPUID(int input, CPUIDRegs *regs) -{ -#ifdef VM_X86_64 - __cpuid((unsigned int *)regs, input); -#else - __asm push esi - __asm push ebx - __asm push ecx - __asm push edx - - __asm mov eax, input - __asm mov esi, regs - __asm _emit 0x0f __asm _emit 0xa2 - __asm mov 0x0[esi], eax - __asm mov 0x4[esi], ebx - __asm mov 0x8[esi], ecx - __asm mov 0xC[esi], edx - - __asm pop edx - __asm pop ecx - __asm pop ebx - __asm pop esi -#endif -} +#else // } { +#error +#endif // } -#ifdef VM_X86_64 +/* Sequence recommended by Intel for the Pentium 4. */ +#define INTEL_MICROCODE_VERSION() ( \ + __SET_MSR(MSR_BIOS_SIGN_ID, 0), \ + __GET_EAX_FROM_CPUID(1), \ + __GET_MSR(MSR_BIOS_SIGN_ID)) /* - * No inline assembly in Win64. Implemented in bora/lib/user in - * cpuidMasm64.asm. + * Locate most and least significant bit set functions. Use our own name + * space to avoid namespace collisions. The new names follow a pattern, + *