diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 5a274b99f2992..d2fc1215f4ff5 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -192,5 +192,6 @@ source "drivers/net/ethernet/wangxun/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" +source "drivers/net/ethernet/guangruntong/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 0d872d4efcd10..147f18cdbdfbb 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -104,3 +104,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ +obj-$(CONFIG_NET_VENDOR_GRT) += guangruntong/ diff --git a/drivers/net/ethernet/guangruntong/Kconfig b/drivers/net/ethernet/guangruntong/Kconfig new file mode 100644 index 0000000000000..cc6a729b78da0 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/Kconfig @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Guangruntong network device configuration +# + +config NET_VENDOR_GRT + bool "Guangruntong devices" + depends on PCI + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Guangruntong cards. If you say Y, you will be asked for + your specific card in the following questions. + +config GRTNIC_XGB + tristate "Guangruntong PCI Express adapters support" + depends on NET_VENDOR_GRT + help + This driver supports the Guangruntong PCI Express family of + adapters. diff --git a/drivers/net/ethernet/guangruntong/Makefile b/drivers/net/ethernet/guangruntong/Makefile new file mode 100644 index 0000000000000..d806bde47f16d --- /dev/null +++ b/drivers/net/ethernet/guangruntong/Makefile @@ -0,0 +1,245 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 1999 - 2021 Intel Corporation. + +ifneq ($(KERNELRELEASE),) +# kbuild part of makefile +# +# Makefile for the Guangruntong (GRT) PCI Express Linux Network Driver +# + +obj-$(CONFIG_GRTNIC_XGB) += grtnic_xgb.o + +define grtnic_xgb-y + grtnic_main.o + grtnic_netdev.o + grtnic_macphy.o + grtnic_param.o + grtnic_nvm.o + grtnic_ethtool.o + grtnic_proc.o +endef +grtnic_xgb-y := $(strip ${grtnic_xgb-y}) +grtnic_xgb-y += kcompat.o + +else # ifneq($(KERNELRELEASE),) +# normal makefile + +DRIVER := grtnic_xgb + +# Check that kernel version is at least 2.6.0, since we don't support 2.4.x +# kernels with the grtnic driver. We can't use minimum_kver_check since SLES 10 +# SP4's Make has a bug which causes $(eval) inside an ifeq conditional to error +# out. This was fixed in Make 3.81, but SLES 10 SP4 does not have a fix for +# this yet. +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,0) ]; echo "$?")) + $(warning *** Aborting the build.) + $(error This driver is not supported on kernel versions older than 2.6.0) +endif + +###################### +# Kernel Build Macro # +###################### + +# customized kernelbuild function +# +# ${1} is the kernel build target +# ${2} may contain extra rules to pass to kernelbuild macro +# +# We customize the kernelbuild target in order to provide our hack to disable +# CONFIG_PTP_1588_CLOCK support should -DNO_PTP_SUPPORT be defined in the extra +# cflags given on the command line.
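+# +# For example (illustrative invocation only; use whatever mechanism your +# environment already has for extending EXTRA_CFLAGS, e.g. CFLAGS_EXTRA as +# picked up by common.mk), a build against a kernel without PTP support +# could be started as: +# +#   make CFLAGS_EXTRA=-DNO_PTP_SUPPORT +#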
+devkernelbuild = $(call kernelbuild,$(if $(filter -DNO_PTP_SUPPORT,${EXTRA_CFLAGS}),CONFIG_PTP_1588_CLOCK=n) ${2},${1}) + +# Command to update initramfs or display a warning message +ifeq (${cmd_initrd},) +define cmd_initramfs +@echo "Unable to update initramfs. You may need to do this manually." +endef +else +define cmd_initramfs +@echo "Updating initramfs..." +-@$(call cmd_initrd) +endef +endif + +############### +# Build rules # +############### + +# Standard compilation, with regular output +default: + @+$(call devkernelbuild,modules) + +# Noisy output, for extra debugging +noisy: + @+$(call devkernelbuild,modules,V=1) + +# Silence any output generated +silent: + @+$(call devkernelbuild,modules,>/dev/null) + +# Enable higher warning level +checkwarnings: clean + @+$(call devkernelbuild,modules,W=1) + +# Run sparse static analyzer +sparse: clean + @+$(call devkernelbuild,modules,C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext") + +# Run coccicheck static analyzer +ccc: clean + @+$(call devkernelbuild,modules,coccicheck MODE=report)) + +# Build manfiles +manfile: + @gzip -c ../${DRIVER}.${MANSECTION} > ${DRIVER}.${MANSECTION}.gz + +# Clean the module subdirectories +clean: + @+$(call devkernelbuild,clean) + @-rm -rf *.${MANSECTION}.gz *.ko + +kylin: EXTRA_CFLAGS += -DKYLIN_KERNEL +kylin: + @echo "Compile for Kylin kernel..." + @+$(call devkernelbuild,modules) + +kylin44: EXTRA_CFLAGS += -DKYLIN_KERNEL44 +kylin44: + @echo "Compile for Kylin4.4 kernel..." + @+$(call devkernelbuild,modules) + +uos: EXTRA_CFLAGS += -DUOS_KERNEL +uos: + @echo "Compile for Uos kernel..." + @+$(call devkernelbuild,modules) + +euler: EXTRA_CFLAGS += -DEULER_KERNEL +euler: + @echo "Compile for OpenEuler kernel..." + @+$(call devkernelbuild,modules) + + +# Install the modules and manpage +mandocs_install: manfile + @echo "Copying manpages..." + @install -D -m 644 ${DRIVER}.${MANSECTION}.gz ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz + +# Install kernel module files. This target is called by the RPM specfile +# when generating binary RPMs, and is not expected to modify files outside +# of the build root. Thus, it must not update initramfs, or run depmod. +modules_install: default + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +kylin_modules_install: kylin + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +kylin44_modules_install: kylin44 + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +uos_modules_install: uos + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +euler_modules_install: euler + @echo "Installing modules..." + @+$(call devkernelbuild,modules_install) + +# After installing all the files, perform necessary work to ensure the +# system will use the new modules. This includes running depmod to update +# module dependencies and updating the initramfs image in case the module is +# loaded during early boot. +install: modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +kylin_install: kylin_modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +kylin44_install: kylin44_modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +uos_install: uos_modules_install #mandocs_install + @echo "Running depmod..." 
+ $(call cmd_depmod) + $(call cmd_initramfs) + +euler_install: euler_modules_install #mandocs_install + @echo "Running depmod..." + $(call cmd_depmod) + $(call cmd_initramfs) + +mandocs_uninstall: + if [ -e ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ] ; then \ + rm -f ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ; \ + fi; + +# Remove installed module files. This target is called by the RPM specfile +# when generating binary RPMs, and is not expected to modify files outside +# of the build root. Thus, it must not update the initramfs image or run +# depmod. +modules_uninstall: + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/${DRIVER}.ko; + +# After uninstalling all the files, perform necessary work to restore the +# system back to using the default kernel modules. This includes running +# depmod to update module dependencies and updating the initramfs image. +uninstall: modules_uninstall + $(call cmd_depmod) + $(call cmd_initramfs) + +######## +# Help # +######## +help: + @echo 'Build targets:' + @echo ' default - Build module(s) with standard verbosity' + @echo ' kylin - Build module(s) for kylin Kernel' + @echo ' kylin44 - Build module(s) for kylin 4.4 Kernel' + @echo ' uos - Build module(s) for uos Kernel' + @echo ' euler - Build module(s) for euler Kernel' + @echo ' noisy - Build module(s) with V=1 verbosity -- very noisy' + @echo ' silent - Build module(s), squelching all output' + @echo '' + @echo 'Static Analysis:' + @echo ' checkwarnings - Clean, then build module(s) with W=1 warnings enabled' + @echo ' sparse - Clean, then check module(s) using sparse' + @echo ' ccc - Clean, then check module(s) using coccicheck' + @echo '' + @echo 'Cleaning targets:' + @echo ' clean - Clean files generated by kernel module build' + @echo '' + @echo 'Other targets:' + @echo ' manfile - Generate a gzipped manpage' + @echo ' modules_install - Install the module(s) only' + @echo ' mandocs_install - Install the manpage only' + @echo ' install - Build then install the module(s) and manpage, and update initramfs' + @echo ' kylin_install - Build then install kylin module(s) and update initramfs' + @echo ' kylin44_install - Build then install kylin 4.4 module(s) and update initramfs' + @echo ' uos_install - Build then install uos module(s) and update initramfs' + @echo ' euler_install - Build then install OpenEuler module(s) and update initramfs' + @echo ' modules_uninstall - Uninstall the module(s) only' + @echo ' mandocs_uninstall - Uninstall the manpage only' + @echo ' uninstall - Uninstall the module(s) and manpage, and update initramfs' + @echo ' help - Display this help message' + @echo '' + @echo 'Variables:' + @echo ' LINUX_VERSION - Debug tool to force kernel LINUX_VERSION_CODE. Use at your own risk.' 
+ @echo ' W=N - Kernel variable for setting warning levels' + @echo ' V=N - Kernel variable for setting output verbosity' + @echo ' INSTALL_MOD_PATH - Add prefix for the module and manpage installation path' + @echo ' INSTALL_MOD_DIR - Use module directory other than updates/drivers/net/ethernet/intel/${DRIVER}' + @echo ' Other variables may be available for tuning make process, see' + @echo ' Kernel Kbuild documentation for more information' + +.PHONY: default noisy clean manfile silent sparse ccc install uninstall help + +endif # ifneq($(KERNELRELEASE),) diff --git a/drivers/net/ethernet/guangruntong/auxiliary_compat.h b/drivers/net/ethernet/guangruntong/auxiliary_compat.h new file mode 100644 index 0000000000000..d8b9c9e1dc444 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/auxiliary_compat.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2024 Intel Corporation */ + +#ifndef _AUXILIARY_COMPAT_H_ +#define _AUXILIARY_COMPAT_H_ + +/* This file contains only the minimal set of kernel compatibility backports + * required by auxiliary.c to build. It is similar to the kcompat.h file, but + * reduced to an absolute minimum in order to reduce the risk of generating + * different kernel symbol CRC values at build time. + * + * For a detailed discussion of kernel symbol CRCs, please read: + * + * Documentation/kernel-symbol-crc.rst + * + * Include only the minimum required kernel compatibility implementations from + * kcompat_generated_defs.h and kcompat_impl.h. If a new fix is required, + * please first implement it as part of the kcompat project before porting it + * to this file. + * + * The current list of required implementations is: + * + * NEED_BUS_FIND_DEVICE_CONST_DATA + * NEED_DEV_PM_DOMAIN_ATTACH + * NEED_DEV_PM_DOMAIN_DETACH + * + * Note that kernels since v5.11 support auxiliary as a built-in config + * option. Using this is always preferred to using an out-of-tree module when + * available. + */ + +#include "kcompat_generated_defs.h" + +/**************************** + * Backport implementations * + ****************************/ + +#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA +/* NEED_BUS_FIND_DEVICE_CONST_DATA + * + * bus_find_device() was updated in upstream commit 418e3ea157ef + * ("bus_find_device: Unify the match callback with class_find_device") + * to take a const void *data parameter and also have the match() function + * passed in take a const void *data parameter. + * + * all of the kcompat below makes it so the caller can always just call + * bus_find_device() according to the upstream kernel without having to worry + * about const vs. non-const arguments. 
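+ * + * Illustrative use only (the match callback and the device name below are made-up examples, not part of this driver): a caller written against the upstream const-data prototype builds unchanged on older kernels once the #define below is in place: + * + *   static int match_name(struct device *dev, const void *data) + *   { + *           return !strcmp(dev_name(dev), (const char *)data); + *   } + * + *   dev = bus_find_device(&pci_bus_type, NULL, "0000:01:00.0", match_name);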
+ */ +struct _kc_bus_find_device_custom_data { + const void *real_data; + int (*real_match)(struct device *dev, const void *data); +}; + +static inline int _kc_bus_find_device_wrapped_match(struct device *dev, void *data) +{ + struct _kc_bus_find_device_custom_data *custom_data = data; + + return custom_data->real_match(dev, custom_data->real_data); +} + +static inline struct device * +_kc_bus_find_device(struct bus_type *type, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct _kc_bus_find_device_custom_data custom_data = {}; + + custom_data.real_data = data; + custom_data.real_match = match; + + return bus_find_device(type, start, &custom_data, + _kc_bus_find_device_wrapped_match); +} + +/* force callers of bus_find_device() to call _kc_bus_find_device() on kernels + * where NEED_BUS_FIND_DEVICE_CONST_DATA is defined + */ +#define bus_find_device(type, start, data, match) \ + _kc_bus_find_device(type, start, data, match) +#endif /* NEED_BUS_FIND_DEVICE_CONST_DATA */ + +#if defined(NEED_DEV_PM_DOMAIN_ATTACH) && defined(NEED_DEV_PM_DOMAIN_DETACH) +#include +/* NEED_DEV_PM_DOMAIN_ATTACH and NEED_DEV_PM_DOMAIN_DETACH + * + * dev_pm_domain_attach() and dev_pm_domain_detach() were added in upstream + * commit 46420dd73b80 ("PM / Domains: Add APIs to attach/detach a PM domain for + * a device"). To support older kernels and OSVs that don't have these API, just + * implement how older versions worked by directly calling acpi_dev_pm_attach() + * and acpi_dev_pm_detach(). + */ +static inline int dev_pm_domain_attach(struct device *dev, bool power_on) +{ + if (dev->pm_domain) + return 0; + + if (ACPI_HANDLE(dev)) + return acpi_dev_pm_attach(dev, true); + + return 0; +} + +static inline void dev_pm_domain_detach(struct device *dev, bool power_off) +{ + if (ACPI_HANDLE(dev)) + acpi_dev_pm_detach(dev, true); +} +#else /* NEED_DEV_PM_DOMAIN_ATTACH && NEED_DEV_PM_DOMAIN_DETACH */ +/* it doesn't make sense to compat only one of these functions, and it is + * likely either a failure in kcompat-generator.sh or a failed distribution + * backport if this occurs. Don't try to support it. + */ +#ifdef NEED_DEV_PM_DOMAIN_ATTACH +#error "NEED_DEV_PM_DOMAIN_ATTACH defined but NEED_DEV_PM_DOMAIN_DETACH not defined???" +#endif /* NEED_DEV_PM_DOMAIN_ATTACH */ +#ifdef NEED_DEV_PM_DOMAIN_DETACH +#error "NEED_DEV_PM_DOMAIN_DETACH defined but NEED_DEV_PM_DOMAIN_ATTACH not defined???" 
+#endif /* NEED_DEV_PM_DOMAIN_DETACH */ +#endif /* NEED_DEV_PM_DOMAIN_ATTACH && NEED_DEV_PM_DOMAIN_DETACH */ + +#endif /* _AUXILIARY_COMPAT_H_ */ diff --git a/drivers/net/ethernet/guangruntong/common.mk b/drivers/net/ethernet/guangruntong/common.mk new file mode 100755 index 0000000000000..f8d9e3c8d6f31 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/common.mk @@ -0,0 +1,462 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 1999 - 2024 Intel Corporation + +# +# common Makefile rules useful for out-of-tree Linux driver builds +# +# Usage: include common.mk +# +# After including, you probably want to add a minimum_kver_check call +# +# Required Variables: +# DRIVER +# -- Set to the lowercase driver name + +##################### +# Helpful functions # +##################### + +SHELL := $(shell which bash) +src ?= $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) +readlink = $(shell readlink -f ${1}) + +# helper functions for converting kernel version to version codes +get_kver = $(or $(word ${2},$(subst ., ,${1})),0) +get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \ + [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \ + [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \ + printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) ) + +################ +# depmod Macro # +################ + +cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \ + $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \ + -a ${KVER} + +##################### +# Environment tests # +##################### + +DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]") + +ifeq (,${BUILD_KERNEL}) +BUILD_KERNEL=$(shell uname -r) +endif + +# Kernel Search Path +# All the places we look for kernel source +KSP := /lib/modules/${BUILD_KERNEL}/source \ + /lib/modules/${BUILD_KERNEL}/build \ + /usr/src/linux-${BUILD_KERNEL} \ + /usr/src/kernel-headers-${BUILD_KERNEL} \ + /usr/src/kernel-source-${BUILD_KERNEL} \ + /usr/src/linux \ + /usr/src/kernels/${BUILD_KERNEL} \ + /usr/src/kernels + +# prune the list down to only values that exist and have an include/linux +# sub-directory. We can't use include/config because some older kernels don't +# have this. +test_dir = $(shell [ -e ${dir}/include/linux ] && echo ${dir}) +KSP := $(foreach dir, ${KSP}, ${test_dir}) + +# we will use this first valid entry in the search path +ifeq (,${KSRC}) + KSRC := $(firstword ${KSP}) +endif + +ifeq (,${KSRC}) + $(warning *** Kernel header files not in any of the expected locations.) + $(warning *** Install the appropriate kernel development package, e.g.) + $(error kernel-devel, for building kernel modules and try again) +else +ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC}) + KOBJ := /lib/modules/${BUILD_KERNEL}/build +else + KOBJ := ${KSRC} +endif +endif + +SCRIPT_PATH := ${KSRC}/scripts +info_signed_modules = + +ifeq (,${SCRIPT_PATH}) + info_signed_modules += echo "*** Could not find sign-file script. Cannot sign driver." 
; +else + SIGN_FILE_EXISTS := $(or $(and $(wildcard $(SCRIPT_PATH)/sign-file),1),) + PRIV_KEY_EXISTS := $(or $(and $(wildcard intel-linux-key.key),1),) + PUB_KEY_EXISTS := $(or $(and $(wildcard intel-linux-key.crt),1),) +ifneq ($(and $(SIGN_FILE_EXISTS),$(PRIV_KEY_EXISTS),$(PUB_KEY_EXISTS)),) + info_signed_modules += \ + echo "*** Is sign-file present: ${SIGN_FILE_EXISTS}" ; \ + echo "*** Is private key present: ${PRIV_KEY_EXISTS}" ; \ + echo "*** Is public key present: ${PUB_KEY_EXISTS}" ; + info_signed_modules += echo "*** All files are present, signing driver." ; + sign_driver = $(shell ${SCRIPT_PATH}/sign-file sha256 intel-linux-key.key \ + intel-linux-key.crt ${DRIVER}.ko) +else + info_signed_modules += echo "*** Files are missing, cannot sign driver." ; + sign_driver = +endif +endif + +# Version file Search Path +VSP := ${KOBJ}/include/generated/utsrelease.h \ + ${KOBJ}/include/linux/utsrelease.h \ + ${KOBJ}/include/linux/version.h \ + ${KOBJ}/include/generated/uapi/linux/version.h \ + /boot/vmlinuz.version.h + +# Config file Search Path +CSP := ${KOBJ}/include/generated/autoconf.h \ + ${KOBJ}/include/linux/autoconf.h \ + /boot/vmlinuz.autoconf.h + +# System.map Search Path (for depmod) +MSP := ${KSRC}/System.map \ + /usr/lib/debug/boot/System.map-${BUILD_KERNEL} \ + /boot/System.map-${BUILD_KERNEL} + +# prune the lists down to only files that exist +test_file = $(shell [ -f ${1} ] && echo ${1}) +VSP := $(foreach file, ${VSP}, $(call test_file,${file})) +CSP := $(foreach file, ${CSP}, $(call test_file,${file})) +MSP := $(foreach file, ${MSP}, $(call test_file,${file})) + + +# and use the first valid entry in the Search Paths +ifeq (,${VERSION_FILE}) + VERSION_FILE := $(firstword ${VSP}) +endif + +ifeq (,${CONFIG_FILE}) + CONFIG_FILE := $(firstword ${CSP}) +endif + +ifeq (,${SYSTEM_MAP_FILE}) + SYSTEM_MAP_FILE := $(firstword ${MSP}) +endif + +ifeq (,$(wildcard ${VERSION_FILE})) + $(error Linux kernel source not configured - missing version header file) +endif + +ifeq (,$(wildcard ${CONFIG_FILE})) + $(error Linux kernel source not configured - missing autoconf.h) +endif + +ifeq (,$(wildcard ${SYSTEM_MAP_FILE})) + $(warning Missing System.map file - depmod will not check for missing symbols during module installation) +endif + +ifneq ($(words $(subst :, ,$(CURDIR))), 1) + $(error Sources directory '$(CURDIR)' cannot contain spaces nor colons. 
Rename directory or move sources to another path) +endif + +######################## +# Extract config value # +######################## + +get_config_value = $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\ + grep -m 1 ${1} | awk '{ print $$3 }') + +################ +# dracut Macro # +################ + +cmd_initrd := $(shell \ + if [[ ${KOBJ} != /lib/modules/${BUILD_KERNEL}/* ]]; then \ + echo ""; \ + elif which dracut > /dev/null 2>&1 ; then \ + echo "dracut --force --kver ${BUILD_KERNEL}"; \ + elif which update-initramfs > /dev/null 2>&1 ; then \ + echo "update-initramfs -u -k ${BUILD_KERNEL}"; \ + fi ) + +######################## +# Check module signing # +######################## + +CONFIG_MODULE_SIG_ALL := $(call get_config_value,CONFIG_MODULE_SIG_ALL) +CONFIG_MODULE_SIG_FORCE := $(call get_config_value,CONFIG_MODULE_SIG_FORCE) +CONFIG_MODULE_SIG_KEY := $(call get_config_value,CONFIG_MODULE_SIG_KEY) + +SIG_KEY_SP := ${KOBJ}/${CONFIG_MODULE_SIG_KEY} \ + ${KOBJ}/certs/signing_key.pem + +SIG_KEY_FILE := $(firstword $(foreach file, ${SIG_KEY_SP}, $(call test_file,${file}))) + +# print a warning if the kernel configuration attempts to sign modules but +# the signing key can't be found. +ifneq (${SIG_KEY_FILE},) +warn_signed_modules := : ; +else +warn_signed_modules := +ifeq (${CONFIG_MODULE_SIG_ALL},1) +warn_signed_modules += \ + echo "*** The target kernel has CONFIG_MODULE_SIG_ALL enabled, but" ; \ + echo "*** the signing key cannot be found. Module signing has been" ; \ + echo "*** disabled for this build." ; +endif # CONFIG_MODULE_SIG_ALL=y +ifeq (${CONFIG_MODULE_SIG_FORCE},1) + warn_signed_modules += \ + echo "warning: The target kernel has CONFIG_MODULE_SIG_FORCE enabled," ; \ + echo "warning: but the signing key cannot be found. The module must" ; \ + echo "warning: be signed manually using 'scripts/sign-file'." ; +endif # CONFIG_MODULE_SIG_FORCE +DISABLE_MODULE_SIGNING := Yes +endif + +####################### +# Linux Version Setup # +####################### + +# The following command line parameter is intended for development of KCOMPAT +# against upstream kernels such as net-next which have broken or non-updated +# version codes in their Makefile. They are intended for debugging and +# development purpose only so that we can easily test new KCOMPAT early. If you +# don't know what this means, you do not need to set this flag. There is no +# arcane magic here. + +# Convert LINUX_VERSION into LINUX_VERSION_CODE +ifneq (${LINUX_VERSION},) + LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3)) +endif + +# Honor LINUX_VERSION_CODE +ifneq (${LINUX_VERSION_CODE},) + $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.) + KVER_CODE := ${LINUX_VERSION_CODE} + EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE} +endif + +# Determine SLE_KERNEL_REVISION for SuSE SLE >= 11 (needed by kcompat) +# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string +# appended to the stable kernel version on which their kernel is based with +# additional versioning information (up to 3 numbers), a possible abbreviated +# git SHA1 commit id and a kernel type, e.g. 
CONFIG_LOCALVERSION=-1.2.3-default +# or CONFIG_LOCALVERSION=-999.gdeadbee-default +# SLE >= 15SP3 added additional information about version and service pack +# to their kernel version e.g CONFIG_LOCALVERSION=-150300.59.43.1-default +# +# SLE_LOCALVERSION_CODE is also exported to support legacy kcompat.h +# definitions. +ifeq (1,$(call get_config_value,CONFIG_SUSE_KERNEL)) + +ifneq (10,$(call get_config_value,CONFIG_SLE_VERSION)) + + CONFIG_LOCALVERSION := $(call get_config_value,CONFIG_LOCALVERSION) + LOCALVERSION := $(shell echo ${CONFIG_LOCALVERSION} | \ + cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//') + LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1) +ifeq ($(shell test ${LOCALVER_A} -gt 65535; echo $$?),0) + LOCAL_VER_MAJOR := $(shell echo ${LOCALVER_A:0:3}) + LOCAL_VER_MINOR := $(shell echo ${LOCALVER_A:3:3}) + LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2) + LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3) + LOCALVER_D := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f4) + SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_B} \* 65536 + \ + 0${LOCALVER_C} \* 256 + 0${LOCALVER_D}) + EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} + EXTRA_CFLAGS += -DSLE_KERNEL_REVISION=${LOCALVER_B} +else + LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2) + LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3) + SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \ + 0${LOCALVER_B} \* 256 + 0${LOCALVER_C}) + EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} + EXTRA_CFLAGS += -DSLE_KERNEL_REVISION=${LOCALVER_A} +endif +endif +endif + +EXTRA_CFLAGS += ${CFLAGS_EXTRA} + +# get the kernel version - we use this to find the correct install path +KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \ + awk '{ print $$3 }' | sed 's/\"//g') + +# assume source symlink is the same as build, otherwise adjust KOBJ +ifneq (,$(wildcard /lib/modules/${KVER}/build)) + ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build)) + KOBJ=/lib/modules/${KVER}/build + endif +endif + +ifeq (${KVER_CODE},) + KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\ + grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g') +endif + +# minimum_kver_check +# +# helper function to provide uniform output for different drivers to abort the +# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)". +define _minimum_kver_check +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?")) + $$(warning *** Aborting the build.) + $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3}) +endif +endef +minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3})) + +############################# +# kcompat definitions setup # +############################# + +# In most cases, kcompat flags can be checked within the driver source files +# using simple CPP checks. However, it may be necessary to check for a flag +# value within the Makefile for some specific edge cases. For example, if an +# entire feature ought to be excluded on some kernels due to missing +# functionality. +# +# To support this, kcompat_defs.h is preprocessed and converted into a word list +# that can be checked to determine whether a given kcompat feature flag will +# be defined for this kernel. 
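+# +# (Illustrative sketch only -- the flag and variable names below are +# placeholders rather than anything defined in this file; a Makefile-level +# check of that word list would look roughly like +#   ifneq ($(filter NEED_SOME_FEATURE,$(KCOMPAT_DEFINES)),) +#   EXTRA_CFLAGS += -DDRIVER_OMIT_SOME_FEATURE +#   endif +# where KCOMPAT_DEFINES stands for the preprocessed word list described above.)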
+# + +# call script that populates defines automatically +$(if $(shell \ + $(if $(findstring 1,${V}),,QUIET_COMPAT=1) \ + KSRC=${KSRC} OUT=${src}/kcompat_generated_defs.h CONFIG_FILE=${CONFIG_FILE} \ + bash ${src}/kcompat-generator.sh && echo ok), , $(error kcompat-generator.sh failed)) + +################ +# Manual Pages # +################ + +MANSECTION = 7 + +ifeq (,${MANDIR}) + # find the best place to install the man page + MANPATH := $(shell (manpath 2>/dev/null || echo $MANPATH) | sed 's/:/ /g') + ifneq (,${MANPATH}) + # test based on inclusion in MANPATH + test_dir = $(findstring ${dir}, ${MANPATH}) + else + # no MANPATH, test based on directory existence + test_dir = $(shell [ -e ${dir} ] && echo ${dir}) + endif + # our preferred install path + # should /usr/local/man be in here ? + MANDIR := /usr/share/man /usr/man + MANDIR := $(foreach dir, ${MANDIR}, ${test_dir}) + MANDIR := $(firstword ${MANDIR}) +endif +ifeq (,${MANDIR}) + # fallback to /usr/man + MANDIR := /usr/man +endif + +#################### +# CCFLAGS variable # +#################### + +# set correct CCFLAGS variable for kernels older than 2.6.24 +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,24) ]; echo $$?)) +CCFLAGS_VAR := EXTRA_CFLAGS +else +CCFLAGS_VAR := ccflags-y +endif + +################# +# KBUILD_OUTPUT # +################# + +# Only set KBUILD_OUTPUT if the real paths of KOBJ and KSRC differ +ifneq ($(call readlink,${KSRC}),$(call readlink,${KOBJ})) +export KBUILD_OUTPUT ?= ${KOBJ} +endif + +############################ +# Module Install Directory # +############################ + +# Default to using updates/drivers/net/ethernet/intel/ path, since depmod since +# v3.1 defaults to checking updates folder first, and only checking kernels/ +# and extra afterwards. We use updates instead of kernel/* due to desire to +# prevent over-writing built-in modules files. +export INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/intel/${DRIVER} + +################# +# Auxiliary Bus # +################# + +# If the check_aux_bus script exists, then this driver depends on the +# auxiliary module. Run the script to determine if we need to include +# auxiliary files with this build. +CHECK_AUX_BUS ?= ../scripts/check_aux_bus +ifneq ($(call test_file,${CHECK_AUX_BUS}),) +NEED_AUX_BUS := $(shell ${CHECK_AUX_BUS} --ksrc="${KSRC}" --build-kernel="${BUILD_KERNEL}" >/dev/null 2>&1; echo $$?) +endif # check_aux_bus exists + +# The out-of-tree auxiliary module we ship should be moved into this +# directory as part of installation. +export INSTALL_AUX_DIR ?= updates/drivers/net/ethernet/intel/auxiliary + +# If we're installing auxiliary bus out-of-tree, the following steps are +# necessary to ensure the relevant files get put in place. 
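+# check_aux_bus reports its result through its exit status, which is captured +# in NEED_AUX_BUS above; a value of 2 is treated below as meaning the +# out-of-tree intel_auxiliary module is required, in which case its .ko, its +# symvers file and the headers listed in AUX_BUS_HEADERS are installed and +# -DUSE_INTEL_AUX_BUS is added to the build flags.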
+AUX_BUS_HEADERS ?= linux/auxiliary_bus.h auxiliary_compat.h kcompat_generated_defs.h +ifeq (${NEED_AUX_BUS},2) +define auxiliary_post_install + install -D -m 644 Module.symvers ${INSTALL_MOD_PATH}/lib/modules/${KVER}/extern-symvers/intel_auxiliary.symvers + install -d ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR} + mv -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/intel_auxiliary.ko* \ + ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/ + install -d ${INSTALL_MOD_PATH}/${KSRC}/include/linux + install -D -m 644 ${AUX_BUS_HEADERS} -t ${INSTALL_MOD_PATH}/${KSRC}/include/linux +endef +else +auxiliary_post_install = +endif + +ifeq (${NEED_AUX_BUS},2) +define auxiliary_post_uninstall + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/extern-symvers/intel_auxiliary.symvers + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/intel_auxiliary.ko* + rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h + rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_compat.h + rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/kcompat_generated_defs.h +endef +else +auxiliary_post_uninstall = +endif + +ifeq (${NEED_AUX_BUS},2) +EXTRA_CFLAGS += -DUSE_INTEL_AUX_BUS +endif +###################### +# Kernel Build Macro # +###################### + +# kernel build function +# ${1} is the kernel build target +# ${2} may contain any extra rules to pass directly to the sub-make process +# +# This function is expected to be executed by +# @+$(call kernelbuild,,) +# from within a Makefile recipe. +# +# The following variables are expected to be defined for its use: +# GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem +# CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS +# EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable +# KSRC -- the location of the kernel source tree to build against +# DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER +# W -- if set, enables the W= kernel warnings options +# C -- if set, enables the C= kernel sparse build options +# +kernelbuild = ${Q}$(call warn_signed_modules) \ + ${MAKE} $(if ${GCC_I_SYS},CC="${GCC_I_SYS}") \ + ${CCFLAGS_VAR}="${EXTRA_CFLAGS}" \ + -C "${KSRC}" \ + CONFIG_${DRIVER_UPPERCASE}=m \ + $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG=n) \ + $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG_ALL=) \ + M="${CURDIR}" \ + $(if ${W},W="${W}") \ + $(if ${C},C="${C}") \ + $(if ${NEED_AUX_BUS},NEED_AUX_BUS="${NEED_AUX_BUS}") \ + ${2} ${1} diff --git a/drivers/net/ethernet/guangruntong/dma_add.h b/drivers/net/ethernet/guangruntong/dma_add.h new file mode 100755 index 0000000000000..2b4794cd3c3a0 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/dma_add.h @@ -0,0 +1,57 @@ +#ifndef GRTDMA_H +#define GRTDMA_H + +// Target +#define TARGET_H2C 0 +#define TARGET_C2H 1 +#define TARGET_IRQ 2 +#define TARGET_CONFIG 3 + +#define TARGET_MSIX 8 + +//TARGET_H2C & TARGET_C2H +// Writable addresses +#define ADDR_SG_SWPT 0 +#define ADDR_SG_ADDRLO 1 +#define ADDR_SG_ADDRHI 2 +#define ADDR_SG_MAXNUM 3 +#define ADDR_ENGINE_CTRL 4 + +#define ADDR_DESC_CTRL 5 //WTHRESH, PTHRESH, HTHRESH +#define ADDR_INT_DELAY 6 //Write back & Interrupt Delay Value TIDV RDTR + +#define ADDR_SG_WBADDRLO 7 +#define ADDR_SG_WBADDRHI 8 + +#define ADDR_DCA_RXTXCTL 9 + +// Readable Addresses +#define ADDR_SG_HWPT 16 + + +//TARGET_IRQ +// Writable addresses +#define ADDR_INTR_ICS 0 //Interrupt Cause Set Register 1:This registers allows triggering an immediate interrupt by 
software; 1 = the corresponding interrupt is asserted +#define ADDR_INTR_IMS 1 //interrupt mask set; 1 = enable (unmask) the corresponding interrupt, 0 = no effect. To mask an interrupt, use IMC instead +#define ADDR_INTR_IMC 2 //interrupt mask clear; 1 = disable (mask) the corresponding interrupt, 0 = no effect +#define ADDR_INTR_IAM 3 //interrupt auto mask; when the IAME (interrupt acknowledge auto-mask enable) bit is set to 1, a read or write of the ICR register has the side effect of writing the IAM register value into IMC. When that bit is 0b, the feature is disabled +#define ADDR_INTR_MODE 4 //interrupt mode, c2s/s2c; 4 bits of the upper 16 bits select the channel, 2 bits of the lower 16 bits select the mode, 1 = c2s EOP interrupt, 0 = s2c normal interrupt + +#define ADDR_INTR_ITR 5 //MSI-X: one entry per interrupt vector, i.e. channel*2 (RX/TX) + 1 (Other); 32-bit value, the upper 16 bits (4 bits used, up to 32 interrupts) hold the vector, the lower 16 bits hold the ITR data. If MSI-X is not supported, the data lives in the first vector + //The interval is specified in 256 ns increments. Zero disables interrupt throttling logic. + +#define ADD_INTR_IVAR 6 +#define ADD_INTR_IVAR_MISC 7 + + +// Readable Addresses +#define ADDR_INTR_VECTOR 8 + +//TARGET_CONFIG +#define ADDR_CORESETTINGS 0 +#define ADDR_FPGA_NAME 1 + +#define ADDR_DCA_GTCL 3 +#define ADDR_FUNC_RST 4 + +#endif /* GRTDMA_H */ diff --git a/drivers/net/ethernet/guangruntong/grtnic.h b/drivers/net/ethernet/guangruntong/grtnic.h new file mode 100755 index 0000000000000..57e5693b2c8a0 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic.h @@ -0,0 +1,1209 @@ +/* + +Copyright (c) 2018 Alex Forencich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.
+ +*/ + +#ifndef GRTNIC_CORE_H +#define GRTNIC_CORE_H + +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include + +#define CONFIG_KEYLIN_OS 1 + +//#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC +//#define HAVE_SWIOTLB_SKIP_CPU_SYNC //飞腾ARM平台可以强制打开提高性能 +//#endif + +#include "kcompat.h" +#include "dma_add.h" + +#ifdef HAVE_NDO_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +//#define CONFIG_DISABLE_PACKET_SPLIT //龙芯平台可以需要打开这个开关 +#define GRTNIC_NO_LRO + +#define DRIVER_NAME "grtnic_xgb" +#define DRIVER_VERSION "1.24.0711" + +#define CHANNEL0_PORT_MASK 0x03 + +#define CHANNEL_NUM_MAX (16) +#define GRTNIC_PORTS_MAX (16) + +#define GRTNIC_DEFAULT_TXD (512) +#define GRTNIC_DEFAULT_TX_WORK (256) +#define GRTNIC_DEFAULT_RXD (512) + +#define GRTNIC_MAX_NUM_DESCRIPTORS 4096 +#define GRTNIC_MIN_NUM_DESCRIPTORS 64 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define GRTNIC_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define GRTNIC_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define GRTNIC_REQ_TX_BUFFER_GRANULARITY 1024 + + +#define MAX_Q_VECTORS 10 + +/* Transmit and receive queues */ +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES 8 + +///////////////////////////////////////////////////////////////////////////// +#define XPHY_STATUS (0x0000) +#define MAC_ADRS_FILTER (0x0004) + +#define DESIGN_STATUS (0x0010) +#define IPXE_STATUS (0x0014) +#define TEMP_STATUS (0x0018) +#define SERIAL_NO (0x001C) + +//#define MAC_ADRS_FILTER (0x0004) //redefine in macphy.h XXGE_AFC_OFFSET +//#define MAC0_ADRS_LOW (0x0018) //redefine in macphy.h XXGE_MACADDR_OFFSET + +#define MAC_ADRS_ID (0x0020) +#define MAC_ADRS_LOW (0x0024) +#define MAC_ADRS_HIGH (0x0028) + +#define PHY_TX_DISABLE (0x0040) +#define MAC_LED_CTL (0x0044) +#define MAX_LED_PKT_NUM (0x0048) + +#define I2CCTL (0x0050) +#define ASIC_BOOT (0x0054) +#define FLASH_CMD (0x0058) + +#define ASIC_RX_FIFO_RST (0x0064) +#define ASIC_TX_FIFO_RST (0x0068) + +#define FC_WATERMARK (0x0070) +#define ETH_TX_PAUSE (0x0074) +#define CSUM_ENABLE (0x008C) + +#define MAC_HASH_TABLE_START (0x0200) +#define MAC_HASH_TABLE_WR (0x0204) +#define MAC_RX_OVERFLOW_FRAME (0x0210) + + +#define RSS_KEY_BEGIN (0x0300) +#define RSS_KEY_END (0x0324) + +#define RSS_RETA_BEGIN (0x0330) +#define RSS_RETA_END (0x03AC) + +#define FIRMWARE_CMD (0x040C) + +#define ETH_HIGH_MARK (96) +#define ETH_LOW_MARK (32) + +////////////////////////////////////////////////// +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */ +#define MAX_RX_JUMBO_FRAME_SIZE 0x2600 + + +/* Supported Rx Buffer Sizes */ +#define GRTNIC_RXBUFFER_256 256 /* Used for skb receive header */ +#define GRTNIC_RXBUFFER_1536 1536 +#define GRTNIC_RXBUFFER_2K 2048 +#define GRTNIC_RXBUFFER_3K 3072 +#define GRTNIC_RXBUFFER_4K 4096 +#ifdef CONFIG_DISABLE_PACKET_SPLIT +#define GRTNIC_RXBUFFER_7K 7168 +#define GRTNIC_RXBUFFER_8K 8192 +#define GRTNIC_RXBUFFER_15K 15360 +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ +#define GRTNIC_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. 
In these cases we should fall back to the 3K + * buffers. + */ +#if (PAGE_SIZE < 8192) +#define GRTNIC_MAX_2K_FRAME_BUILD_SKB (GRTNIC_RXBUFFER_1536 - NET_IP_ALIGN) +#define GRTNIC_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + GRTNIC_RXBUFFER_1536) > SKB_WITH_OVERHEAD(GRTNIC_RXBUFFER_2K)) + +static inline int grtnic_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int grtnic_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (GRTNIC_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = GRTNIC_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = GRTNIC_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return grtnic_compute_pad(rx_buf_len); +} + +#define GRTNIC_SKB_PAD grtnic_skb_pad() +#else //(PAGE_SIZE < 8192) +#define GRTNIC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif //!(PAGE_SIZE < 8192) + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define GRTNIC_RX_HDR_SIZE GRTNIC_RXBUFFER_256 + +#define GRTNIC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#ifdef HAVE_STRUCT_DMA_ATTRS +#define GRTNIC_RX_DMA_ATTR NULL +#else +#define GRTNIC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + + + +#define MAX_EITR 0x00000FF8 +#define MIN_EITR 8 + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define GRTNIC_MIN_RSC_ITR 24 +#define GRTNIC_100K_ITR 40 +#define GRTNIC_20K_ITR 200 +#define GRTNIC_16K_ITR 248 +#define GRTNIC_12K_ITR 336 + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + + +/* Interrupt modes, as used by the IntMode parameter */ +#define GRTNIC_INT_MODE_MSIX 0 +#define GRTNIC_INT_MODE_MSI 1 +#define GRTNIC_INT_MODE_LEGACY 2 + + +/* obtain the 32 most significant (high) bits of a 32-bit or 64-bit address */ +#define PCI_DMA_H(addr) ((addr >> 16) >> 16) +/* obtain the 32 least significant (low) bits of a 32-bit or 64-bit address */ +#define PCI_DMA_L(addr) (addr & 0xffffffffUL) + +#define TX_INT_DELAY 32 +#define RX_INT_DELAY 32 + +#define GRTNIC_TXDCTL_DMA_BURST_ENABLE \ + (0x00000000 | /* set descriptor granularity */ \ + (1u << 25) | /* LWTHRESH */ \ + (8u << 16) | /* wthresh must be +1 more than desired */\ + (1u << 8) | /* hthresh */ \ + 0x20) /* pthresh */ + +#define GRTNIC_RXDCTL_DMA_BURST_ENABLE \ + (0x00000000 | /* set descriptor granularity */ \ + (1u << 25) | /* LWTHRESH */ \ + (8u << 16) | /* set writeback threshold */ \ + (4u << 8) | /* set Hrefetch threshold */ \ + 0x20) /* set Pthresh */ + +enum grt_gigeth_boards { + board_902E_GRT_FF, + board_902T_GRT_FF, + board_901ELR_GRT_FF, + board_1001E_GRT_FF, + board_1001E_QM_FF, + board_1002E_GRT_FF, + board_1005E_GRT_FX +}; + +struct grt_gigeth_info { + enum grt_gigeth_boards type; + 
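+ /* maximum number of DMA channels provided by this board */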
int dma_channel_max; + unsigned char port_type; //0 for FIBER; 1 for COPPER + unsigned char port_speed; //0 for 1G; 1 for 10G +}; + +extern const struct grt_gigeth_info grt_902eff_info; +extern const struct grt_gigeth_info grt_902tff_info; +extern const struct grt_gigeth_info grt_901elr_info; +extern const struct grt_gigeth_info grt_1001eff_info; +extern const struct grt_gigeth_info qm_1001eff_info; +extern const struct grt_gigeth_info grt_1002eff_info; +extern const struct grt_gigeth_info grt_1005efx_info; + +/* Direct Cache Access (DCA) definitions */ +#define GRTNIC_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define GRTNIC_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define GRTNIC_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define GRTNIC_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define GRTNIC_DCA_RXCTRL_CPUID_MASK 0xFF000000 /* Rx CPUID Mask */ +#define GRTNIC_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID Shift */ +#define GRTNIC_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ +#define GRTNIC_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ +#define GRTNIC_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ +#define GRTNIC_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ +#define GRTNIC_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define GRTNIC_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define GRTNIC_DCA_TXCTRL_CPUID_MASK 0xFF000000 /* Tx CPUID Mask */ +#define GRTNIC_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID Shift */ +#define GRTNIC_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define GRTNIC_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define GRTNIC_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define GRTNIC_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +/* iterator for handling rings in ring container */ +#define grtnic_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define GRTNIC_TIDV_FPD BIT(31) +#define GRTNIC_RDTR_FPD BIT(31) + +#define GRTNIC_GET_DESC(R, i, type) (&(((union type *)((R).desc))[i])) +#define GRTNIC_TX_DESC(R, i) GRTNIC_GET_DESC(R, i, grtnic_tx_desc) +#define GRTNIC_RX_DESC(R, i) GRTNIC_GET_DESC(R, i, grtnic_rx_desc) + +#define GRTNIC_MAX_JUMBO_FRAME_SIZE 65536+18 + +#define GRTNIC_DEAD_READ_RETRIES 10 +#define GRTNIC_DEAD_READ_REG 0xdeadbeefU +#define GRTNIC_FAILED_READ_REG 0xffffffffU +#define GRTNIC_FAILED_READ_RETRIES 5 + + +//static inline void write_register(u32 value, void *iomem) +//{ +// iowrite32(value, iomem); +//} +// +//static inline u32 read_register(void *iomem) +//{ +// return ioread32(iomem); +//} + +static inline bool grtnic_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define GRTNIC_REMOVED(a) grtnic_removed(a) + +////////////////////////////////////////////////////////////////////////////// +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct grtnic_tx_buffer { + union grtnic_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct grtnic_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned int in_port; + u32 length; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 
pagecnt_bias; +#endif +}; + +struct grtnic_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct grtnic_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct grtnic_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +/* Statistics counters collected by the MAC */ +struct grtnic_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 scc; + u64 mcc; + u64 mpc; + u64 ecol; + u64 latecol; + u64 dc; + u64 rlec; + u64 rxpause; + u64 txpause; + u64 tx_underrun; + u64 badopcode; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 prcoversize; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 ruc; + u64 rfc; + u64 roc; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 ptcoversize; + u64 mptc; + u64 bptc; +}; + +enum grtnic_ring_state_t { +#ifndef CONFIG_DISABLE_PACKET_SPLIT + __GRTNIC_RX_3K_BUFFER, + __GRTNIC_RX_BUILD_SKB_ENABLED, +#endif + __GRTNIC_RX_RSC_ENABLED, + __GRTNIC_RX_CSUM_UDP_ZERO_ERR, +#if IS_ENABLED(CONFIG_FCOE) + __GRTNIC_RX_FCOE, +#endif + __GRTNIC_TX_FDIR_INIT_DONE, + __GRTNIC_TX_XPS_INIT_DONE, + __GRTNIC_TX_DETECT_HANG, + __GRTNIC_HANG_CHECK_ARMED, + __GRTNIC_TX_XDP_RING, +#ifdef HAVE_AF_XDP_ZC_SUPPORT + __GRTNIC_TX_DISABLED, +#endif +}; + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + +#define ring_uses_build_skb(ring) \ + test_bit(__GRTNIC_RX_BUILD_SKB_ENABLED, &(ring)->state) +#endif + +#define check_for_tx_hang(ring) \ + test_bit(__GRTNIC_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__GRTNIC_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__GRTNIC_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__GRTNIC_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__GRTNIC_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__GRTNIC_RX_RSC_ENABLED, &(ring)->state) +#define netdev_ring(ring) (ring->netdev) +#define ring_queue_index(ring) (ring->queue_index) + +struct grtnic_ring { + struct grtnic_ring *next; /* pointer to next ring in q_vector */ + struct grtnic_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + + void *desc_wb; /* pointer to desc writeback memory */ + dma_addr_t desc_wb_dma;/* phys address of desc writeback memory */ + + struct sk_buff *skb; + + union { + struct grtnic_tx_buffer *tx_buffer_info; + struct grtnic_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + + unsigned int size; /* length of ring in bytes */ + u16 count; /* number of desc. 
in ring */ + + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + + u16 next_to_use; + u16 next_to_clean; + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + u16 next_to_alloc; +#endif + +//#ifdef CONFIG_DISABLE_PACKET_SPLIT + u16 rx_buffer_len; +//#endif + + struct grtnic_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#endif + union { + struct grtnic_tx_queue_stats tx_stats; + struct grtnic_rx_queue_stats rx_stats; + }; + +} ____cacheline_internodealigned_in_smp; + + +#ifndef CONFIG_DISABLE_PACKET_SPLIT +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int grtnic_rx_bufsz(struct grtnic_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(GRTNIC_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + if (test_bit(__GRTNIC_RX_3K_BUFFER, &ring->state)) + return GRTNIC_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return GRTNIC_MAX_2K_FRAME_BUILD_SKB; +#endif + return GRTNIC_RXBUFFER_2K; +#endif +} + +static inline unsigned int grtnic_rx_pg_order(struct grtnic_ring __maybe_unused *ring) +{ +#if (PAGE_SIZE < 8192) + if (test_bit(__GRTNIC_RX_3K_BUFFER, &ring->state)) + return 1; +#endif + return 0; +} +#define grtnic_rx_pg_size(_ring) (PAGE_SIZE << grtnic_rx_pg_order(_ring)) + +#endif //CONFIG_DISABLE_PACKET_SPLIT + +#define ITR_ADAPTIVE_MIN_INC 2 +#define ITR_ADAPTIVE_MIN_USECS 10 +#define ITR_ADAPTIVE_MAX_USECS 84 +#define ITR_ADAPTIVE_LATENCY 0x80 +#define ITR_ADAPTIVE_BULK 0x00 +#define ITR_ADAPTIVE_MASK_USECS (ITR_ADAPTIVE_LATENCY - ITR_ADAPTIVE_MIN_INC) + +struct grtnic_ring_container { + struct grtnic_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
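+ * + * The ring[] flexible array member at the end of the structure is meant to be sized at allocation time; illustratively (not necessarily how this driver allocates it): + * + *   q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);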
+ */ +struct grtnic_q_vector { + struct grtnic_adapter *adapter; + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + + u32 eims_value; /* EIMS mask value */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct grtnic_ring_container rx, tx; + + struct napi_struct napi; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + cpumask_t affinity_mask; +#endif + int node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + bool netpoll_rx; + +#ifdef HAVE_NDO_BUSY_POLL + atomic_t state; +#endif /* HAVE_NDO_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct grtnic_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + + +#ifdef HAVE_NDO_BUSY_POLL +enum grtnic_qv_state_t { + GRTNIC_QV_STATE_IDLE = 0, + GRTNIC_QV_STATE_NAPI, + GRTNIC_QV_STATE_POLL, + GRTNIC_QV_STATE_DISABLE +}; + +static inline void grtnic_qv_init_lock(struct grtnic_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool grtnic_qv_lock_napi(struct grtnic_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, GRTNIC_QV_STATE_IDLE, GRTNIC_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != GRTNIC_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == GRTNIC_QV_STATE_IDLE; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline void grtnic_qv_unlock_napi(struct grtnic_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != GRTNIC_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_IDLE); +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool grtnic_qv_lock_poll(struct grtnic_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, GRTNIC_QV_STATE_IDLE, GRTNIC_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != GRTNIC_QV_STATE_IDLE) + q_vector->rx.ring->stats.yields++; +#endif + return rc == GRTNIC_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void grtnic_qv_unlock_poll(struct grtnic_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != GRTNIC_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool grtnic_qv_busy_polling(struct grtnic_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == GRTNIC_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool grtnic_qv_disable(struct grtnic_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, GRTNIC_QV_STATE_IDLE, GRTNIC_QV_STATE_DISABLE); + + return rc == GRTNIC_QV_STATE_IDLE; +} + +#endif /* HAVE_NDO_BUSY_POLL */ + +enum grtnic_state_t { + __GRTNIC_TESTING, + __GRTNIC_RESETTING, + __GRTNIC_DOWN, + __GRTNIC_DISABLED, + __GRTNIC_REMOVING, + __GRTNIC_SERVICE_SCHED, + __GRTNIC_SERVICE_INITED, + __GRTNIC_IN_SFP_INIT, +#ifdef HAVE_PTP_1588_CLOCK + __GRTNIC_PTP_RUNNING, + __GRTNIC_PTP_TX_IN_PROGRESS, +#endif + __GRTNIC_RESET_REQUESTED, +}; + +struct grtnic_cb { +#ifdef 
CONFIG_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; +#endif + dma_addr_t dma; +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif + u16 append_cnt; /* number of skb's appended */ +#ifndef CONFIG_DISABLE_PACKET_SPLIT + bool page_released; +#endif +}; +#define GRTNIC_CB(skb) ((struct grtnic_cb *)(skb)->cb) + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +struct grtnic_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + + +union grtnic_tx_desc { + struct { + __le64 src_addr; /* Address of descriptor's data buf */ + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + struct + { + u32 csum_info:16; + u32 reserved:12; + u32 port:4; + } tx_info; /*user data */ + } read; + + struct { + __le64 rsvd0; /* Reserved */ + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + __le32 rsvd1; + } wb; +}; + + +union grtnic_rx_desc { + struct { + __le64 src_addr; /* Packet buffer address */ + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + __le32 rsvd; + } read; + + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + + struct + { + struct + { + u32 len:20; + u32 desc_num:4; + u32 chl :3; + u32 cmp:1; + u32 rs :1; + u32 irq:1; + u32 eop:1; + u32 sop:1; + }len_ctl; + + struct + { + u32 csum_ok:1; + u32 ipcs:1; + u32 tcpcs:1; + u32 udpcs:1; + u32 udp_csum_flag:1; + u32 reserved:27; + } rx_info; + } upper; + } wb; /* writeback */ +}; + +///////////////////////////////////////////////////////////////////////////////////// +#define GRTNIC_MAX_TXD_PWR 13 +#define GRTNIC_MAX_DATA_PER_TXD (1 << GRTNIC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), GRTNIC_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + + +//struct grtnic_buffer { +// dma_addr_t dma; +// struct sk_buff *skb; +// unsigned int in_port; +// unsigned long time_stamp; +// u32 length; +// u16 next_to_watch; +// unsigned int segs; +// unsigned int bytecount; +// u16 mapped_as_page; +//#ifndef CONFIG_DISABLE_PACKET_SPLIT +// struct page *page; +//#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) +// __u32 page_offset; +//#else +// __u16 page_offset; +//#endif +// __u16 pagecnt_bias; +//#endif +//}; + +struct grtnic_desc_wb { + u32 desc_hw_ptr; +} __packed; + +//////////////////////////////////////////////////////////////////////////////////// +enum fc_mode { + fc_none = 0, + fc_rx_pause, + fc_tx_pause, + fc_full, + fc_default = 0xFF +}; + +struct fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + 
bool fc_autoneg; + enum fc_mode current_mode; /* FC mode in effect */ + enum fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct grtnic_mac_info { + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + u32 mc_filter_type; + u16 mta_reg_count; + + struct fc_info fc; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; +}; + +struct grtnic_hw { + // BAR pointers + void * __iomem dma_bar; + void * __iomem user_bar; + resource_size_t dma_bar_len; + resource_size_t user_bar_len; + void *back; + struct grtnic_mac_info mac; + bool adapter_stopped; + u32 phy_addr; +}; + +/* default to trying for four seconds */ +#define GRTNIC_TRY_LINK_TIMEOUT (4 * HZ) + +struct grtnic_adapter { + struct device *dev; + struct pci_dev *pdev; + struct net_device *netdev; + + int func; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + /* TX */ + struct grtnic_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + u64 tx_busy; + + /* RX */ + struct grtnic_ring *rx_ring[MAX_RX_QUEUES]; + u64 hw_csum_rx_error; + u64 non_eop_descs; + u32 alloc_rx_page; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + const struct grt_gigeth_info *ei; + + int rss_queues; + int num_q_vectors; + + u8 ivar[MAX_Q_VECTORS]; + struct grtnic_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + + unsigned int id; + + struct proc_dir_entry *proc_dir; //for test + u32 tx_count0; + u32 tx_count1; + u32 rx_count; + + struct msix_entry *msix_entries; + int int_mode; + +#ifdef ETHTOOL_TEST + u32 test_icr; + struct grtnic_ring test_tx_ring; + struct grtnic_ring test_rx_ring; +#endif + + struct grtnic_hw hw; + u16 msg_enable; + + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; + + u32 max_frame_size; + u32 min_frame_size; + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + + struct grtnic_hw_stats stats; + +//#ifdef ETHTOOL_GRXFHINDIR +// u32 rss_indir_tbl_init; +// u8 rss_indir_tbl[RETA_SIZE]; +//#endif + +#define GRTNIC_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[GRTNIC_MAX_RETA_ENTRIES]; + +#define GRTNIC_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 *rss_key; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. 
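+	 * For example, MSI-X support on the hardware is reported with
+	 * GRTNIC_FLAG_MSIX_CAPABLE, while the mode actually negotiated at
+	 * probe time is tracked separately with GRTNIC_FLAG_MSIX_ENABLED.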
+ */ + u32 flags; +#define GRTNIC_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define GRTNIC_FLAG_MSI_ENABLED (u32)(1 << 1) +#define GRTNIC_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define GRTNIC_FLAG_MSIX_ENABLED (u32)(1 << 3) + +#define GRTNIC_FLAG_TXCSUM_CAPABLE (u32)(1 << 4) +#define GRTNIC_FLAG_RXCSUM_CAPABLE (u32)(1 << 5) + +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#define GRTNIC_FLAG_DCA_ENABLED (u32)(1 << 6) +#define GRTNIC_FLAG_DCA_CAPABLE (u32)(1 << 7) +#define GRTNIC_FLAG_DCA_ENABLED_DATA (u32)(1 << 8) +#else +#define GRTNIC_FLAG_DCA_ENABLED (u32)0 +#define GRTNIC_FLAG_DCA_CAPABLE (u32)0 +#define GRTNIC_FLAG_DCA_ENABLED_DATA (u32)0 +#endif +#define GRTNIC_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define GRTNIC_FLAG_DCB_ENABLED (u32)(1 << 10) +#define GRTNIC_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define GRTNIC_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define GRTNIC_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define GRTNIC_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) +#define GRTNIC_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define GRTNIC_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#if IS_ENABLED(CONFIG_FCOE) +#define GRTNIC_FLAG_FCOE_CAPABLE (u32)(1 << 17) +#define GRTNIC_FLAG_FCOE_ENABLED (u32)(1 << 18) +#endif /* CONFIG_FCOE */ +#define GRTNIC_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define GRTNIC_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define GRTNIC_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define GRTNIC_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define GRTNIC_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define GRTNIC_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define GRTNIC_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define GRTNIC_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define GRTNIC_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define GRTNIC_FLAG_MDD_ENABLED (u32)(1 << 29) +#define GRTNIC_FLAG_DCB_CAPABLE (u32)(1 << 30) +#define GRTNIC_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(31) + +// struct grtnic_mac_info mac; + int type; + int speed; + + u16 bd_number; + bool netdev_registered; +}; + +/* Error Codes */ +#define GRTNIC_SUCCESS 0 +#define GRTNIC_ERR_OUT_OF_MEM -34 + + +//////////////////////////////////////////////////////////////// +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((NETIF_MSG_##nlevel & adapter->msg_enable) ? \ + (void)(netdev_printk(KERN_##klevel, adapter->netdev, \ + "%s: " fmt, __func__, ## args)) : NULL) + +#define hw_err(hw, format, arg...) \ + netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +static inline void GRTNIC_WRITE_REG(struct grtnic_hw *hw, u32 reg, u32 value, u8 bar) +{ + u8 __iomem *reg_addr; + + reg_addr = bar ? 
hw->dma_bar : hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return; + writel(value, reg_addr + reg); +} + +#define GRTNIC_READ_REG(h, r, b) grtnic_read_reg(h, r, b) //hw, reg, bar +#define GRTNIC_WRITE_FLUSH(a) GRTNIC_READ_REG(a, XPHY_STATUS, 0) + + + +//#ifdef CONFIG_BQL +static inline struct netdev_queue *txring_txq(const struct grtnic_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} +//#endif /* CONFIG_BQL */ + +u32 grtnic_read_reg(struct grtnic_hw *hw, u32 reg, u8 bar); + +#ifdef GRTNIC_PROCFS +void grtnic_procfs_exit(struct grtnic_adapter *adapter); +int grtnic_procfs_init(struct grtnic_adapter *adapter); +int grtnic_procfs_topdir_init(void); +void grtnic_procfs_topdir_exit(void); +#endif /* GRTNIC_PROCFS */ + + //main.c +void grtnic_write_itr (struct grtnic_q_vector *q_vector); +void grtnic_update_stats(struct grtnic_adapter *adapter); +void grtnic_down(struct grtnic_adapter *adapter); +void grtnic_assign_netdev_ops(struct net_device *netdev); +irqreturn_t grtnic_msix_other(int __always_unused irq, void *data); +irqreturn_t grtnic_msix_ring(int __always_unused irq, void *data); +irqreturn_t grtnic_isr (int __always_unused irq, void *data); +int grtnic_poll(struct napi_struct *napi, int budget); +void grtnic_close_suspend(struct grtnic_adapter *adapter); + +void grtnic_check_options(struct grtnic_adapter *adapter); //in param.c + + //netdev.c +void grtnic_setup_mrqc(struct grtnic_adapter *adapter); +void grtnic_configure_msix(struct grtnic_adapter *adapter); +void grtnic_configure_msi_and_legacy(struct grtnic_adapter *adapter); +int grtnic_request_irq(struct grtnic_adapter *adapter); +void grtnic_irq_enable(struct grtnic_adapter *adapter); +void grtnic_irq_disable(struct grtnic_adapter *adapter); +void grtnic_free_irq(struct grtnic_adapter *adapter); +void grtnic_napi_enable_all(struct grtnic_adapter *adapter); +void grtnic_napi_disable_all(struct grtnic_adapter *adapter); +void grtnic_service_event_schedule(struct grtnic_adapter *adapter); +void grtnic_set_ethtool_ops(struct net_device *netdev); + +//#ifdef ETHTOOL_OPS_COMPAT +// int ethtool_ioctl(struct ifreq *ifr); +//#endif + + //ethtool.c +void grtnic_disable_rx_queue(struct grtnic_adapter *adapter); +void grtnic_disable_tx_queue(struct grtnic_adapter *adapter); +void grtnic_reset(struct grtnic_adapter *adapter); +void grtnic_do_reset(struct net_device *netdev); + +void grtnic_configure_tx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring); +void grtnic_configure_rx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring); +void grtnic_alloc_rx_buffers(struct grtnic_ring *rx_ring, u16 cleaned_count); +netdev_tx_t grtnic_xmit_frame_ring (struct sk_buff *skb, struct grtnic_adapter __maybe_unused *adapter, struct grtnic_ring *tx_ring); + +int grtnic_close(struct net_device *netdev); +int grtnic_open(struct net_device *netdev); + +int grtnic_setup_tx_resources(struct grtnic_ring *tx_ring); +int grtnic_setup_rx_resources(struct grtnic_ring *rx_ring); +void grtnic_free_tx_resources(struct grtnic_ring *tx_ring); +void grtnic_free_rx_resources(struct grtnic_ring *rx_ring); +void grtnic_up(struct grtnic_adapter *adapter); +void grtnic_store_reta(struct grtnic_adapter *adapter); + +u32 grtnic_rss_indir_tbl_entries(struct grtnic_adapter *adapter); +void grtnic_store_key(struct grtnic_adapter *adapter); + +#endif /* GRTNIC_CORE_H */ diff --git a/drivers/net/ethernet/guangruntong/grtnic_ethtool.c b/drivers/net/ethernet/guangruntong/grtnic_ethtool.c new file mode 100755 index 
0000000000000..db03efa7ad2fb --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_ethtool.c @@ -0,0 +1,2262 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include + +#include "grtnic.h" +#include "grtnic_nvm.h" +#include "grtnic_macphy.h" + +#define be32(x) ((x<<24 & 0xff000000) | (x<<8 & 0x00ff0000) | (x>>8 & 0x0000ff00) | (x>>24 & 0x000000ff)) +#define be16(x) ((x<<8 & 0xff00) | (x>>8 & 0x00ff)) + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + + +#ifdef ETHTOOL_GSTATS +struct grtnic_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define GRTNIC_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} + +static const struct grtnic_stats grtnic_gstrings_net_stats[] = { + GRTNIC_NETDEV_STAT(rx_errors), + GRTNIC_NETDEV_STAT(tx_errors), + GRTNIC_NETDEV_STAT(tx_dropped), + GRTNIC_NETDEV_STAT(rx_length_errors), + GRTNIC_NETDEV_STAT(rx_over_errors), + GRTNIC_NETDEV_STAT(rx_frame_errors), + GRTNIC_NETDEV_STAT(rx_fifo_errors), + GRTNIC_NETDEV_STAT(tx_fifo_errors), + GRTNIC_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define GRTNIC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct grtnic_adapter, _stat), \ + .stat_offset = offsetof(struct grtnic_adapter, _stat) \ +} + +static const struct grtnic_stats grtnic_gstrings_stats[] = { + GRTNIC_STAT("rx_packets", stats.gprc), + GRTNIC_STAT("tx_packets", stats.gptc), + GRTNIC_STAT("rx_bytes", stats.gorc), + GRTNIC_STAT("tx_bytes", stats.gotc), + + GRTNIC_STAT("lsc_int", lsc_int), + GRTNIC_STAT("tx_busy", tx_busy), + GRTNIC_STAT("non_eop_descs", non_eop_descs), +// GRTNIC_STAT("tx_timeout_count", tx_timeout_count), + GRTNIC_STAT("tx_restart_queue", restart_queue), + GRTNIC_STAT("rx_csum_offload_errors", hw_csum_rx_error), + GRTNIC_STAT("alloc_rx_page", alloc_rx_page), + GRTNIC_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + GRTNIC_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + + GRTNIC_STAT("rx_broadcast", stats.bprc), + GRTNIC_STAT("tx_broadcast", stats.bptc), + GRTNIC_STAT("rx_multicast", stats.mprc), + GRTNIC_STAT("tx_multicast", stats.mptc), + GRTNIC_STAT("multicast", stats.mprc), + GRTNIC_STAT("rx_pause", stats.rxpause), + GRTNIC_STAT("tx_pause", stats.txpause), + GRTNIC_STAT("tx_underrun", stats.tx_underrun), + GRTNIC_STAT("rx_crc_errors", stats.crcerrs), + GRTNIC_STAT("rx_missed_errors", stats.mpc), + GRTNIC_STAT("tx_aborted_errors", stats.ecol), + GRTNIC_STAT("tx_window_errors", stats.latecol), + GRTNIC_STAT("tx_abort_late_coll", stats.latecol), + GRTNIC_STAT("tx_deferred_ok", stats.dc), + GRTNIC_STAT("tx_single_coll_ok", stats.scc), + GRTNIC_STAT("tx_multi_coll_ok", stats.mcc), + GRTNIC_STAT("rx_long_length_errors", stats.roc), + GRTNIC_STAT("rx_short_length_errors", stats.ruc), + GRTNIC_STAT("rx_align_errors", stats.algnerrc), + GRTNIC_STAT("rx_long_byte_count", stats.gorc) +}; + +/* grtnic allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
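+ * For example, an adapter brought up with 8 Tx queues will also report
+ * 8 sets of Rx queue statistics in the ethtool output below.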
+ */ +#ifdef HAVE_TX_MQ +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define GRTNIC_NUM_RX_QUEUES netdev->num_tx_queues +#define GRTNIC_NUM_TX_QUEUES netdev->num_tx_queues +#else +#define GRTNIC_NUM_RX_QUEUES adapter->num_tx_queues +#define GRTNIC_NUM_TX_QUEUES adapter->num_tx_queues +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else /* HAVE_TX_MQ */ +#define GRTNIC_NUM_TX_QUEUES 1 +#define GRTNIC_NUM_RX_QUEUES ( \ + ((struct grtnic_adapter *)netdev_priv(netdev))->num_rx_queues) +#endif /* HAVE_TX_MQ */ + +#define GRTNIC_QUEUE_STATS_LEN ( \ + (GRTNIC_NUM_TX_QUEUES + GRTNIC_NUM_RX_QUEUES) * \ + (sizeof(struct grtnic_queue_stats) / sizeof(u64))) + +#define GRTNIC_GLOBAL_STATS_LEN ARRAY_SIZE(grtnic_gstrings_stats) +#define GRTNIC_NETDEV_STATS_LEN ARRAY_SIZE(grtnic_gstrings_net_stats) +#define GRTNIC_STATS_LEN \ + (GRTNIC_GLOBAL_STATS_LEN + GRTNIC_NETDEV_STATS_LEN + GRTNIC_QUEUE_STATS_LEN) + +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST +static const char grtnic_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define GRTNIC_TEST_LEN (sizeof(grtnic_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + + +#ifdef ETHTOOL_GLINKSETTINGS +static int grtnic_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int max_port_speed = adapter->speed; + int port_speed = adapter->link_speed; + u32 fiber_speed = SPEED_1000; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + if(adapter->type==1) //copper + { + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + + cmd->base.port = PORT_TP; + } + + else //fiber + { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + else + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if(max_port_speed) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + + cmd->base.port = PORT_FIBRE; + fiber_speed = 
max_port_speed ? SPEED_10000 : SPEED_1000; + } + + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + cmd->base.speed = (adapter->type==0) ? fiber_speed : (port_speed==0x03) ? SPEED_10000 : (port_speed==0x02) ? SPEED_1000 : (port_speed==0x01) ? SPEED_100 : SPEED_10; + cmd->base.duplex = DUPLEX_FULL; + } + } + + cmd->base.phy_address = adapter->func; + cmd->base.autoneg = AUTONEG_ENABLE; + + return 0; +} + +static int grtnic_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) +{ + return 0; +} + + +#else /* !ETHTOOL_GLINKSETTINGS */ +static int grtnic_nic_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int max_port_speed = adapter->speed; + int port_speed = adapter->link_speed; + u32 fiber_speed = SPEED_1000; + + if(adapter->type==1) //copper + { + ecmd->supported = ( SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + (max_port_speed ? SUPPORTED_10000baseT_Full : 0)| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + + ecmd->advertising = ecmd->supported | ADVERTISED_TP | ADVERTISED_Autoneg | ADVERTISED_Pause; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + } + + else //fiber + { + ecmd->supported = (max_port_speed ? SUPPORTED_10000baseT_Full : SUPPORTED_1000baseT_Full) | SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause; + ecmd->advertising = ecmd->supported | ADVERTISED_FIBRE | ADVERTISED_Autoneg | ADVERTISED_Pause; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + fiber_speed = max_port_speed ? SPEED_10000 : SPEED_1000; + } + + ecmd->speed = SPEED_UNKNOWN; + ecmd->duplex = DUPLEX_UNKNOWN; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + ecmd->speed = (adapter->type==0) ? fiber_speed : (port_speed==0x03) ? SPEED_10000 : (port_speed==0x02) ? SPEED_1000 : (port_speed==0x01) ? 
SPEED_100 : SPEED_10; + ecmd->duplex = DUPLEX_FULL; + } + } + + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->phy_address = adapter->func; + + return 0; +} + +static int grtnic_nic_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ +// struct grtnic_port *grtnic_port = netdev_priv(netdev); +// struct grtnic_adapter *adapter = grtnic_port->adapter; +// u32 phy_mode_control_val32 = 0; + + //printk("netdev_no=%d, autoneg=%d, speed=%d duplex=%d\n", port_adapter->netdev_no, ecmd->autoneg, ecmd->speed, ecmd->duplex); + + return 0; +#if 0 + phy_mode_control_val32 = phy_read(adapter, port_adapter->phyid, PHY_MODE_CONTRL_REG); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + phy_mode_control_val32 |= BIT(12); + phy_mode_control_val32 |= BIT(6); /* forced speed selection bit 6,13 */ + } else { + phy_mode_control_val32 &= ~BIT(12); + + if (ecmd->speed == SPEED_1000) { /* 10 */ + phy_mode_control_val32 |= BIT(6); + phy_mode_control_val32 &= ~BIT(13); + } else if (ecmd->speed == SPEED_100 && port_adapter->support_100M) { /* 01 */ + phy_mode_control_val32 &= ~BIT(6); + phy_mode_control_val32 |= BIT(13); + } else + return -EINVAL; + + if (ecmd->duplex == DUPLEX_FULL) + phy_mode_control_val32 |= BIT(8); /* full duplex bit 8 */ + else + phy_mode_control_val32 &= ~BIT(8); + } + + port_adapter->phy_mode_control_val = phy_mode_control_val32; + + chip_rx_disable(adapter); + phy_write(adapter, port_adapter->phyid, PHY_MODE_CONTRL_REG, phy_mode_control_val32); + chip_rx_enable(adapter); + + return 0; +#endif +} + +#endif //* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + +////////////////////////////////////////////////////////////////////////////////////////// + +static void grtnic_nic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + char firmware_version[32]; + char ipxe_version[32]; + u32 hw_version[2]; + u8 offset = adapter->speed ? 
1 : 0; +// u32 sn_h[2]; +// u8 sn_h_l; +// char sn_s[64]; +// u32 chip_temp; + + strncpy(drvinfo->driver, DRIVER_NAME, 32); + strncpy(drvinfo->version, DRIVER_VERSION, 32); + +// chip_temp = read_register(adapter->user_bar + TEMP_STATUS); +// printk("temp = %d\n", (chip_temp*504)/4096-273); +// +// sn_h[0] = read_register(adapter->user_bar + SERIAL_NO); +// sn_h[1] = read_register(adapter->user_bar + SERIAL_NO); +// sn_h_l = read_register(adapter->user_bar + SERIAL_NO) & 0xff; +// sprintf(sn_s, "%08x%08x%02x", sn_h[0],sn_h[1],sn_h_l); +// printk("sn = %s\n", sn_s); + + hw_version[0] = GRTNIC_READ_REG(hw, DESIGN_STATUS, 0); + hw_version[1] = GRTNIC_READ_REG(hw, IPXE_STATUS, 0); + + if(hw_version[0] < 0x200) //maybe old firmware //0x101 + { + read_flash_buffer(adapter, (VPD_OFFSET - (offset * 0x100000)) + VERSION_OFFSET, 2, (u32 *)&hw_version); + sprintf(firmware_version, "%08d", hw_version[0] ^ 0xFFFFFFFF); + sprintf(ipxe_version, "%08d", hw_version[1] ^ 0xFFFFFFFF); + } + else + { + sprintf(firmware_version, "%08x", hw_version[0]); + sprintf(ipxe_version, "%08x", hw_version[1]); + } + + strncpy(drvinfo->fw_version, firmware_version, 32); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) + strncpy(drvinfo->erom_version, ipxe_version, 32); +#endif + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = GRTNIC_STATS_LEN; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static void grtnic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + + pause->autoneg = (mac->fc.fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (mac->fc.current_mode == fc_rx_pause) { + pause->rx_pause = 1; + } else if (mac->fc.current_mode == fc_tx_pause) { + pause->tx_pause = 1; + } else if (mac->fc.current_mode == fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static void +grtnic_nic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +#else +static void grtnic_nic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = GRTNIC_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = GRTNIC_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static int +grtnic_nic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +#else +static int grtnic_nic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (ring->tx_pending > GRTNIC_MAX_NUM_DESCRIPTORS || + ring->tx_pending < GRTNIC_MIN_NUM_DESCRIPTORS || + ring->rx_pending > 
GRTNIC_MAX_NUM_DESCRIPTORS || + ring->rx_pending < GRTNIC_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, + GRTNIC_MIN_NUM_DESCRIPTORS, + GRTNIC_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, + GRTNIC_REQ_TX_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, + GRTNIC_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__GRTNIC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct grtnic_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + grtnic_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct grtnic_ring)); + + temp_ring[i].count = new_tx_count; + err = grtnic_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + grtnic_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + grtnic_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct grtnic_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct grtnic_ring)); + + temp_ring[i].count = new_rx_count; + err = grtnic_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + grtnic_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + grtnic_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct grtnic_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + grtnic_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__GRTNIC_RESETTING, &adapter->state); + return err; +} + + + + +static int grtnic_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + u8 flowctl = 0; + int retval = 0; + + mac->fc.fc_autoneg = pause->autoneg; + + if (mac->fc.fc_autoneg == AUTONEG_ENABLE) { + mac->fc.requested_mode = fc_full; + } else { + if (pause->rx_pause && pause->tx_pause) + mac->fc.requested_mode = fc_full; + else if (pause->rx_pause && !pause->tx_pause) + mac->fc.requested_mode = fc_rx_pause; + else if 
(!pause->rx_pause && pause->tx_pause) + mac->fc.requested_mode = fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + mac->fc.requested_mode = fc_none; + + mac->fc.current_mode = mac->fc.requested_mode; + + } + + if(mac->fc.requested_mode == fc_full) flowctl = 3; + else if(mac->fc.requested_mode == fc_tx_pause) flowctl = 2; + else if(mac->fc.requested_mode == fc_rx_pause) flowctl = 1; + else flowctl = 0; + + if(pause->tx_pause) + grtnic_set_fc_watermarks(netdev); + + GRTNIC_WRITE_REG(hw, ETH_TX_PAUSE, pause->tx_pause, 0); + + grtnic_SetPause(netdev, flowctl); + + return retval; +} + + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int grtnic_get_stats_count(struct net_device *netdev) +{ + return GRTNIC_STATS_LEN; +} + +static int grtnic_diag_test_count(struct net_device *netdev) +{ + return GRTNIC_TEST_LEN; +} + +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + +static int grtnic_get_sset_count(struct net_device *netdev, int sset) +{ +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + struct grtnic_adapter *adapter = netdev_priv(netdev); +#endif +#endif + + switch (sset) { + case ETH_SS_STATS: + return GRTNIC_STATS_LEN; + case ETH_SS_TEST: + return GRTNIC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: +// return IXGBE_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + +static void grtnic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, u64 *data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_NDO_GET_STATS64 + const struct rtnl_link_stats64 *net_stats; + struct rtnl_link_stats64 temp; + unsigned int start; +#else +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif +#endif + struct grtnic_ring *ring; + int i, j; + char *p; + + grtnic_update_stats(adapter); +#ifdef HAVE_NDO_GET_STATS64 + net_stats = dev_get_stats(netdev, &temp); +#endif + + for (i = 0; i < GRTNIC_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + grtnic_gstrings_net_stats[i].stat_offset; + data[i] = (grtnic_gstrings_net_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < GRTNIC_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + grtnic_gstrings_stats[j].stat_offset; + data[i] = (grtnic_gstrings_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < GRTNIC_NUM_TX_QUEUES; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + for (j = 0; j < GRTNIC_NUM_RX_QUEUES; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + +#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin(&ring->syncp); +#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; +#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry(&ring->syncp, start)); +#endif + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } +} + +static void grtnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *grtnic_gstrings_test, + GRTNIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < GRTNIC_NETDEV_STATS_LEN; i++) { + memcpy(p, grtnic_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < GRTNIC_GLOBAL_STATS_LEN; i++) { + memcpy(p, grtnic_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < GRTNIC_NUM_TX_QUEUES; i++) { + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < GRTNIC_NUM_RX_QUEUES; i++) { + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + case ETH_SS_PRIV_FLAGS: +// memcpy(data, ixgbe_priv_flags_strings, +// IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + } +} + +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int grtnic_nic_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + u8 led_cmd, led_on; + + led_cmd = 1<<5; + led_on = 1<<4; + + switch 
(state) { + case ETHTOOL_ID_ACTIVE: +// grtnic_port->led_reg = read_register(adapter->user_bar + MAC_LED_CTL); + return 2; + + case ETHTOOL_ID_ON: + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, (led_cmd|led_on), 0); //led_start+led_on + break; + + case ETHTOOL_ID_OFF: + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, led_cmd, 0); //led_start + led_off + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, 0, 0); //led_stop and led_off + break; + } + + return 0; +} +#else +static int grtnic_nic_phys_id(struct net_device *netdev, u32 data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + u32 i; + u8 led_cmd, led_on; + + led_cmd = 1<<5; + led_on = 1<<4; + + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 1000); i += 400) { + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, (led_cmd|led_on), 0); //led_start+led_on + msleep_interruptible(200); + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, led_cmd, 0); //led_start + led_off + msleep_interruptible(200); + } + + /* Restore LED settings */ + + GRTNIC_WRITE_REG(hw, MAC_LED_CTL, 0, 0); //led_stop and led_off + + return 0; +} +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + +int firmware_is_old(struct grtnic_adapter *adapter) +{ + int old_firmware; + struct grtnic_hw *hw = &adapter->hw; + + GRTNIC_WRITE_REG(hw, FIRMWARE_CMD, 1, 0); + old_firmware = !GRTNIC_READ_REG(hw, FIRMWARE_CMD, 0); + GRTNIC_WRITE_REG(hw, FIRMWARE_CMD, 0, 0); + return old_firmware; +} + +static int grtnic_flash_device(struct net_device *netdev, struct ethtool_flash *flash) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + int i = 0; + + u16 temp = 0; + u16 vid = 0; + u16 pid = 0; + int image_type = 0; + + char version_s[64]; + u32 version = 0; + long version_h; + long result = 0; + + int pxe_size = 0; + char ipxe_ver_s[9] = {0}; + + const struct firmware *fw; + const char *filename = flash->data; + int rc = 0; + + u32 offset = 0; + u32 copied = 0; + + u32 read_filesize; + u32 once_size; + + int firmware_offset; + + int cycle, remainder, schedule; + u32 offset_int; + u32 *segment; + + int old_firmware = firmware_is_old(adapter); + + rc = request_firmware(&fw, filename, &netdev->dev); + if (rc != 0) { + netdev_err(netdev, "PKG error %d requesting file: %s\n", rc, filename); + printk("You MUST copy image file to /lib/firmware directory!!!"); + return rc; + } + + read_filesize = fw->size; + cycle = read_filesize / FLASH_SECTOR_SIZE; + remainder = read_filesize % FLASH_SECTOR_SIZE; + cycle = cycle + (remainder ? 1 : 0); + + + firmware_offset = adapter->speed; //10G,link_speed = 1 + temp = *(u16 *) (fw->data); + + if(temp==0xAA55) //maybe pxe image + { + image_type = 1; + vid = *(u16 *) (fw->data+ 0x20); + pid = *(u16 *) (fw->data+ 0x22); + + pxe_size = read_filesize - 8; //last 8 is ver + memcpy(ipxe_ver_s, (fw->data + pxe_size), 8); +// version = strtol(ipxe_ver_s,NULL,0); + + if(kstrtol(ipxe_ver_s,0,&result)) + result = 0; + version = result^0xFFFFFFFF; + + offset = PXE_OFFSET - (firmware_offset * 0x100000); + } + + else if( (temp&0xff)==0x82 && read_filesize==0x100) //maybe vpd image + { + image_type = 2; + offset = VPD_OFFSET - (firmware_offset * 0x100000); + } + + else //firmware + { + version = *(u32 *)(fw->data); + pid = *(u16 *) (fw->data+ 0x04); //exchange vid & pid pos + vid = *(u16 *) (fw->data+ 0x06); + vid ^=0xFFFF; + pid ^=0xFFFF; + + offset = old_firmware ? 
0 : 0x200000;
+	}
+
+	if(image_type!=2) //VPD images carry no VID/PID, so only check the other image types
+	{
+		if(vid != adapter->pdev->vendor || pid != adapter->pdev->device)
+		{
+			printk("Wrong image!\n\n");
+			release_firmware(fw);
+			return -EINVAL;
+		}
+	}
+
+	printk("Found %s image file! ", (image_type==1) ? "pxe" : (image_type==2) ? "vpd" : "firmware");
+	if(image_type==0 || image_type==1)
+		printk("and version = %08d", version^0xFFFFFFFF);
+
+	printk("\n\n");
+
+	if(image_type==2) //VPD image: no VID/PID check needed
+	{
+		segment = vmalloc(FLASH_SUBSECTOR_SIZE);
+		memset(segment, 0x00, FLASH_SUBSECTOR_SIZE);
+
+		read_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, segment);
+		erase_subsector_flash(adapter, offset);
+
+		memcpy(segment, fw->data, read_filesize);
+
+		write_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, segment);
+		vfree(segment);
+	}
+
+	else //firmware or pxe image
+	{
+		GRTNIC_WRITE_REG(&adapter->hw, FIRMWARE_CMD, 1, 0);
+
+		while (read_filesize>0)
+		{
+			erase_sector_flash(adapter, offset);
+
+			if(read_filesize >= FLASH_SECTOR_SIZE)
+				once_size = FLASH_SECTOR_SIZE;
+			else
+				once_size = read_filesize;
+
+			if(once_size & 0x04)
+				write_flash_buffer(adapter, offset, (once_size>>2)+1, (u32 *)(fw->data + copied));
+			else
+				write_flash_buffer(adapter, offset, once_size>>2, (u32 *)(fw->data + copied));
+
+			schedule = (i+1)*100 / cycle;
+
+			if(i < cycle-1)
+				printk("\rUpgrading--->[%d%%]",schedule);
+			else
+				printk("\rUpgrading--->[%s]\n\n","Done");
+
+			touch_softlockup_watchdog();
+
+			read_filesize = read_filesize - once_size;
+			offset += once_size;
+			copied += once_size;
+			i++;
+		}
+
+		GRTNIC_WRITE_REG(&adapter->hw, FIRMWARE_CMD, 0, 0);
+
+		//next, write the version to flash
+		offset_int = VPD_OFFSET - (firmware_offset * 0x100000);
+		segment = vmalloc(FLASH_SUBSECTOR_SIZE);
+		memset(segment, 0x00, FLASH_SUBSECTOR_SIZE);
+
+		read_flash_buffer(adapter, offset_int, FLASH_SUBSECTOR_SIZE>>2, segment);
+		erase_subsector_flash(adapter, offset_int);
+
+		sprintf(version_s, "%08d", version ^ 0xFFFFFFFF);
+		if(kstrtol(version_s,16,&version_h))
+			version_h = 0;
+		//also store the version parsed as hexadecimal (base 16) so the ASIC's get-version command can read it easily
+
+		segment[(VERSION_OFFSET>>2) + image_type] = version;
+		segment[(VERSION_OFFSET>>2) + 4 + image_type] = version_h ^ 0xFFFFFFFF;
+
+		write_flash_buffer(adapter, offset_int, FLASH_SUBSECTOR_SIZE>>2, segment);
+		vfree(segment);
+	}
+
+	release_firmware(fw);
+
+	printk("Firmware update complete\n");
+//	printk("Triggering IPROG to reload ASIC...\n");
+//	write_register(0xFEE1DEAD, adapter->user_bar + 0x0054);
+	printk("YOU MUST REBOOT THE COMPUTER FOR THE NEW FIRMWARE TO TAKE EFFECT!\n");
+	return rc;
+}
+
+#ifdef ETHTOOL_GRXRINGS
+
+static int grtnic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+			    void *rule_locs)
+#else
+			    u32 *rule_locs)
+#endif
+{
+	struct grtnic_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->num_rx_queues;
+		ret = 0;
+		break;
+//	case ETHTOOL_GRXCLSRLCNT:
+//		cmd->rule_cnt = adapter->fdir_filter_count;
+//		ret = 0;
+//		break;
+//	case ETHTOOL_GRXCLSRULE:
+//		ret = grtnic_get_ethtool_fdir_entry(adapter, cmd);
+//		break;
+//	case ETHTOOL_GRXCLSRLALL:
+//		ret = grtnic_get_ethtool_fdir_all(adapter, cmd,
+//						  (u32 *)rule_locs);
+//		break;
+//	case ETHTOOL_GRXFH:
+//		ret = grtnic_get_rss_hash_opts(adapter, cmd);
+//		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+#endif /* ETHTOOL_GRXRINGS */
+
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+static int 
grtnic_rss_indir_tbl_max(struct grtnic_adapter *adapter) +{ + return 16; +} + +static u32 grtnic_get_rxfh_key_size(struct net_device *netdev) +{ + return GRTNIC_RSS_KEY_SIZE; +} + +static u32 grtnic_rss_indir_size(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + return grtnic_rss_indir_tbl_entries(adapter); +} + +static void grtnic_get_reta(struct grtnic_adapter *adapter, u32 *indir) +{ + int i, reta_size = grtnic_rss_indir_tbl_entries(adapter); + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int grtnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +static int grtnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; +#endif + + if (indir) + grtnic_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, grtnic_get_rxfh_key_size(netdev)); + + return 0; +} + +#ifdef HAVE_RXFH_HASHFUNC +static int grtnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +static int grtnic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +static int grtnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = grtnic_rss_indir_tbl_entries(adapter); + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + return -EINVAL; +#endif + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, grtnic_rss_indir_tbl_max(adapter)); + + /* Verify user input. 
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + grtnic_store_reta(adapter); + } + + /* Fill out the rss hash key */ + if (key) { + memcpy(adapter->rss_key, key, grtnic_get_rxfh_key_size(netdev)); + grtnic_store_key(adapter); + } + + return 0; +} +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +////////////////////////////////////////////////////////////////////////////////////////////// +static irqreturn_t grtnic_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct grtnic_adapter *adapter = netdev_priv(netdev); + + adapter->test_icr = GRTNIC_READ_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_VECTOR*4), 1); + + return IRQ_HANDLED; +} + +static int grtnic_intr_test(struct grtnic_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (GRTNIC_REMOVED(adapter->hw.dma_bar)) { + *data = 1; + return -1; + } + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & GRTNIC_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &grtnic_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &grtnic_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &grtnic_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), 0xFFFFFFFF, 1); + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 2; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), mask, 1); + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), mask, 1); //trigger interrupt + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. + */ + adapter->test_icr = 0; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), mask, 1); + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), mask, 1); //trigger interrupt + GRTNIC_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+			 */
+			adapter->test_icr = 0;
+			GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), ~mask & 0x03, 1);
+			GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), ~mask & 0x03, 1); //trigger interrupt
+			GRTNIC_WRITE_FLUSH(&adapter->hw);
+			usleep_range(10000, 20000);
+
+			if (adapter->test_icr) {
+				*data = 5;
+				break;
+			}
+		}
+	}
+
+	/* Disable all the interrupts */
+	GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), 0xFFFFFFFF, 1);
+	GRTNIC_WRITE_FLUSH(&adapter->hw);
+	usleep_range(10000, 20000);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+
+	return *data;
+}
+
+
+static void grtnic_free_desc_rings(struct grtnic_adapter *adapter)
+{
+	/* Shut down the DMA engines now so they can be reinitialized later,
+	 * since the test rings and normally used rings should overlap on
+	 * queue 0, we can just use the standard disable Rx/Tx calls and they
+	 * will take care of disabling the test rings for us.
+	 */
+
+	/* first Rx */
+	grtnic_disable_rx_queue(adapter);
+
+	/* now Tx */
+	grtnic_disable_tx_queue(adapter);
+
+	grtnic_reset(adapter);
+
+	grtnic_free_tx_resources(&adapter->test_tx_ring);
+	grtnic_free_rx_resources(&adapter->test_rx_ring);
+}
+
+
+static int grtnic_setup_desc_rings(struct grtnic_adapter *adapter)
+{
+	struct grtnic_ring *tx_ring = &adapter->test_tx_ring;
+	struct grtnic_ring *rx_ring = &adapter->test_rx_ring;
+	int ret_val;
+	int err;
+
+	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = GRTNIC_DEFAULT_TXD;
+	tx_ring->queue_index = 0;
+	tx_ring->dev = pci_dev_to_dev(adapter->pdev);
+	tx_ring->netdev = adapter->netdev;
+	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
+
+	err = grtnic_setup_tx_resources(tx_ring);
+	if (err)
+		return 1;
+
+	grtnic_configure_tx_ring(adapter, tx_ring);
+
+	/* Setup Rx Descriptor ring and Rx buffers */
+	rx_ring->count = GRTNIC_DEFAULT_RXD;
+	rx_ring->queue_index = 0;
+	rx_ring->dev = pci_dev_to_dev(adapter->pdev);
+	rx_ring->netdev = adapter->netdev;
+	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+	rx_ring->rx_buffer_len = GRTNIC_RXBUFFER_2K;
+
+	err = grtnic_setup_rx_resources(rx_ring);
+	if (err) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	grtnic_SetRx(adapter->netdev, 0); //stop rx
+	GRTNIC_WRITE_REG(&adapter->hw, ASIC_RX_FIFO_RST, 0xff, 0); //reset all channel rx fifo data
+
+	grtnic_configure_rx_ring(adapter, rx_ring);
+
+	grtnic_SetRx(adapter->netdev, 1); //start rx
+
+	return 0;
+
+err_nomem:
+	grtnic_free_desc_rings(adapter);
+	return ret_val;
+}
+
+static int grtnic_setup_loopback_test(struct grtnic_adapter *adapter)
+{
+	struct grtnic_hw *hw = &adapter->hw;
+	u32 phy_addr = hw->phy_addr;
+	u16 reg_data;
+	u8 promisc_mode = 1;
+
+	GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x00, 0); //enable laser; only for led blink
+
+	if(adapter->ei->type == board_1002E_GRT_FF || adapter->ei->type == board_1005E_GRT_FX)
+	{
+		//enable loopback
+		grtnic_SetPhyAddr(adapter->netdev, phy_addr, 0x01, 0x00); //prtad_devad_reg //mdio reg:1.0
+		grtnic_PhyRead(adapter->netdev, phy_addr, 0x01, &reg_data);
+		reg_data |= 0x01; //loopback 1.0.0
+		grtnic_PhyWrite(adapter->netdev, phy_addr, 0x01, reg_data);
+	}
+
+	else
+	{
+		/* Setup PHY loopback */
+		grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, &reg_data);
+
+		reg_data |= PHY_LOOPBACK;
+		reg_data &= ~PHY_ISOLATE;
+		reg_data &= ~PHY_AUTO_NEG_EN;
+
+		if(adapter->ei->type == board_902T_GRT_FF)
+			reg_data &= ~PHY_POWER_DOWN;
+
+		grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, reg_data);
+
+		if(adapter->ei->type == board_902T_GRT_FF)
+		{
+			/* Setup mac speed */
+			grtnic_ResetRx(adapter->netdev);
+			grtnic_SetSpeed(adapter->netdev, 0x02); //speed 1000
+		}
+	}
+
+	/* enable promiscuous mode for the loopback test */
+	reg_data = grtnic_GetAdrsFilter(adapter->netdev);
+	reg_data |= promisc_mode; //promisc
+	grtnic_SetAdrsFilter(adapter->netdev, reg_data);
+
+	GRTNIC_WRITE_REG(hw, CSUM_ENABLE, 0, 0); //tx/rx checksum off
+
+	usleep_range(10000, 20000);
+
+	return 0;
+}
+
+static void grtnic_loopback_cleanup(struct grtnic_adapter *adapter)
+{
+	struct grtnic_hw *hw = &adapter->hw;
+	u32 phy_addr = hw->phy_addr;
+	u16 reg_data;
+	u8 promisc_mode = 1;
+	u8 csum_tx_mode = 0, csum_rx_mode = 0;
+
+	if(adapter->flags & GRTNIC_FLAG_TXCSUM_CAPABLE) csum_tx_mode = 1;
+	if(adapter->flags & GRTNIC_FLAG_RXCSUM_CAPABLE) csum_rx_mode = 1;
+	GRTNIC_WRITE_REG(hw, CSUM_ENABLE, (csum_rx_mode << 1 | csum_tx_mode), 0); //tell the ASIC to restore tx/rx checksum offload
+
+	if(adapter->ei->type == board_1002E_GRT_FF || adapter->ei->type == board_1005E_GRT_FX)
+	{
+		//disable loopback
+		grtnic_SetPhyAddr(adapter->netdev, phy_addr, 0x01, 0x00); //prtad_devad_reg //mdio reg:1.0
+		grtnic_PhyRead(adapter->netdev, phy_addr, 0x01, &reg_data);
+		reg_data &= ~0x01; //clear loopback 1.0.0
+		grtnic_PhyWrite(adapter->netdev, phy_addr, 0x01, reg_data);
+	}
+
+	else
+	{
+		/* Clear PHY loopback */
+		grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, &reg_data);
+		reg_data &= ~PHY_LOOPBACK;
+		reg_data |= PHY_AUTO_NEG_EN;
+
+		if(adapter->ei->type == board_902T_GRT_FF)
+			reg_data |= PHY_POWER_DOWN;
+		grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, reg_data);
+	}
+
+	/* clear promiscuous mode */
+	reg_data = grtnic_GetAdrsFilter(adapter->netdev);
+	reg_data &= ~promisc_mode; //promisc
+	grtnic_SetAdrsFilter(adapter->netdev, reg_data);
+}
+
+static void grtnic_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size >>= 1;
+	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size + 10], 0xBE, 1);
+	memset(&skb->data[frame_size + 12], 0xAF, 1);
+}
+
+static bool grtnic_check_lbtest_frame(struct grtnic_rx_buffer *rx_buffer, unsigned int frame_size)
+{
+	unsigned char *data;
+	bool match = true;
+
+	frame_size >>= 1;
+
+#ifdef CONFIG_DISABLE_PACKET_SPLIT
+	data = rx_buffer->skb->data;
+#else
+	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+#endif
+
+	if (data[3] != 0xFF ||
+	    data[frame_size + 10] != 0xBE ||
+	    data[frame_size + 12] != 0xAF)
+		match = false;
+
+#ifndef CONFIG_DISABLE_PACKET_SPLIT
+	kunmap(rx_buffer->page);
+
+#endif
+	return match;
+}
+
+static u16 grtnic_clean_test_rings(struct grtnic_ring *rx_ring, struct grtnic_ring *tx_ring, unsigned int size)
+{
+	union grtnic_rx_desc *rx_desc;
+#ifdef CONFIG_DISABLE_PACKET_SPLIT
+	const int bufsz = rx_ring->rx_buffer_len;
+#else
+	const int bufsz = grtnic_rx_bufsz(rx_ring);
+#endif
+	u16 rx_ntc, tx_ntc, count = 0;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = GRTNIC_RX_DESC(*rx_ring, rx_ntc);
+
+	while (tx_ntc != tx_ring->next_to_use) {
+		union grtnic_tx_desc *tx_desc;
+		struct grtnic_tx_buffer *tx_buffer;
+
+		tx_desc = GRTNIC_TX_DESC(*tx_ring, tx_ntc);
+
+		/* if DD is not set transmit has not completed */
+		if (!tx_desc->wb.len_ctl.cmp)
+			return count;
+
+		/* unmap buffer on Tx side */
+		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+		                 
dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + /* increment Tx next to clean counter */ + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + } + + while (rx_desc->wb.upper.len_ctl.cmp) { + struct grtnic_rx_buffer *rx_buffer; + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (grtnic_check_lbtest_frame(rx_buffer, size)) + count++; + else + break; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + bufsz, + DMA_FROM_DEVICE); + + /* increment Rx next to clean counter */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = GRTNIC_RX_DESC(*rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + grtnic_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +#define DESC_PER_LOOP 64 + +static int grtnic_run_loopback_test(struct grtnic_adapter *adapter) +{ + struct grtnic_ring *tx_ring = &adapter->test_tx_ring; + struct grtnic_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + grtnic_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / DESC_PER_LOOP) * 2) + 1; + else + lc = ((rx_ring->count / DESC_PER_LOOP) * 2) + 1; + + for (j = 0; j <= lc; j++) { + unsigned int good_cnt; + + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < DESC_PER_LOOP; i++) { + skb_get(skb); + tx_ret_val = grtnic_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != DESC_PER_LOOP) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = grtnic_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != DESC_PER_LOOP) { + ret_val = 13; + break; + } + + } + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + + +static int grtnic_loopback_test(struct grtnic_adapter *adapter, u64 *data) +{ + *data = grtnic_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = grtnic_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = grtnic_run_loopback_test(adapter); + grtnic_loopback_cleanup(adapter); + +err_loopback: + grtnic_free_desc_rings(adapter); +out: + return *data; +} + +static bool grtnic_eeprom_test(struct grtnic_adapter *adapter, u64 *data) +{ + int firmware_offset = adapter->speed; //10G,link_speed = 1 + int offset_int = 0xF00000 - (firmware_offset * 0x100000); + u32 ident_id; + + read_flash_buffer(adapter, offset_int, 1, &ident_id); + + if (ident_id != 0x665599AA) { + *data = 1; + return true; + } else { + *data = 0; + return false; + } +} + + +/* ethtool register test data */ +struct grtnic_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + 
u32 mask; + u32 write; +}; + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +static struct grtnic_reg_test reg_test[] = { + { MAX_LED_PKT_NUM, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +// { MAC_ADRS_LOW, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { MAC_ADRS_HIGH, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { FC_WATERMARK, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { MAC_ADRS_FILTER, 1, SET_READ_TEST, 0x00000007, 0x00000007 }, + { CSUM_ENABLE, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, + { .reg = 0 } +}; + +static bool reg_set_and_check(struct grtnic_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + before = GRTNIC_READ_REG(&adapter->hw, reg, 0); + GRTNIC_WRITE_REG(&adapter->hw, reg, write & mask, 0); + usleep_range(10, 20); //wait for data stable + val = GRTNIC_READ_REG(&adapter->hw, reg, 0); + + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + return true; + } + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + return false; +} + + +static bool reg_pattern_test(struct grtnic_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = GRTNIC_READ_REG(&adapter->hw, reg, 0); + GRTNIC_WRITE_REG(&adapter->hw, reg, test_pattern[pat] & write, 0); + usleep_range(10, 20); //wait for data stable + val = GRTNIC_READ_REG(&adapter->hw, reg, 0); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + return true; + } + GRTNIC_WRITE_REG(&adapter->hw, reg, before, 0); + } + return false; +} + +static bool grtnic_reg_test(struct grtnic_adapter *adapter, u64 *data) +{ + struct grtnic_reg_test *test; + struct grtnic_hw *hw = &adapter->hw; + u32 i; + + if (GRTNIC_REMOVED(hw->user_bar)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test; + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + GRTNIC_WRITE_REG(hw, test->reg + (i * 0x40), + test->write, 0); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + + +static void grtnic_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct grtnic_hw *hw = &adapter->hw; + + if (GRTNIC_REMOVED(hw->user_bar)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__GRTNIC_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if(GRTNIC_READ_REG(hw, XPHY_STATUS, 0) & 0x01) //link up + data[4] = 0; + else { + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + if (if_running) + /* indicate we're in test mode */ + grtnic_close(netdev); + else + grtnic_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (grtnic_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (grtnic_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (grtnic_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + + e_info(hw, "loopback testing starting\n"); + if (grtnic_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + grtnic_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__GRTNIC_TESTING, &adapter->state); + + if (if_running) + grtnic_open(netdev); + else + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x01, 0); //disable laser; + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if(GRTNIC_READ_REG(hw, XPHY_STATUS, 0) & 0x01) //link up + data[4] = 0; + else { + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__GRTNIC_TESTING, &adapter->state); + } + + msleep_interruptible(4 * 1000); +} + +#ifdef ETHTOOL_GMODULEINFO +static int grtnic_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct grtnic_adapter *adapter = netdev_priv(dev); + struct grtnic_hw *hw = &adapter->hw; + u32 status; + u8 sff8472_rev, addr_mode; + bool page_swap = false; + + if(adapter->type!=0) //not fiber + return 0; + + /* Check whether we 
support SFF-8472 or not */ + status = grtnic_read_i2c_eeprom(hw, GRTNIC_SFF_SFF_8472_COMP, &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = grtnic_read_i2c_eeprom(hw, GRTNIC_SFF_SFF_8472_SWAP, &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & GRTNIC_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == GRTNIC_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int grtnic_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) +{ + struct grtnic_adapter *adapter = netdev_priv(dev); + struct grtnic_hw *hw = &adapter->hw; + u32 status = GRTNIC_ERR_PHY_ADDR_INVALID; + u8 databyte = 0xFF; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + for (i = ee->offset; i < ee->offset + ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__GRTNIC_IN_SFP_INIT, &adapter->state)) + return -EBUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = grtnic_read_i2c_eeprom(hw, i, &databyte); + else + status = grtnic_read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - ee->offset] = databyte; + } + + return 0; +} +#endif /* ETHTOOL_GMODULEINFO */ + +#ifndef HAVE_NDO_SET_FEATURES +static u32 grtnic_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +static int grtnic_set_rx_csum(struct net_device *netdev, u32 data) +{ + + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + return 0; +} + +static int grtnic_set_tx_csum(struct net_device *netdev, u32 data) +{ + + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} +#endif /* HAVE_NDO_SET_FEATURES */ + +static int grtnic_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int grtnic_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset 
= false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = GRTNIC_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = GRTNIC_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < GRTNIC_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= GRTNIC_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < GRTNIC_100K_ITR)) + need_reset = true; + } + + /* check the old value and enable RSC if necessary */ +// need_reset |= grtnic_update_rsc(adapter); + +// if (adapter->hw.mac.dmac_config.watchdog_timer && +// (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { +// e_info(probe, +// "Disabling DMA coalescing because interrupt throttling is disabled\n"); +// adapter->hw.mac.dmac_config.watchdog_timer = 0; +// ixgbe_dmac_config(&adapter->hw); +// } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct grtnic_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + grtnic_write_itr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + grtnic_do_reset(netdev); + + return 0; +} + +static u32 grtnic_get_msglevel(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void grtnic_set_msglevel(struct net_device *netdev, u32 data) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +///////////////////////////////////////////////////////////////////////////////////////////// +static struct ethtool_ops grtnic_nic_ethtool_ops = { +#ifdef ETHTOOL_GLINKSETTINGS + .get_link_ksettings = grtnic_get_link_ksettings, + .set_link_ksettings = grtnic_set_link_ksettings, +#else + .get_settings = grtnic_nic_get_settings, + .set_settings = grtnic_nic_set_settings, +#endif + .get_drvinfo = grtnic_nic_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = grtnic_nic_get_ringparam, + .set_ringparam = grtnic_nic_set_ringparam, + .get_pauseparam = grtnic_get_pauseparam, + .set_pauseparam = grtnic_set_pauseparam, + .get_msglevel = 
grtnic_get_msglevel, + .set_msglevel = grtnic_set_msglevel, +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .self_test_count = grtnic_diag_test_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .self_test = grtnic_diag_test, + .get_strings = grtnic_get_strings, + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = grtnic_nic_set_phys_id, +#else + .phys_id = grtnic_nic_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = grtnic_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = grtnic_get_sset_count, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_ethtool_stats = grtnic_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = grtnic_get_coalesce, + .set_coalesce = grtnic_set_coalesce, +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = grtnic_get_rx_csum, + .set_rx_csum = grtnic_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = grtnic_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = grtnic_get_rxnfc, +// .set_rxnfc = ixgbe_set_rxnfc, +//#ifdef ETHTOOL_SRXNTUPLE +// .set_rx_ntuple = ixgbe_set_rx_ntuple, +//#endif +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = grtnic_get_module_info, + .get_module_eeprom = grtnic_get_module_eeprom, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = grtnic_rss_indir_size, + .get_rxfh_key_size = grtnic_get_rxfh_key_size, + .get_rxfh = grtnic_get_rxfh, + .set_rxfh = grtnic_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + + .flash_device = grtnic_flash_device, +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext grtnic_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .set_phys_id = grtnic_nic_set_phys_id, + +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = grtnic_get_module_info, + .get_module_eeprom = grtnic_get_module_eeprom, +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = grtnic_rss_indir_size, + .get_rxfh_key_size = grtnic_get_rxfh_key_size, + .get_rxfh = grtnic_get_rxfh, + .set_rxfh = grtnic_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +}; + +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + + +void grtnic_set_ethtool_ops(struct net_device *netdev) +{ +#ifndef ETHTOOL_OPS_COMPAT + netdev->ethtool_ops = &grtnic_nic_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &grtnic_nic_ethtool_ops); +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &grtnic_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +} \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_macphy.c b/drivers/net/ethernet/guangruntong/grtnic_macphy.c new file mode 100755 index 0000000000000..fd733d369a8aa --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_macphy.c @@ -0,0 +1,1009 @@ +#include "grtnic.h" +#include "grtnic_macphy.h" + +void grtnic_SetSpeed(struct net_device *netdev, int speed) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = 
XXGE_PORT_ADDRBASE; + u32 Speed_reg = speed << 30; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_SPEED_OFFSET), Speed_reg, 0); +} + + +void grtnic_SetFc(struct net_device *netdev, int onoff) //flow control is not used here; use grtnic_SetPause() below instead +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 Reg; + + Reg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), 0); + if(onoff) + { + Reg |= XXGE_FCC_FCRX_MASK; + } + else + { + Reg &= ~XXGE_FCC_FCRX_MASK; + } + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), Reg, 0); +} + +int grtnic_ResetTx(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegTc; + u32 TimeoutLoops; + + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + RegTc |= XXGE_TC_RST_MASK; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TC_OFFSET), RegTc, 0); + TimeoutLoops = XXGE_RST_DELAY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && (RegTc & XXGE_TC_RST_MASK)) { + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + TimeoutLoops --; + } //return something + + if(0 == TimeoutLoops ) { + return 1; + } + return 0; +} + +int grtnic_ResetRx(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegRcw1; + u32 TimeoutLoops; + + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + RegRcw1 |= XXGE_RCW1_RST_MASK; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), RegRcw1, 0); + + TimeoutLoops = XXGE_RST_DELAY_LOOPCNT_VAL; + /* Poll until the reset is done */ + while (TimeoutLoops && (RegRcw1 & XXGE_RCW1_RST_MASK)) { + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + TimeoutLoops --; + } //return something + + if(0 == TimeoutLoops ) { + return 1; + } + return 0; +} + + +void grtnic_SetTx(struct net_device *netdev, int onoff) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegTc; + + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + if(onoff) + { + RegTc |= XXGE_TC_TX_MASK; + } + else + { + RegTc &= ~XXGE_TC_TX_MASK; + } + + RegTc |= XXGE_TC_DIC_MASK; //Deficit Idle Count Enable + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TC_OFFSET), RegTc, 0); + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); +} + +void grtnic_SetRx(struct net_device *netdev, int onoff) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 RegRcw1; + + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + + if(onoff) + { + RegRcw1 |= XXGE_RCW1_RX_MASK; + } + else + { + RegRcw1 &= ~XXGE_RCW1_RX_MASK; + } + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), RegRcw1, 0); + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); +// printk("after RegRcw1 = %08x\n", RegRcw1); +} + +void grtnic_GetRx(struct net_device *netdev, u32 *status) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + *status = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); +// printk("read RegRcw1 = %08x\n", *status); +} + +void grtnic_SetMaxFrameLen(struct net_device *netdev, int len) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct
grtnic_hw *hw = &adapter->hw; + + u32 value; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + value = ((len & 0x7FFF) | (1<<16)); + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TMTU_OFFSET), value, 0); + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RMTU_OFFSET), value, 0); +} + +void grtnic_SetJumbo(struct net_device *netdev, int onoff) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 RegRcw1; + u32 RegTc; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + RegRcw1 = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + RegTc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_TC_OFFSET), 0); + + if(onoff) + { + RegRcw1 |= XXGE_RCW1_JUM_MASK; + RegTc |= XXGE_TC_JUM_MASK; + } + else + { + RegRcw1 &= ~XXGE_RCW1_JUM_MASK; + RegTc &= ~XXGE_TC_JUM_MASK; + } + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), RegRcw1, 0); + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_TC_OFFSET), RegTc, 0); +} + +void grtnic_SetAdrsFilter(struct net_device *netdev, int filter) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + GRTNIC_WRITE_REG(hw, MAC_ADRS_FILTER, filter, 0); +} + +int grtnic_GetAdrsFilter(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + int filter; + + filter = GRTNIC_READ_REG(hw, MAC_ADRS_FILTER, 0); + return filter; +} + +void grtnic_SetMacAddress(struct net_device *netdev, const u8 *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + MacAddr = Aptr[0]; + MacAddr |= Aptr[1] << 8; + MacAddr |= Aptr[2] << 16; + MacAddr |= Aptr[3] << 24; + GRTNIC_WRITE_REG(hw, MAC_ADRS_LOW, MacAddr, 0); //addr l + + MacAddr = 0; + MacAddr |= Aptr[4]; + MacAddr |= Aptr[5] << 8; + GRTNIC_WRITE_REG(hw, MAC_ADRS_HIGH, MacAddr, 0); //addr h +} + + +void grtnic_GetMacAddress(struct net_device *netdev, void *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + MacAddr = GRTNIC_READ_REG(hw, MAC_ADRS_LOW, 0); + Aptr[0] = (u8) MacAddr; + Aptr[1] = (u8) (MacAddr >> 8); + Aptr[2] = (u8) (MacAddr >> 16); + Aptr[3] = (u8) (MacAddr >> 24); + + MacAddr = GRTNIC_READ_REG(hw, MAC_ADRS_HIGH, 0); + Aptr[4] = (u8) MacAddr; + Aptr[5] = (u8) (MacAddr >> 8); +} + +void grtnic_PhySetMdioDivisor(struct net_device *netdev, u8 Divisor) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG0_OFFSET), ((u32) Divisor | XXGE_MDIO_CFG0_MDIOEN_MASK), 0); +} + +void grtnic_SetPhyAddr(struct net_device *netdev, u32 Prtad, u32 Devad, u32 RegisterNum) //only for 10G phy +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 Address; + u32 MdioCtrlReg = 0; + u32 TimeoutLoops; + /* Sequence of steps is: + * - Set MDIO REG (TX Data) + * - TX Data opcode (CFG1) 0x00 and PRTAD, DEVAD be written (TX Data) + * - Check for MDIO ready at every step + */ + + /* + * Wait till the MDIO interface is ready to accept a new transaction. 
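+ * The ready bit is polled at most XXGE_MDIO_RDY_LOOPCNT_VAL times; on + * timeout the code currently falls through silently (the printk below is + * commented out) instead of returning an error.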
+ */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the MDIO interface is ready */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return something + +// if(0 == TimeoutLoops ) printk("Timeout 1\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_TX_DATA_OFFSET), RegisterNum, 0); + + /* Now initiate the set PHY register address operation */ + Address = ((Prtad << 24) | (Devad << 16)); + MdioCtrlReg = Address | XXGE_MDIO_CFG1_INITIATE_MASK; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), MdioCtrlReg, 0); + /* + * Wait till MDIO transaction is completed. + */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the MDIO interface is ready */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return something + +// if(0 == TimeoutLoops ) printk("Timeout 2\n"); +////////////////////////////////////////////////////////////////////////////////////////////// +} + + +void grtnic_PhyRead(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 *PhyDataPtr) //if 10G, RegisterNum is devad +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + int max_speed = adapter->speed; + u32 mdio_cfg1_op_read_mask; + u32 TimeoutLoops; + + + u32 Address; + u32 MdioCtrlReg = 0; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + /* Sequence of steps is: + * - Set Address opcode (CFG1) and actual address (TX Data) + * - RX Data opcode (CFG1) and actual data read (RX Data) + * - Check for MDIO ready at every step + */ + + /* + * Wait till MDIO interface is ready to accept a new transaction. + */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the MDIO interface is ready */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return something + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + + mdio_cfg1_op_read_mask = (max_speed==1) ? XXGE_MDIO_CFG1_OP_READ_MASK_10G : XXGE_MDIO_CFG1_OP_READ_MASK; + + + /* Now initiate the PHY register read operation */ + + Address = ((PhyAddress << 24) | (RegisterNum << 16)); + MdioCtrlReg = Address | XXGE_MDIO_CFG1_INITIATE_MASK | mdio_cfg1_op_read_mask; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), MdioCtrlReg, 0); + /* + * Wait till MDIO transaction is completed.
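+ * Once the ready bit is set again the 16-bit result is available in + * XXGE_MDIO_RX_DATA_OFFSET and is read out below.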
+ */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the MDIO interface is ready */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return something + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + *PhyDataPtr = (u16) GRTNIC_READ_REG(&adapter->hw, (BaseAddr + XXGE_MDIO_RX_DATA_OFFSET), 0); +} + + +void grtnic_PhyWrite(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 PhyData) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + u32 Address; + u32 TimeoutLoops; + + u32 MdioCtrlReg = 0; + /* Sequence of steps is: + * - Set Address opcode (CFG1) and actual address (TX Data) + * - TX Data opcode (CFG1) and actual data to be written (TX Data) + * - Check for MDIO ready at every step + */ + + /* + * Wait till the MDIO interface is ready to accept a new transaction. + */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the MDIO interface is ready */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return something + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + + /* Now initiate the PHY register write operation */ + Address = ((PhyAddress << 24) | (RegisterNum << 16)); + MdioCtrlReg = Address | XXGE_MDIO_CFG1_INITIATE_MASK | XXGE_MDIO_CFG1_OP_WRITE_MASK; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_TX_DATA_OFFSET), (PhyData & XXGE_MDIO_TX_DATA_MASK), 0); + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), MdioCtrlReg, 0); + /* + * Wait till MDIO transaction is completed.
+ */ +////////////////////////////////////////////////////////////////////////////////////////////// + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + + TimeoutLoops = XXGE_MDIO_RDY_LOOPCNT_VAL; + /* Poll until the MDIO interface is ready */ + while (TimeoutLoops && !(MdioCtrlReg & XXGE_MDIO_CFG1_READY_MASK)) { + MdioCtrlReg = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_MDIO_CFG1_OFFSET), 0); + TimeoutLoops --; + } //return something + +// if(0 == TimeoutLoops ) printk("Timeout\n"); +////////////////////////////////////////////////////////////////////////////////////////////// + +} + +void grtnic_SetPause (struct net_device *netdev, u8 flowctl) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 RegFc; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + + RegFc = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), 0); + + printk("RegFc = %08x, flowctl=%x\n",RegFc, flowctl); + RegFc &= ~(XXGE_FCC_FCRX_MASK | XXGE_FCC_FCTX_MASK); + RegFc |= flowctl<<29; + + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_FCC_OFFSET), RegFc, 0); +} + +void grtnic_set_fc_watermarks (struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u32 value = ((ETH_HIGH_MARK << 16) | ETH_LOW_MARK); + GRTNIC_WRITE_REG(hw, FC_WATERMARK, value, 0); +} + +void grtnic_SetMacPauseAddress(struct net_device *netdev, const u8 *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + /* Set the MAC bits [31:0] in RCW0 register */ + MacAddr = Aptr[0]; + MacAddr |= Aptr[1] << 8; + MacAddr |= Aptr[2] << 16; + MacAddr |= Aptr[3] << 24; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW0_OFFSET), MacAddr, 0); + + /* RCW1 contains other info that must be preserved */ + MacAddr = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + MacAddr &= ~XXGE_RCW1_PAUSEADDR_MASK; + /* Set MAC bits [47:32] */ + MacAddr |= Aptr[4]; + MacAddr |= Aptr[5] << 8; + GRTNIC_WRITE_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), MacAddr, 0); +} + +void grtnic_GetMacPauseAddress(struct net_device *netdev, void *AddressPtr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + u32 MacAddr; + u8 *Aptr = (u8 *) AddressPtr; + + /* Read MAC bits [31:0] in RCW0 */ + MacAddr = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW0_OFFSET), 0); + Aptr[0] = (u8) MacAddr; + Aptr[1] = (u8) (MacAddr >> 8); + Aptr[2] = (u8) (MacAddr >> 16); + Aptr[3] = (u8) (MacAddr >> 24); + + /* Read MAC bits [47:32] in RCW1 */ + MacAddr = GRTNIC_READ_REG(hw, (BaseAddr + XXGE_RCW1_OFFSET), 0); + Aptr[4] = (u8) MacAddr; + Aptr[5] = (u8) (MacAddr >> 8); +} + +u64 grtnic_get_statistics_cnt(struct grtnic_adapter *adapter, u32 reg, u32 old_cnt) +{ + struct grtnic_hw *hw = &adapter->hw; + u64 new_val; + u32 temp_val0 = 0; + u64 temp_val1 = 0; + u16 BaseAddr = XXGE_PORT_ADDRBASE; + + temp_val0 = GRTNIC_READ_REG(hw, (BaseAddr + reg), 0); //low 32 bits + temp_val1 = GRTNIC_READ_REG(hw, (BaseAddr + reg + 4), 0); //high 32 bits + new_val = (temp_val1 << 32) | temp_val0; + + return new_val; +} + +///////////////////////////////////////////////////////////////////////////////// +/** + * grtnic_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on
X550 hardware. + **/ +static void grtnic_lower_i2c_clk(struct grtnic_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("grtnic_lower_i2c_clk"); + + *i2cctl &= ~(GRTNIC_I2C_CLK_OUT); + + GRTNIC_WRITE_REG(hw, I2CCTL, *i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(GRTNIC_I2C_T_FALL); +} + +/** + * grtnic_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. + **/ +static void grtnic_raise_i2c_clk(struct grtnic_hw *hw, u32 *i2cctl) +{ + u32 i = 0; + u32 timeout = GRTNIC_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; + + DEBUGFUNC("grtnic_raise_i2c_clk"); + + for (i = 0; i < timeout; i++) { + *i2cctl |= GRTNIC_I2C_CLK_OUT; + + GRTNIC_WRITE_REG(hw, I2CCTL, *i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ + usec_delay(GRTNIC_I2C_T_RISE); + + i2cctl_r = GRTNIC_READ_REG(hw, I2CCTL, 0); + if (i2cctl_r & GRTNIC_I2C_CLK_IN) + break; + } +} + +/** + * grtnic_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. + **/ +static bool grtnic_get_i2c_data(struct grtnic_hw *hw, u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("grtnic_get_i2c_data"); + + if (*i2cctl & GRTNIC_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * grtnic_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. 
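+ * After driving SDA the code waits T_RISE + T_FALL + T_SU_DATA and, when + * the bit being set is 1, reads the pin back to detect another device + * holding the line low.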
+ **/ +static s32 grtnic_set_i2c_data(struct grtnic_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = GRTNIC_SUCCESS; + + DEBUGFUNC("grtnic_set_i2c_data"); + + if (data) + *i2cctl |= GRTNIC_I2C_DATA_OUT; + else + *i2cctl &= ~(GRTNIC_I2C_DATA_OUT); + + GRTNIC_WRITE_REG(hw, I2CCTL, *i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(GRTNIC_I2C_T_RISE + GRTNIC_I2C_T_FALL + GRTNIC_I2C_T_SU_DATA); + + if (!data) /* Can't verify data in this case */ + return GRTNIC_SUCCESS; + + /* Verify data was set correctly */ + *i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + if (data != grtnic_get_i2c_data(hw, i2cctl)) { + status = GRTNIC_ERR_I2C; + printk("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * grtnic_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static void grtnic_clock_in_i2c_bit(struct grtnic_hw *hw, bool *data) +{ + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_clock_in_i2c_bit"); + + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + *data = grtnic_get_i2c_data(hw, &i2cctl); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(GRTNIC_I2C_T_LOW); +} + +/** + * grtnic_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 grtnic_clock_out_i2c_bit(struct grtnic_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_clock_out_i2c_bit"); + + status = grtnic_set_i2c_data(hw, &i2cctl, data); + if (status == GRTNIC_SUCCESS) { + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. + */ + usec_delay(GRTNIC_I2C_T_LOW); + } else { + status = GRTNIC_ERR_I2C; + printk("I2C data was not set to %X\n", data); + } + + return status; +} + +/** + * grtnic_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. + **/ +static void grtnic_i2c_start(struct grtnic_hw *hw) +{ + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_i2c_start"); + + /* Start condition must begin with data and clock high */ + grtnic_set_i2c_data(hw, &i2cctl, 1); + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(GRTNIC_I2C_T_SU_STA); + + grtnic_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(GRTNIC_I2C_T_HD_STA); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(GRTNIC_I2C_T_LOW); + +} + +/** + * grtnic_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
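+ * The sequence below is: drive SDA low, raise SCL, wait T_SU_STO, release + * SDA high, then wait T_BUF before the bus is considered free.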
+ **/ +static void grtnic_i2c_stop(struct grtnic_hw *hw) +{ + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + DEBUGFUNC("grtnic_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + grtnic_set_i2c_data(hw, &i2cctl, 0); + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(GRTNIC_I2C_T_SU_STO); + + grtnic_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(GRTNIC_I2C_T_BUF); +} + +/** + * grtnic_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static void grtnic_clock_in_i2c_byte(struct grtnic_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("grtnic_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + grtnic_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } +} + +/** + * grtnic_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 grtnic_clock_out_i2c_byte(struct grtnic_hw *hw, u8 data) +{ + s32 status = GRTNIC_SUCCESS; + s32 i; + u32 i2cctl; + bool bit; + + DEBUGFUNC("grtnic_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = grtnic_clock_out_i2c_bit(hw, bit); + + if (status != GRTNIC_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + i2cctl |= GRTNIC_I2C_DATA_OUT; + GRTNIC_WRITE_REG(hw, I2CCTL, i2cctl, 0); + GRTNIC_WRITE_FLUSH(hw); + + return status; +} + +/** + * grtnic_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 grtnic_get_i2c_ack(struct grtnic_hw *hw) +{ + s32 status = GRTNIC_SUCCESS; + u32 i = 0; + u32 i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + u32 timeout = 10; + bool ack = 1; + + DEBUGFUNC("grtnic_get_i2c_ack"); + + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + /* Poll for ACK. Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + ack = grtnic_get_i2c_data(hw, &i2cctl); + + usec_delay(1); + if (!ack) + break; + } + + if (ack) { + printk("I2C ack was not received.\n"); + status = GRTNIC_ERR_I2C; + } + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(GRTNIC_I2C_T_LOW); + + return status; +} + +/** + * grtnic_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
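+ * This follows the usual bit-bang recovery approach: nine SCL pulses let a + * slave that is stuck mid-byte finish shifting out its data and release + * SDA, after which a fresh start/stop returns the bus to an idle state.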
+ **/ +void grtnic_i2c_bus_clear(struct grtnic_hw *hw) +{ + u32 i2cctl; + u32 i; + + DEBUGFUNC("grtnic_i2c_bus_clear"); + + grtnic_i2c_start(hw); + i2cctl = GRTNIC_READ_REG(hw, I2CCTL, 0); + + grtnic_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + grtnic_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(GRTNIC_I2C_T_HIGH); + + grtnic_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(GRTNIC_I2C_T_LOW); + } + + grtnic_i2c_start(hw); + + /* Put the i2c bus back to default state */ + grtnic_i2c_stop(hw); +} + +/** + * grtnic_read_i2c_byte_generic_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 grtnic_read_i2c_byte_generic_int(struct grtnic_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) +{ + s32 status; + u32 max_retry = 10; + u32 retry = 0; + bool nack = 1; + *data = 0; + + DEBUGFUNC("grtnic_read_i2c_byte_generic"); + + do { + grtnic_i2c_start(hw); + + /* Device Address and write indication */ + status = grtnic_clock_out_i2c_byte(hw, dev_addr); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_get_i2c_ack(hw); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_clock_out_i2c_byte(hw, byte_offset); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_get_i2c_ack(hw); + if (status != GRTNIC_SUCCESS) + goto fail; + + grtnic_i2c_start(hw); + + /* Device Address and read indication */ + status = grtnic_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != GRTNIC_SUCCESS) + goto fail; + + status = grtnic_get_i2c_ack(hw); + if (status != GRTNIC_SUCCESS) + goto fail; + + grtnic_clock_in_i2c_byte(hw, data); + + status = grtnic_clock_out_i2c_bit(hw, nack); + if (status != GRTNIC_SUCCESS) + goto fail; + + grtnic_i2c_stop(hw); + + return GRTNIC_SUCCESS; + +fail: + grtnic_i2c_bus_clear(hw); + + if (retry < max_retry) + printk("I2C byte read error - Retrying.\n"); + else + printk("I2C byte read error.\n"); + retry++; + + } while (retry <= max_retry); + + return status; +} + +/** + * grtnic_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 grtnic_read_i2c_byte(struct grtnic_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) +{ + return grtnic_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, data, true); +} + +/** + * grtnic_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
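+ * For example, reading offset GRTNIC_SFF_IDENTIFIER (0x0) through this + * helper is expected to return GRTNIC_SFF_IDENTIFIER_SFP (0x3) when an SFP + * module is present.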
+ **/ +s32 grtnic_read_i2c_eeprom(struct grtnic_hw *hw, u8 byte_offset, u8 *eeprom_data) +{ + DEBUGFUNC("grtnic_read_i2c_eeprom_generic"); + + return grtnic_read_i2c_byte(hw, byte_offset, GRTNIC_I2C_EEPROM_DEV_ADDR, eeprom_data); +} + +/** + * grtnic_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @sff8472_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 grtnic_read_i2c_sff8472(struct grtnic_hw *hw, u8 byte_offset, u8 *sff8472_data) +{ + return grtnic_read_i2c_byte(hw, byte_offset, GRTNIC_I2C_EEPROM_DEV_ADDR2, sff8472_data); +} diff --git a/drivers/net/ethernet/guangruntong/grtnic_macphy.h b/drivers/net/ethernet/guangruntong/grtnic_macphy.h new file mode 100755 index 0000000000000..4526da67fe515 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_macphy.h @@ -0,0 +1,256 @@ +#ifndef _GRTNICMACPHY_H_ +#define _GRTNICMACPHY_H_ + +struct sfp_info { + u8 wr_cmd; + u8 count; + u8 dev_addr; + u8 reg_addr; +}; + + +void grtnic_SetSpeed(struct net_device *netdev, int speed); +void grtnic_SetFc(struct net_device *netdev, int onoff); +int grtnic_ResetTx(struct net_device *netdev); +int grtnic_ResetRx(struct net_device *netdev); +void grtnic_SetTx(struct net_device *netdev, int onoff); +void grtnic_SetRx(struct net_device *netdev, int onoff); +void grtnic_GetRx(struct net_device *netdev, u32 *status); + +void grtnic_SetMaxFrameLen(struct net_device *netdev, int len); +void grtnic_SetJumbo(struct net_device *netdev, int onoff); + +void grtnic_SetAdrsFilter(struct net_device *netdev, int filter); +int grtnic_GetAdrsFilter(struct net_device *netdev); +u32 grtnic_GetSFP_Reg(struct net_device *netdev, struct sfp_info* sfc_info); + +void grtnic_SetMacAddress(struct net_device *netdev, const u8 *AddressPtr); +void grtnic_GetMacAddress(struct net_device *netdev, void *AddressPtr); +void grtnic_PhySetMdioDivisor(struct net_device *netdev, u8 Divisor); +int grtnic_Get_phy_status(struct net_device *netdev, int *linkup); + +void grtnic_SetPhyAddr(struct net_device *netdev, u32 Prtad, u32 Devad, u32 RegisterNum); //only for 10G phy +void grtnic_PhyRead(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 *PhyDataPtr); +void grtnic_PhyWrite(struct net_device *netdev, u32 PhyAddress, u32 RegisterNum, u16 PhyData); + +void grtnic_SetPause (struct net_device *netdev, u8 flowctl); +void grtnic_set_fc_watermarks (struct net_device *netdev); +void grtnic_SetMacPauseAddress(struct net_device *netdev, const u8 *AddressPtr); +void grtnic_GetMacPauseAddress(struct net_device *netdev, void *AddressPtr); +u64 grtnic_get_statistics_cnt(struct grtnic_adapter *adapter, u32 reg, u32 old_cnt); + +s32 grtnic_read_i2c_eeprom(struct grtnic_hw *hw, u8 byte_offset, u8 *eeprom_data); +s32 grtnic_read_i2c_sff8472(struct grtnic_hw *hw, u8 byte_offset, u8 *sff8472_data); + + +#define XXGE_PORT_ADDRBASE 0x00008000 + +#define XXGE_RCW0_OFFSET 0x00000400 /**< Rx Configuration Word 0 */ +#define XXGE_RCW1_OFFSET 0x00000404 /**< Rx Configuration Word 1 */ +#define XXGE_TC_OFFSET 0x00000408 /**< Tx Configuration */ +#define XXGE_FCC_OFFSET 0x0000040C /**< Flow Control Configuration */ + +#define XXGE_SPEED_OFFSET 0x00000410 /**< MAC Speed Configuration */ + +#define XXGE_RMTU_OFFSET 0x00000414 /**< Receiver MTU Configuration Word ~chng.. */ +#define XXGE_TMTU_OFFSET 0x00000418 /**< Transmitter MTU Configuration Word ~chng.. 
*/ + +#define XXGE_MDIO_REGISTER_ADDRESS 32 /* Register to read for getting phy status */ +#define XXGE_MDIO_CFG0_OFFSET 0x00000500 /**< MDIO Configuration word 0 */ +#define XXGE_MDIO_CFG1_OFFSET 0x00000504 /**< MDIO Configuration word 1 */ +#define XXGE_MDIO_TX_DATA_OFFSET 0x00000508 /**< MDIO TX Data */ +#define XXGE_MDIO_RX_DATA_OFFSET 0x0000050C /**< MDIO RX Data (Read-only) */ + +#define XXGE_TC_TXCONTROLBIT 0x1C /* Set this bit to 0 to disable transmission */ + +/** @name Flow Control Configuration (FCC) Register Bit definitions + * @{ + */ +#define XXGE_FCC_FCRX_MASK 0x20000000 /**< Rx flow control enable */ +#define XXGE_FCC_FCTX_MASK 0x40000000 /**< Tx flow control enable */ + + +/** @name Receive Configuration Word 1 (RCW1) Register bit definitions + * @{ + */ +#define XXGE_RCW1_RST_MASK 0x80000000 /**< Reset */ +#define XXGE_RCW1_JUM_MASK 0x40000000 /**< Jumbo frame enable */ +#define XXGE_RCW1_FCS_MASK 0x20000000 /**< In-Band FCS enable + * (FCS not stripped) */ +#define XXGE_RCW1_RX_MASK 0x10000000 /**< Receiver enable */ +#define XXGE_RCW1_VLAN_MASK 0x08000000 /**< VLAN frame enable */ +#define XXGE_RCW1_HD_MASK 0x04000000 /**< Receiver Preserve Preamble Enable !!chng... change HD<->PP */ +#define XXGE_RCW1_LT_DIS_MASK 0x02000000 /**< Length/type field valid check + * disable + */ +#define XXGE_RCW1_CL_DIS_MASK 0x01000000 /**< Control frame Length check + * disable + */ +#define XXGE_RCW1_PAUSEADDR_MASK 0x0000FFFF /**< Pause frame source + * address bits [47:32].Bits + * [31:0] are stored in register + * RCW0 + */ +/** @name Transmitter Configuration (TC) Register bit definitions + * @{ + */ +#define XXGE_TC_RST_MASK 0x80000000 /**< Reset */ +#define XXGE_TC_JUM_MASK 0x40000000 /**< Jumbo frame enable */ +#define XXGE_TC_FCS_MASK 0x20000000 /**< In-Band FCS enable + * (FCS not generated) + */ +#define XXGE_TC_TX_MASK 0x10000000 /**< Transmitter enable */ +#define XXGE_TC_VLAN_MASK 0x08000000 /**< VLAN frame enable */ +#define XXGE_TC_HD_MASK 0x04000000 /**< WAN Mode Enable !!chng...bit-26 we may NOT use*/ +#define XXGE_TC_IFG_MASK 0x02000000 /**< Inter-frame gap adjustment enable */ +#define XXGE_TC_DIC_MASK 0x01000000 /**< Deficit Idle Count Enable */ + + +/** @name MDIO Management Configuration (MC) Register bit definitions + * @{ + */ +#define XXGE_MDIO_CFG0_MDIOEN_MASK 0x00000040 /**< MII management enable*/ +#define XXGE_MDIO_CFG0_CLOCK_DIVIDE_MAX 0x3F /**< Maximum MDIO divisor */ +#define XXGE_MDIO_PHY_LINK_UP_MASK 0x1000 /* Checking for 12th bit */ + + +#define XXGE_MDIO_MC_MDIOPRTAD_MASK 0x1F000000 /**< PRTAD ...b28:24*/ +#define XXGE_MDIO_MC_CLOCK_DEVAD_MAX 0x001F0000 /**< DEVAD ...b20:16*/ +#define XXGE_MDIO_MC_MDIO_TXOP_MASK 0x0000C000 /**< TX OP ...b15:14*/ +#define XXGE_MDIO_CFG1_INITIATE_MASK 0x00000800 /**< Initiate ...b11 */ +#define XXGE_MDIO_CFG1_READY_MASK 0x00000080 /**< MDIO Ready ...b7*/ +#define XXGE_MDIO_CFG1_OP_SETADDR_MASK 0x00000000 /**< Opcode Set Addr Mask */ +#define XXGE_MDIO_CFG1_OP_READ_MASK 0x00008000 /**< Opcode Read Mask */ +#define XXGE_MDIO_CFG1_OP_WRITE_MASK 0x00004000 /**< Opcode Write Mask */ + +#define XXGE_MDIO_CFG1_OP_READ_MASK_10G 0x0000C000 /**< Opcode Read Mask for 10G*/ + +/*@}*/ + + +/** @name MDIO TX Data (MTX) Register bit definitions + * @{ + */ +#define XXGE_MDIO_TX_DATA_MASK 0x0000FFFF /**< MDIO TX Data ...b15:0 */ + +/** @name MDIO TX Data (MTX) Register bit definitions + * @{ + */ +#define XXGE_MDIO_RX_DATA_MASK 0x0000FFFF /**< MDIO RX Data ...b15:0 */ + + +//user define +#define XXGE_RST_DELAY_LOOPCNT_VAL 4 /**< 
Timeout in ticks used + * while checking if the core + * had come out of reset. The + * exact tick time is defined + * in each case/loop where it + * will be used + */ +#define XXGE_MDIO_RDY_LOOPCNT_VAL 100 // Timeout in ticks used + + + +/* PHY Control Register */ +#define PHY_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define PHY_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define PHY_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define PHY_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define PHY_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define PHY_POWER_DOWN 0x0800 /* Power down */ +#define PHY_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define PHY_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define PHY_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define PHY_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define PHY_SPEED_1000 0x0040 +#define PHY_SPEED_100 0x2000 +#define PHY_SPEED_10 0x0000 + + +/* SFP I2C */ +#define GRTNIC_I2C_CLOCK_STRETCHING_TIMEOUT 500 +#define GRTNIC_ERR_PHY_ADDR_INVALID -17 +#define GRTNIC_ERR_I2C -18 + +#define GRTNIC_I2C_EEPROM_DEV_ADDR 0xA0 +#define GRTNIC_I2C_EEPROM_DEV_ADDR2 0xA2 +#define GRTNIC_I2C_EEPROM_BANK_LEN 0xFF + +/* EEPROM byte offsets */ +#define GRTNIC_SFF_IDENTIFIER 0x0 +#define GRTNIC_SFF_IDENTIFIER_SFP 0x3 +#define GRTNIC_SFF_VENDOR_OUI_BYTE0 0x25 +#define GRTNIC_SFF_VENDOR_OUI_BYTE1 0x26 +#define GRTNIC_SFF_VENDOR_OUI_BYTE2 0x27 +#define GRTNIC_SFF_1GBE_COMP_CODES 0x6 +#define GRTNIC_SFF_10GBE_COMP_CODES 0x3 +#define GRTNIC_SFF_CABLE_TECHNOLOGY 0x8 +#define GRTNIC_SFF_CABLE_SPEC_COMP 0x3C +#define GRTNIC_SFF_SFF_8472_SWAP 0x5C +#define GRTNIC_SFF_SFF_8472_COMP 0x5E +#define GRTNIC_SFF_SFF_8472_OSCB 0x6E +#define GRTNIC_SFF_SFF_8472_ESCB 0x76 +#define GRTNIC_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define GRTNIC_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define GRTNIC_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define GRTNIC_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define GRTNIC_SFF_QSFP_CONNECTOR 0x82 +#define GRTNIC_SFF_QSFP_10GBE_COMP 0x83 +#define GRTNIC_SFF_QSFP_1GBE_COMP 0x86 +#define GRTNIC_SFF_QSFP_CABLE_LENGTH 0x92 +#define GRTNIC_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define GRTNIC_SFF_DA_PASSIVE_CABLE 0x4 +#define GRTNIC_SFF_DA_ACTIVE_CABLE 0x8 +#define GRTNIC_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define GRTNIC_SFF_1GBASESX_CAPABLE 0x1 +#define GRTNIC_SFF_1GBASELX_CAPABLE 0x2 +#define GRTNIC_SFF_1GBASET_CAPABLE 0x8 +#define GRTNIC_SFF_10GBASESR_CAPABLE 0x10 +#define GRTNIC_SFF_10GBASELR_CAPABLE 0x20 +#define GRTNIC_SFF_SOFT_RS_SELECT_MASK 0x8 +#define GRTNIC_SFF_SOFT_RS_SELECT_10G 0x8 +#define GRTNIC_SFF_SOFT_RS_SELECT_1G 0x0 +#define GRTNIC_SFF_ADDRESSING_MODE 0x4 +#define GRTNIC_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define GRTNIC_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define GRTNIC_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define GRTNIC_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define GRTNIC_I2C_EEPROM_READ_MASK 0x100 +#define GRTNIC_I2C_EEPROM_STATUS_MASK 0x3 +#define GRTNIC_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define GRTNIC_I2C_EEPROM_STATUS_PASS 0x1 +#define GRTNIC_I2C_EEPROM_STATUS_FAIL 0x2 +#define GRTNIC_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define GRTNIC_TN_LASI_STATUS_REG 0x9005 +#define GRTNIC_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance */ +#define GRTNIC_SFF_SFF_8472_UNSUP 0x00 + + +/* I2C SDA and SCL timing parameters for standard mode */ +#define GRTNIC_I2C_T_HD_STA 4 +#define GRTNIC_I2C_T_LOW 5 +#define GRTNIC_I2C_T_HIGH 4 +#define GRTNIC_I2C_T_SU_STA 5 +#define GRTNIC_I2C_T_HD_DATA 
5 +#define GRTNIC_I2C_T_SU_DATA 1 +#define GRTNIC_I2C_T_RISE 1 +#define GRTNIC_I2C_T_FALL 1 +#define GRTNIC_I2C_T_SU_STO 4 +#define GRTNIC_I2C_T_BUF 5 + +#define GRTNIC_I2C_CLK_IN 0x00000001 +#define GRTNIC_I2C_CLK_OUT 0x00000002 +#define GRTNIC_I2C_DATA_IN 0x00000004 +#define GRTNIC_I2C_DATA_OUT 0x00000008 + +#define msec_delay(_x) msleep(_x) +#define usec_delay(_x) udelay(_x) + +#define DEBUGFUNC(S) do {} while (0) + +#endif /* _XDMANET_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_main.c b/drivers/net/ethernet/guangruntong/grtnic_main.c new file mode 100755 index 0000000000000..39e98373b13a9 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_main.c @@ -0,0 +1,2116 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include + +#include +#include +#include +#ifdef NETIF_F_TSO +#include +#endif +#include +#include + +#include "grtnic.h" +#include "grtnic_macphy.h" + +#define DEFAULT_ETHER_ADDRESS "\02SUME\00" + +MODULE_AUTHOR("Beijing GRT Corporation, "); +MODULE_DESCRIPTION("GRTNIC Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(5,11,0) +MODULE_SUPPORTED_DEVICE(DRIVER_NAME); +#endif + +static const struct grt_gigeth_info *grt_gigeth_info_tbl[] = { + [board_902E_GRT_FF] = &grt_902eff_info, + [board_902T_GRT_FF] = &grt_902tff_info, + [board_901ELR_GRT_FF] = &grt_901elr_info, + [board_1001E_GRT_FF] = &grt_1001eff_info, + [board_1001E_QM_FF] = &qm_1001eff_info, + [board_1002E_GRT_FF] = &grt_1002eff_info, + [board_1005E_GRT_FX] = &grt_1005efx_info +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +static const struct pci_device_id grtnic_pci_tbl[] = { + {0x1E18, 0x0F82, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_902E_GRT_FF}, + {0x1E18, 0x0F02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_902T_GRT_FF}, + {0x1E18, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_901ELR_GRT_FF}, + {0x1E18, 0x1F81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1001E_GRT_FF}, + {0x1E18, 0x1F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1001E_QM_FF}, + {0x1E18, 0x1F82, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1002E_GRT_FF}, + {0x1E18, 0x1F25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_1005E_GRT_FX}, + /* required last entry */ + {0 /* end */} +}; + +MODULE_DEVICE_TABLE(pci, grtnic_pci_tbl); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static struct workqueue_struct *grtnic_wq; + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) +static int grtnic_notify_dca(struct notifier_block *, unsigned long event, void *p); +static struct notifier_block dca_notifier = { + .notifier_call = grtnic_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif /* CONFIG_DCA */ + +#if 0 +// These are not defined in the 2.x.y kernels, so just define them +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,39) +#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x100 +#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x200 +#else +/** + * These are badly named in pre-3.6.11 kernel versions. 
We COULD do the same + * check as above, however (annoyingly) linux for tegra (based on post-3.6.11) + * picked up the header file from some pre-3.6.11 version, so we'll just make + * our code ugly and handle the check here: + */ +#ifndef PCI_EXP_DEVCTL2_IDO_REQ_EN +#define PCI_EXP_DEVCTL2_IDO_REQ_EN PCI_EXP_IDO_REQ_EN +#endif +#ifndef PCI_EXP_DEVCTL2_IDO_CMP_EN +#define PCI_EXP_DEVCTL2_IDO_CMP_EN PCI_EXP_IDO_CMP_EN +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) + +int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + +} + +int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) +{ + if (pos & 3) + return -EINVAL; + + return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); +} + +#endif + +#endif + +static int grtnic_map_bars(struct grtnic_adapter *adapter, struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct grtnic_hw *hw = &adapter->hw; + + resource_size_t bar_start; + resource_size_t bar_end; + resource_size_t bar_len; + + bar_start = pci_resource_start(pdev, 0); + bar_end = pci_resource_end(pdev, 0); + bar_len = bar_end - bar_start + 1; + + hw->user_bar_len = bar_len; + hw->user_bar = pci_ioremap_bar(pdev, 0); + + if (!hw->user_bar) + { + dev_err(dev, "Could not map USER BAR"); + return -1; + } + dev_info(dev, "USER BAR mapped at 0x%p with length %llu", hw->user_bar, bar_len); + + bar_start = pci_resource_start(pdev, 1); + bar_end = pci_resource_end(pdev, 1); + bar_len = bar_end - bar_start + 1; + + hw->dma_bar_len = bar_len; + hw->dma_bar = pci_ioremap_bar(pdev, 1); + + if (!hw->dma_bar) + { + dev_err(dev, "Could not map DMA BAR"); + return -1; + } + + dev_info(dev, "DMA BAR mapped at 0x%p with length %llu", hw->dma_bar, bar_len); + + return 0; +} + +static void grtnic_free_bars(struct grtnic_adapter *adapter, struct pci_dev *pdev) +{ + struct grtnic_hw *hw = &adapter->hw; + + if(hw->user_bar) + pci_iounmap(pdev, hw->user_bar); + if(hw->dma_bar) + pci_iounmap(pdev, hw->dma_bar); +} + + +void grtnic_napi_enable_all(struct grtnic_adapter *adapter) +{ + struct grtnic_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef HAVE_NDO_BUSY_POLL + grtnic_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&q_vector->napi); + } +} + +void grtnic_napi_disable_all(struct grtnic_adapter *adapter) +{ + struct grtnic_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); +#ifdef HAVE_NDO_BUSY_POLL + while(!grtnic_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif + } +} + +///////////////////////////////////////////////////////////////////////////////////// + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + +static void grtnic_update_tx_dca(struct grtnic_adapter *adapter, struct grtnic_ring *tx_ring, int cpu) +{ + u32 txctrl = 0; + u16 reg_idx = tx_ring->reg_idx; + + if (adapter->flags & GRTNIC_GRTNIC_FLAG_DCA_ENABLED) + txctrl = dca3_get_tag(tx_ring->dev, cpu); + + txctrl <<= GRTNIC_DCA_TXCTRL_CPUID_SHIFT; + + /* + * We can enable 
relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= GRTNIC_DCA_TXCTRL_DESC_RRO_EN | + GRTNIC_DCA_TXCTRL_DATA_RRO_EN | + GRTNIC_DCA_TXCTRL_DESC_DCA_EN; + + write_register(txctrl, adapter->dma_bar+ (TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DCA_RXTXCTL*4); + +} + +static void grtnic_update_rx_dca(struct grtnic_adapter *adapter, struct grtnic_ring *rx_ring, int cpu) +{ + u32 rxctrl = 0; + u8 reg_idx = rx_ring->reg_idx; + + if (adapter->flags & GRTNIC_GRTNIC_FLAG_DCA_ENABLED) + rxctrl = dca3_get_tag(rx_ring->dev, cpu); + + rxctrl <<= GRTNIC_DCA_RXCTRL_CPUID_SHIFT; + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + rxctrl |= GRTNIC_DCA_RXCTRL_DESC_RRO_EN | + GRTNIC_DCA_RXCTRL_DATA_DCA_EN | + GRTNIC_DCA_RXCTRL_DESC_DCA_EN; + + write_register(rxctrl, adapter->dma_bar+ (TARGET_C2H<<12) + (reg_idx<<8) + ADDR_DCA_RXTXCTL*4); +} + +void grtnic_update_dca(struct grtnic_q_vector *q_vector) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_ring *ring; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + grtnic_for_each_ring(ring, q_vector->tx) + grtnic_update_tx_dca(adapter, ring, cpu); + + grtnic_for_each_ring(ring, q_vector->rx) + grtnic_update_rx_dca(adapter, ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +void grtnic_setup_dca(struct grtnic_adapter *adapter) +{ + int v_idx; + + /* always use CB2 mode, difference is masked in the CB driver */ + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) + write_register(GRTNIC_DCA_CTRL_DCA_MODE_CB2, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + else + write_register(GRTNIC_DCA_CTRL_DCA_DISABLE, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + adapter->q_vector[v_idx]->cpu = -1; + grtnic_update_dca(adapter->q_vector[v_idx]); + } +} + +static int __grtnic_notify_dca(struct device *dev, void *data) +{ + struct grtnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long event = *(unsigned long *)data; + + if (!(adapter->flags & GRTNIC_FLAG_DCA_CAPABLE)) + return 0; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if we're already enabled, don't do it again */ + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= GRTNIC_FLAG_DCA_ENABLED; + write_register(GRTNIC_DCA_CTRL_DCA_MODE_CB2, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + break; + } + /* fall through - DCA is disabled */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) { + dca_remove_requester(dev); + adapter->flags &= ~GRTNIC_FLAG_DCA_ENABLED; + write_register(GRTNIC_DCA_CTRL_DCA_DISABLE, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + } + break; + } + + return 0; +} +#endif /* CONFIG_DCA */ + + +/** + * grtnic_rss_indir_tbl_entries - Return RSS indirection table entries + * @adapter: device handle + * + * - 82598/82599/X540: 128 + * - X550(non-SRIOV mode): 512 + * - X550(SRIOV mode): 64 + */ +u32 grtnic_rss_indir_tbl_entries(struct grtnic_adapter *adapter) +{ + return 128; +} + +/** + * grtnic_store_key - Write the RSS key to HW + * @adapter: device handle + * + * Write the RSS key stored in adapter.rss_key to HW. 
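+ * (The key is programmed as ten consecutive 32-bit writes starting at RSS_KEY_BEGIN.)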
+ */ +void grtnic_store_key(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < 10; i++) + GRTNIC_WRITE_REG(hw, (RSS_KEY_BEGIN + i*4), adapter->rss_key[i], 0); +} + +/** + * grtnic_init_rss_key - Initialize adapter RSS key + * @adapter: device handle + * + * Allocates and initializes the RSS key if it is not allocated. + **/ +static inline int grtnic_init_rss_key(struct grtnic_adapter *adapter) +{ + +// static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, +// 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, +// 0xA32DCB77, 0x0CF23080, 0x3BB7426A, +// 0xFA01ACBE }; + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(GRTNIC_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, GRTNIC_RSS_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +} + +/** + * grtnic_store_reta - Write the RETA table to HW + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +void grtnic_store_reta(struct grtnic_adapter *adapter) +{ + u32 i = 0, reta_entries = grtnic_rss_indir_tbl_entries(adapter); + struct grtnic_hw *hw = &adapter->hw; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Write redirection table to HW */ + while (i < reta_entries) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= indir_tbl[i + j]; + } + + GRTNIC_WRITE_REG(hw, (RSS_RETA_BEGIN + i), val, 0); + i += 4; + + } +} + +static void grtnic_setup_reta(struct grtnic_adapter *adapter) +{ + u32 i, j; + u32 reta_entries = grtnic_rss_indir_tbl_entries(adapter); +// u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + u16 rss_i = adapter->rss_queues; + + /* Fill out hash function seeds */ + grtnic_store_key(adapter); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + grtnic_store_reta(adapter); +} + +/** + * grtnic_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ + +void grtnic_setup_mrqc(struct grtnic_adapter *adapter) +{ +// u32 mrqc = 0, rss_field = 0; //暂时没用到,这个地方可以设置RSS的方式 + grtnic_setup_reta(adapter); +// mrqc |= rss_field; +// IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); +} + + +//////////////////////////////////////////////////////////////////////////////////////////// +void grtnic_irq_disable(struct grtnic_adapter *adapter) +{ + u32 var; + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + var = adapter->eims_enable_mask; + else + var = ~0; + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMC*4), var, 1); + GRTNIC_WRITE_FLUSH(&adapter->hw); //flush + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + int vector; + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); //other + + } else { + synchronize_irq(adapter->pdev->irq); + } + +} + +void grtnic_irq_enable(struct grtnic_adapter *adapter) +{ + u32 var; + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + var = adapter->eims_enable_mask; + else + var = ~0; + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IAM*4), var, 1); //当发出中断后自动禁止所有中断 + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), var, 1); //enable all interrupt + GRTNIC_WRITE_FLUSH(&adapter->hw); //flush +} + + +void 
grtnic_free_irq(struct grtnic_adapter *adapter) +{ + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + int vector = 0, i; + + for (i = 0; i < adapter->num_q_vectors; i++) + { +#ifdef HAVE_IRQ_AFFINITY_HINT + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(adapter->msix_entries[vector].vector, NULL); +#endif + free_irq(adapter->msix_entries[vector++].vector, adapter->q_vector[i]); + } + + free_irq(adapter->msix_entries[vector++].vector, adapter); //other + + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +#define N0_QUEUE -1 +static void grtnic_assign_vector(struct grtnic_q_vector *q_vector, int msix_vector) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + int rx_queue = N0_QUEUE; + int tx_queue = N0_QUEUE; + u8 ivar; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + if (rx_queue > N0_QUEUE) + { + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((rx_queue*2+1)<<16 | 0x01), 1); //1: c2s eop interrupt mode + ivar = adapter->ivar[rx_queue]; + /* clear any bits that are currently set */ + ivar &= 0x0F; + ivar |= (msix_vector <<4); + adapter->ivar[rx_queue] = ivar; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR*4), (rx_queue<<16 | ivar), 1); + } + + if (tx_queue > N0_QUEUE) + { + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((tx_queue*2)<<16 | 0x00), 1); //0:s2c normal interrupt 1: no desc wb & no interrupt + ivar = adapter->ivar[tx_queue]; + /* clear any bits that are currently set */ + ivar &= 0xF0; + ivar |= msix_vector; + adapter->ivar[tx_queue] = ivar; + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR*4), (tx_queue<<16 | ivar), 1); + } + + q_vector->eims_value = BIT(msix_vector); + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + +} + + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * @adapter: board private structure + * + **/ +void grtnic_configure_msi_and_legacy(struct grtnic_adapter *adapter) +{ + struct grtnic_q_vector *q_vector = adapter->q_vector[0]; + + grtnic_write_itr(q_vector); + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((0*2+1)<<16 | 0x01), 1); //1: c2s eop interrupt mode + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADDR_INTR_MODE*4), ((0*2)<<16 | 0x00), 1); //0:s2c normal interrupt 1: no desc wb & no interrupt + + adapter->eims_other = BIT(0); + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR_MISC*4), 0, 1); + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR*4), (0<<16 | 0x11), 1); +} + +/** + * grtnic_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * grtnic_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. 
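+ * Each queue vector is programmed into the IVAR table in turn, and the final MSI-X vector is reserved for the misc/link ("other") interrupt.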
+ **/ +void grtnic_configure_msix(struct grtnic_adapter *adapter) +{ + int i, vector = 0; + + adapter->eims_enable_mask = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + { + grtnic_assign_vector(adapter->q_vector[i], vector++); + grtnic_write_itr(adapter->q_vector[i]); + } + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + + GRTNIC_WRITE_REG(&adapter->hw, ((TARGET_IRQ<<12) + ADD_INTR_IVAR_MISC*4), vector, 1); + + adapter->eims_enable_mask |= adapter->eims_other; + + GRTNIC_WRITE_FLUSH(&adapter->hw); //flush +} + +/** + * grtnic_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure + * + * grtnic_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int grtnic_request_msix(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + unsigned int ri = 0, ti = 0; + int vector, err; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct grtnic_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-TxRx-%u", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-rx-%u", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-tx-%u", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + + err = request_irq(entry->vector, &grtnic_msix_ring, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt '%s' " + "Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* If Flow Director is enabled, set interrupt affinity */ +// if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* assign the mask for this irq */ + irq_set_affinity_hint(adapter->msix_entries[vector].vector, &q_vector->affinity_mask); +// } +#endif /* HAVE_IRQ_AFFINITY_HINT */ + } + + err = request_irq(adapter->msix_entries[vector].vector, &grtnic_msix_other, 0, netdev->name, adapter); + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return GRTNIC_SUCCESS; + +free_queue_irqs: + while (vector) { + vector--; +#ifdef HAVE_IRQ_AFFINITY_HINT + irq_set_affinity_hint(adapter->msix_entries[vector].vector, NULL); +#endif + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~GRTNIC_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + + +int grtnic_request_irq(struct grtnic_adapter *adapter) +{ + int irq_flag; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + err = grtnic_request_msix(adapter); + + else + { + irq_flag = (adapter->flags & GRTNIC_FLAG_MSI_ENABLED) ? 
0 : IRQF_SHARED; + err = request_irq(pdev->irq, grtnic_isr, irq_flag, DRIVER_NAME, adapter); + } + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +void grtnic_reset_interrupt_capability(struct grtnic_adapter *adapter) +{ + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + adapter->flags &= ~GRTNIC_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & GRTNIC_FLAG_MSI_ENABLED) { + adapter->flags &= ~GRTNIC_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + + +/** + * grtnic_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool grtnic_set_rss_queues(struct grtnic_adapter *adapter) +{ + u16 rss_i = adapter->rss_queues; + + adapter->num_rx_queues = rss_i; +#ifdef HAVE_TX_MQ + adapter->num_tx_queues = rss_i; +#endif + + return true; +} + + +/* + * grtnic_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void grtnic_set_num_queues(struct grtnic_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + grtnic_set_rss_queues(adapter); +} + +/** + * grtnic_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int grtnic_acquire_msix_vectors(struct grtnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, vectors; + + if (!(adapter->flags & GRTNIC_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair with XDP queues + * being stacked with TX queues. + */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + /* if tx handler is separate make it 1 for every queue */ +// if (!(adapter->flags & FLAG_QUEUE_PAIRS)) +// vectors = adapter->num_tx_queues + adapter->num_rx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = vectors; + + /* add 1 vector for link status interrupts */ + vectors++; + + adapter->msix_entries = kcalloc(vectors, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(pdev, adapter->msix_entries, vectors, vectors); + + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~GRTNIC_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + /* we successfully allocated some number of vectors within our + * requested range. 
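+ * Because pci_enable_msix_range() is called with identical minimum and maximum counts, reaching this point means exactly num_q_vectors + 1 vectors were granted (one per queue pair plus the link/misc vector).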
+ */ + adapter->flags |= GRTNIC_FLAG_MSIX_ENABLED; + return 0; +} + + +void grtnic_set_interrupt_capability(struct grtnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int err, i; + + for(i=0; iivar[i] = 0; + + /* We will try to get MSI-X interrupts first */ + if (!grtnic_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. + */ + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->rss_queues = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. + */ + grtnic_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + if (!(adapter->flags & GRTNIC_FLAG_MSI_CAPABLE)) + return; + + err = pci_enable_msi(pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", + err); + else + adapter->flags |= GRTNIC_FLAG_MSI_ENABLED; +} + +/** + * grtnic_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void grtnic_free_q_vector(struct grtnic_adapter *adapter, int v_idx) +{ + struct grtnic_q_vector *q_vector = adapter->q_vector[v_idx]; + /* if we're coming from grtnic_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +static void grtnic_add_ring(struct grtnic_ring *ring, struct grtnic_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * grtnic_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
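+ * Each vector carries at most one Tx and one Rx ring; the ring structures are allocated inline after the q_vector so they land on the same NUMA node.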
+ **/ +static int grtnic_alloc_q_vector(struct grtnic_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct grtnic_q_vector *q_vector; + struct grtnic_ring *ring; + int node = -1; +#ifdef HAVE_IRQ_AFFINITY_HINT + int cpu = -1; + u8 tcs = 0; +// u8 tcs = netdev_get_num_tc(adapter->netdev); +#endif + int ring_count,size; + + /* only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = sizeof(struct grtnic_q_vector) + + (sizeof(struct grtnic_ring) * ring_count); + +#ifdef HAVE_IRQ_AFFINITY_HINT + /* customize cpu for Flow Director mapping */ + if (tcs <= 1) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + +#endif + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); +#endif + q_vector->node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + + /* initialize NAPI */ +// netif_napi_add(adapter->netdev, &q_vector->napi, grtnic_poll, 64); + netif_napi_add(adapter->netdev, &q_vector->napi, grtnic_poll); + +#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif +#endif + +#ifdef HAVE_NDO_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, GRTNIC_QV_STATE_DISABLE); + +#endif + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* Initialize setting for adaptive ITR */ + q_vector->tx.itr = ITR_ADAPTIVE_MAX_USECS | ITR_ADAPTIVE_LATENCY; + q_vector->rx.itr = ITR_ADAPTIVE_MAX_USECS | ITR_ADAPTIVE_LATENCY; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = GRTNIC_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = GRTNIC_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* initialize pointer to rings */ + ring = q_vector->ring; + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = adapter->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + grtnic_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = adapter->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + grtnic_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * grtnic_free_q_vectors - Free memory allocated for interrupt vectors + * 
@adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void grtnic_free_q_vectors(struct grtnic_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + grtnic_free_q_vector(adapter, v_idx); +} + +/** + * grtnic_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int grtnic_alloc_q_vectors(struct grtnic_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int i; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = grtnic_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = grtnic_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + if (adapter->rx_ring[i]) + adapter->rx_ring[i]->reg_idx = i; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]) + adapter->tx_ring[i]->reg_idx = i; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + grtnic_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * grtnic_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +void grtnic_clear_interrupt_scheme(struct grtnic_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + grtnic_free_q_vectors(adapter); + grtnic_reset_interrupt_capability(adapter); +} + +/** + * grtnic_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) 
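+ * The fallback order is MSI-X, then MSI, then legacy INTx; if MSI-X vectors cannot be acquired, RSS is disabled and a single queue/vector is used.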
+ **/ +int grtnic_init_interrupt_scheme(struct grtnic_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + grtnic_set_num_queues(adapter); + + /* Set interrupt mode */ + grtnic_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = grtnic_alloc_q_vectors(adapter); + + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + grtnic_reset_interrupt_capability(adapter); + return err; + } + +// ixgbe_cache_ring_register(adapter); + + set_bit(__GRTNIC_DOWN, &adapter->state); + + return GRTNIC_SUCCESS; +} + + +static int grtnic_sw_init(struct grtnic_adapter *adapter) +{ + int i, err = 0; + struct grtnic_ring *tx_ring, *rx_ring; + int card_type = adapter->ei->type; + + if (grtnic_init_rss_key(adapter)) { + err = GRTNIC_ERR_OUT_OF_MEM; + e_err(probe, "rss_key allocation failed: %d\n", err); + goto out; + } + +//针对每个卡的phy设置,可以放在这里 + #if IS_ENABLED(CONFIG_DCA) + adapter->flags |= GRTNIC_FLAG_DCA_CAPABLE; + #endif + adapter->flags |= (GRTNIC_FLAG_MSI_CAPABLE | \ + GRTNIC_FLAG_MSIX_CAPABLE | \ + GRTNIC_FLAG_MQ_CAPABLE); + +// /* default flow control settings */ +// hw->fc.requested_mode = ixgbe_fc_full; +// hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ +// +// adapter->last_lfc_mode = hw->fc.current_mode; +// ixgbe_pbthresh_setup(adapter); +// hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; +// hw->fc.send_xon = true; +// hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + adapter->tx_ring_count = GRTNIC_DEFAULT_TXD; + adapter->rx_ring_count = GRTNIC_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = GRTNIC_DEFAULT_TX_WORK; + + adapter->max_frame_size = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; //1500+18+4 + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; //60+4 + + grtnic_PhySetMdioDivisor(adapter->netdev, 24); + + if(card_type == board_902T_GRT_FF) //FF902T + { + u16 temp; + u32 phy_addr = adapter->hw.phy_addr; + + grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, &temp); + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, temp | PHY_RESET); //rst phy + + //clear EEE LED + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x1F, 0xd04); //1F:change page, d04:ExtPage + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x11, 0x00); //EEELCR + // grtnic_PhyWrite(adapter->netdev[i], 0x01, 0x10, 0x207B); // LED config + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x10, 0x0D1B); // LED config + + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x1F, 0x00); //page 0 + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0D, 0x07); //1 is phy add, 0d is MACR reg, 7 is device address + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0E, 0x3C); //1 is phy add, 0e is MAADR reg, 0x3C is reg address + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0D, 0x4007); //1 is phy add, 0d is MACR reg, 4007 get data from device add 7 & reg 14 + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x0E, 0x00); // EEEAR + } + +// else if(card_type == board_904T_GRT_FF || card_type == board_904E_GRT_FF)//FF904T & FF904E +// { +// u16 temp; +// for (i = 0; i < grtnic_ports_max; i++) +// { +// grtnic_PhyWrite(adapter->netdev[0], i, 0x1E, 0x00); //utp_ext_reg +// grtnic_PhyRead (adapter->netdev[0], i, 0x1F, &temp); +// +// grtnic_PhyWrite(adapter->netdev[0], i, 0x1E, 0x00); +// grtnic_PhyWrite(adapter->netdev[0], i, 0x1F, temp | 0x20); //jumbo enable +// +// grtnic_PhyRead (adapter->netdev[0], i, 0x00, &temp); +// grtnic_PhyWrite(adapter->netdev[0], i, 0x00, temp | 0x8000); //sw reset +// } +// +// } + + for (i = 0; i < adapter->num_tx_queues; i++) 
+ { + + tx_ring = adapter->tx_ring[i]; + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); + } + +//----------------------------------------------------------------------------------- + for (i = 0; i < adapter->num_rx_queues; i++) + { + rx_ring = adapter->rx_ring[i]; + + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); + } + + set_bit(__GRTNIC_DOWN, &adapter->state); + +out: + return err; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +/** + * grtnic_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_update_link(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + u32 xphy_status; + + if (!(adapter->flags & GRTNIC_FLAG_NEED_LINK_UPDATE)) + return; + + xphy_status = GRTNIC_READ_REG(hw, XPHY_STATUS, 0); + link_up = (xphy_status & 1) ? 1:0; + link_speed = (xphy_status >> 1) & 0x03; + +// if (link_up) { +// if (hw->phy.media_type == ixgbe_media_type_copper && +// (ixgbe_device_supports_autoneg_fc(hw))) +// ixgbe_setup_fc(hw); +// hw->mac.ops.fc_enable(hw); +// +// } + + if (link_up || time_after(jiffies, (adapter->link_check_timeout + GRTNIC_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~GRTNIC_FLAG_NEED_LINK_UPDATE; + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), adapter->eims_other, 1); //打开相应的中断,user_interrupt + GRTNIC_WRITE_FLUSH(hw); + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +/** + * grtnic_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_link_is_up(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 link_speed = adapter->link_speed; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + if(adapter->type==1) //copper rtl8211 + { + grtnic_ResetRx(netdev); + grtnic_SetSpeed(netdev, link_speed); + } + grtnic_SetRx(netdev, 1); //start rx + + e_info(drv, "NIC Link is Up\n"); + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); +} + +/** + * grtnic_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void grtnic_watchdog_link_is_down(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + grtnic_SetRx(netdev, 0); //stop rx + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); +} + +static bool grtnic_ring_tx_pending(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +/** + * grtnic_watchdog_flush_tx - flush queues on link down + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_flush_tx(struct grtnic_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (grtnic_ring_tx_pending(adapter)) { + /* We've lost 
link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset due to lost link with pending Tx work\n"); + set_bit(__GRTNIC_RESET_REQUESTED, &adapter->state); + } + } +} + +/** + * grtnic_watchdog_subtask - check and bring link up + * @adapter: pointer to the device adapter structure + **/ +static void grtnic_watchdog_subtask(struct grtnic_adapter *adapter) +{ + /* if interface is down, removing or resetting, do nothing */ + if (test_bit(__GRTNIC_DOWN, &adapter->state) || + test_bit(__GRTNIC_REMOVING, &adapter->state) || + test_bit(__GRTNIC_RESETTING, &adapter->state)) + return; + + grtnic_watchdog_update_link(adapter); + + if (adapter->link_up) + grtnic_watchdog_link_is_up(adapter); + else + grtnic_watchdog_link_is_down(adapter); + +// grtnic_update_stats(adapter); //要检查 + + grtnic_watchdog_flush_tx(adapter); +} + + +void grtnic_service_event_schedule(struct grtnic_adapter *adapter) +{ + if (!test_bit(__GRTNIC_DOWN, &adapter->state) && + !test_bit(__GRTNIC_REMOVING, &adapter->state) && + !test_and_set_bit(__GRTNIC_SERVICE_SCHED, &adapter->state)) + queue_work(grtnic_wq, &adapter->service_task); +} + +static void grtnic_service_event_complete(struct grtnic_adapter *adapter) +{ + BUG_ON(!test_bit(__GRTNIC_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchog */ + smp_mb__before_atomic(); + clear_bit(__GRTNIC_SERVICE_SCHED, &adapter->state); +} + +static void grtnic_remove_adapter(struct grtnic_hw *hw) +{ + struct grtnic_adapter *adapter = hw->back; + + if ((!hw->dma_bar) || (!hw->user_bar)) + return; + hw->dma_bar = NULL; + hw->user_bar = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__GRTNIC_SERVICE_INITED, &adapter->state)) + grtnic_service_event_schedule(adapter); +} + +static u32 grtnic_check_remove(struct grtnic_hw *hw, u32 reg, u8 bar) +{ + u8 __iomem *reg_addr; + u8 __iomem *userbar_reg_addr; + u32 value; + int i; + + reg_addr = bar ? hw->dma_bar : hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return GRTNIC_FAILED_READ_REG; + + userbar_reg_addr = READ_ONCE(hw->user_bar); + /* Register read of 0xFFFFFFFF can indicate the adapter has been + * removed, so perform several status register reads to determine if + * the adapter has been removed. + */ + for (i = 0; i < GRTNIC_FAILED_READ_RETRIES; ++i) { + value = readl(userbar_reg_addr + XPHY_STATUS); + if (value != GRTNIC_FAILED_READ_REG) + break; + mdelay(3); + } + + if (value == GRTNIC_FAILED_READ_REG) + grtnic_remove_adapter(hw); + else + value = readl(reg_addr + reg); + + return value; +} + +static u32 grtnic_validate_register_read(struct grtnic_hw *_hw, u32 reg, u8 bar) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct grtnic_adapter *adapter = _hw->back; + + reg_addr = bar ? _hw->dma_bar : _hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return GRTNIC_FAILED_READ_REG; + for (i = 0; i < GRTNIC_DEAD_READ_RETRIES; ++i) { + value = readl(reg_addr + reg); + if (value != GRTNIC_DEAD_READ_REG) + break; + } + + if (value == GRTNIC_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +u32 grtnic_read_reg(struct grtnic_hw *hw, u32 reg, u8 bar) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = bar ? 
hw->dma_bar : hw->user_bar; + if (GRTNIC_REMOVED(reg_addr)) + return GRTNIC_FAILED_READ_REG; + + value = readl(reg_addr + reg); + if (unlikely(value == GRTNIC_FAILED_READ_REG)) + value = grtnic_check_remove(hw, reg, bar); + if (unlikely(value == GRTNIC_DEAD_READ_REG)) + value = grtnic_validate_register_read(hw, reg, bar); + return value; +} + +/** + * grtnic_service_timer - Timer Call-back + * @t: pointer to timer_list + **/ +static void grtnic_service_timer(struct timer_list *t) +{ + struct grtnic_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + + /* poll faster when waiting for link */ + if (adapter->flags & GRTNIC_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + grtnic_service_event_schedule(adapter); +} + +/** + * grtnic_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void grtnic_service_task(struct work_struct *work) +{ + struct grtnic_adapter *adapter = container_of(work, struct grtnic_adapter, service_task); + if (GRTNIC_REMOVED(adapter->hw.dma_bar)) { + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) { + rtnl_lock(); + grtnic_down(adapter); + rtnl_unlock(); + } + grtnic_service_event_complete(adapter); + return; + } + +// ixgbe_reset_subtask(adapter); +// ixgbe_phy_interrupt_subtask(adapter); +// ixgbe_sfp_detection_subtask(adapter); +// ixgbe_sfp_link_config_subtask(adapter); +// ixgbe_check_overtemp_subtask(adapter); + grtnic_watchdog_subtask(adapter); +// ixgbe_check_hang_subtask(adapter); + grtnic_service_event_complete(adapter); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct grtnic_adapter *adapter = NULL; + struct grtnic_hw *hw = NULL; + struct device *dev = &pdev->dev; + static int cards_found; + int err, pci_using_dac; + int csum_tx_mode = 0, csum_rx_mode = 0; + +#ifdef HAVE_TX_MQ + unsigned int indices = MAX_TX_QUEUES; +#endif /* HAVE_TX_MQ */ + bool disable_dev = false; + + u32 coresettings; + u8 mac_addr[6]; + + const struct grt_gigeth_info *ei = grt_gigeth_info_tbl[ent->driver_data]; //根据vidpid来配置对应的driver_data + + dev_info(dev, "adapter PCI probe"); + + // Enable device + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_mem_regions(pdev, DRIVER_NAME); + if (err) { + dev_err(pci_dev_to_dev(pdev), + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + // Disable ASPM + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_enable_pcie_error_reporting(pdev); +#endif /* HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING */ + + // Enable bus mastering for DMA + pci_set_master(pdev); + +#ifdef HAVE_TX_MQ + indices = min_t(int, 
ei->dma_channel_max, num_online_cpus()); + netdev = alloc_etherdev_mq(sizeof(struct grtnic_adapter), indices); +#else /* HAVE_TX_MQ */ + netdev = alloc_etherdev(sizeof(struct grtnic_adapter)); +#endif /* HAVE_TX_MQ */ + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, dev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->dev = dev; + adapter->pdev = pdev; + adapter->func = PCI_FUNC(pdev->devfn); + adapter->ei = ei; + hw = &adapter->hw; + hw->back = adapter; + if(ei->type == board_902T_GRT_FF) + hw->phy_addr = 0x01; + else + hw->phy_addr = 0x00; + + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->flags = 0; + + adapter->type = ei->port_type; //fiber or copper? + adapter->speed = ei->port_speed; + + adapter->rss_queues = 1; +#ifdef HAVE_TX_MQ + adapter->rss_queues = indices; + printk("rss_queues = %d\n", adapter->rss_queues); +#endif + +// adapter->flags |= FLAG_QUEUE_PAIRS; + + // Map BARs + err = grtnic_map_bars(adapter, pdev); + if (err) + { + dev_err(dev, "Failed to map bar"); + err = -EIO; + goto err_ioremap; + } + + grtnic_assign_netdev_ops(netdev); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + adapter->bd_number = cards_found; + + /* setup adapter struct */ + err = grtnic_sw_init(adapter); + if (err) + goto err_sw_init; + + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + grtnic_check_options(adapter); //内核加载参数这里设置 + +//reset_hw + coresettings = GRTNIC_READ_REG(hw, ((TARGET_CONFIG<<12) + ADDR_CORESETTINGS*4), 1); //main reason is for reset dma (RESET_LOGIC) + printk("Number of channels:%d\n", ((coresettings>>0) & 0xf)); + printk("Bus interface width:%d\n", ((coresettings>>19) & 0xf)*32); + printk("Bus master enable:%d\n", ((coresettings>>4) & 0x1)); + printk("Negotiated link width:X%d\n", ((coresettings>>5) & 0x3f)); + printk("Negotiated link rate:%d MTs\n", ((coresettings>>11) & 0x3)*2500); + printk("Max downstream payload:%d bytes\n", 128 << ((coresettings>>13) & 0x7)); + printk("Max upstream payload:%d bytes\n", 128 << ((coresettings>>16) & 0x7)); + + if(coresettings==GRTNIC_FAILED_READ_REG) { + e_dev_err("HW Init failed\n"); + goto err_sw_init; + } + + GRTNIC_WRITE_REG(hw, ASIC_RX_FIFO_RST, 0xff, 0); //reset all channel rx fifo data + GRTNIC_WRITE_REG(hw, ASIC_TX_FIFO_RST, 0xff, 0); //reset all channel tx fifo data + + if(adapter->flags & GRTNIC_FLAG_TXCSUM_CAPABLE) csum_tx_mode = 1; + if(adapter->flags & GRTNIC_FLAG_RXCSUM_CAPABLE) csum_rx_mode = 1; + + GRTNIC_WRITE_REG(hw, CSUM_ENABLE, (csum_rx_mode << 1 | csum_tx_mode), 0); //告诉asic, tx checksum offload + GRTNIC_WRITE_REG(hw, MAX_LED_PKT_NUM, (100<<16 | 1), 0); //200 is delay time and 1 is pkt number + + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_GSO; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + +// netdev->flags &= ~IFF_MULTICAST; + + if(csum_tx_mode) + netdev->features |= NETIF_F_HW_CSUM; +#ifdef NETIF_F_RXCSUM + if(csum_rx_mode) + netdev->features |= NETIF_F_RXCSUM; +#endif +#ifdef NETIF_F_RXHASH + netdev->features |= NETIF_F_RXHASH; +#endif /* NETIF_F_RXHASH */ + +#if defined(HAVE_NDO_SET_FEATURES) && !defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) + netdev->hw_features = netdev->features; +#endif + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9710 */ +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = GRTNIC_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + 
ETH_FCS_LEN); +#else + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = GRTNIC_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); +#endif //HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#endif //HAVE_NETDEVICE_MIN_MAX_MTU + + hw->mac.fc.fc_autoneg = false; + hw->mac.fc.current_mode = fc_rx_pause; + + grtnic_GetMacAddress(netdev, mac_addr); + + if (is_valid_ether_addr((unsigned char *)(mac_addr))) + eth_hw_addr_set(netdev, mac_addr); + else + { + memcpy(mac_addr, DEFAULT_ETHER_ADDRESS, netdev->addr_len); + mac_addr[netdev->addr_len-1] = adapter->func; + eth_hw_addr_set(netdev, mac_addr); + grtnic_SetMacAddress(netdev, netdev->dev_addr); //added + } + +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, mac_addr, netdev->addr_len); +#endif + + grtnic_SetMacPauseAddress(netdev, netdev->dev_addr); + + grtnic_SetPause(netdev, 1); //rx pause, tx off + + printk("add=%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->dev_addr[5],netdev->dev_addr[4],netdev->dev_addr[3],netdev->dev_addr[2],netdev->dev_addr[1],netdev->dev_addr[0]); + + grtnic_ResetRx(netdev); + grtnic_SetRx(netdev, 0); //disable rx + grtnic_ResetTx(netdev); + grtnic_SetTx(netdev, 0); //disable tx + + timer_setup(&adapter->service_timer, grtnic_service_timer, 0); + INIT_WORK(&adapter->service_task, grtnic_service_task); + + set_bit(__GRTNIC_SERVICE_INITED, &adapter->state); + clear_bit(__GRTNIC_SERVICE_SCHED, &adapter->state); + + err = grtnic_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + +// err = hw->mac.ops.start_hw(hw); +//主要调用了ixgbe_start_hw_generic:Clear statistics registers & Setup flow control + + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); + pci_set_drvdata(pdev, adapter); + err = register_netdev(netdev); + if (err) + goto err_register; + adapter->netdev_registered = true; + + +#ifdef HAVE_PCI_ERS + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + +#endif + + /* power down the optics for 82599 SFP+ fiber */ + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x01, 1); //disable laser; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ + netif_tx_stop_all_queues(netdev); + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & GRTNIC_FLAG_DCA_CAPABLE) { + ret = dca_add_requester(pci_dev_to_dev(pdev)); + switch (ret) { + case 0: + adapter->flags |= GRTNIC_FLAG_DCA_ENABLED; + grtnic_setup_dca(adapter); + break; + /* -19 is returned from the kernel when no provider is found */ + case -19: + printk("No DCA provider found. 
Please " + "start ioatdma for DCA functionality.\n"); + break; + default: + printk("DCA registration failed: %d\n", ret); + break; + } + } +#endif + + cards_found++; + +#ifdef GRTNIC_PROCFS + if (grtnic_procfs_init(adapter)) + e_err(probe, "failed to allocate procfs resources\n"); +#endif /* IXGBE_PROCFS */ + + // probe complete + return 0; + +err_register: + grtnic_clear_interrupt_scheme(adapter); +err_sw_init: + kfree(adapter->rss_key); + grtnic_free_bars(adapter, pdev); +err_ioremap: + disable_dev = !test_and_set_bit(__GRTNIC_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + + +static void grtnic_pci_remove(struct pci_dev *pdev) +{ + struct grtnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + bool disable_dev; + + dev_info(&pdev->dev, "grtnic PCI remove"); + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; + + set_bit(__GRTNIC_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) { + adapter->flags &= ~GRTNIC_FLAG_DCA_ENABLED; + dca_remove_requester(pci_dev_to_dev(pdev)); + write_register(GRTNIC_DCA_CTRL_DCA_DISABLE, adapter->dma_bar+ (TARGET_CONFIG<<12) + ADDR_DCA_GTCL*4); + } +#endif /* CONFIG_DCA */ + +#ifdef GRTNIC_PROCFS + grtnic_procfs_exit(adapter); +#endif /* GRTNIC_PROCFS */ + + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + grtnic_clear_interrupt_scheme(adapter); + grtnic_free_bars(adapter, pdev); + pci_release_regions(pdev); + kfree(adapter->rss_key); + disable_dev = !test_and_set_bit(__GRTNIC_DISABLED, &adapter->state); + free_netdev(netdev); + +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(pdev); +#endif /* HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING */ + + if (disable_dev) + pci_disable_device(pdev); +} + + +/* + * __grtnic_shutdown is not used when power management + * is disabled on older kernels (<2.6.12). causes a compile + * warning/error, because it is defined and not used. 
+ */ +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) +static int __grtnic_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct grtnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; +// u32 wufc = adapter->wol; + u32 wufc = 0; +#ifdef CONFIG_PM + int retval = 0; +#endif + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + grtnic_close_suspend(adapter); + + grtnic_clear_interrupt_scheme(adapter); + rtnl_unlock(); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + + *enable_wake = !!wufc; + + if (!test_and_set_bit(__GRTNIC_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ + + +#ifndef USE_REBOOT_NOTIFIER +static void grtnic_pci_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __grtnic_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#endif + + +static struct pci_driver grtnic_pci_driver = { + .name = DRIVER_NAME, + .id_table = grtnic_pci_tbl, + .probe = grtnic_pci_probe, + .remove = grtnic_pci_remove, +#ifndef USE_REBOOT_NOTIFIER + .shutdown = grtnic_pci_shutdown, +#endif +}; + +static int __init grtnic_init(void) +{ + int ret; + printk("Beijing GRT(R) NIC Network Driver - %s\n", DRIVER_VERSION); + printk("Copyright(c) 2020-2022 Beijing GRT Corporation.\n"); + + grtnic_wq = create_singlethread_workqueue(DRIVER_NAME); + if (!grtnic_wq) { + pr_err("%s: Failed to create workqueue\n", DRIVER_NAME); + return -ENOMEM; + } + +#ifdef GRTNIC_PROCFS + if (grtnic_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + + ret = pci_register_driver(&grtnic_pci_driver); + if (ret) + { + destroy_workqueue(grtnic_wq); +#ifdef GRTNIC_PROCFS + grtnic_procfs_topdir_exit(); +#endif + return ret; + } + +//#if IS_ENABLED(CONFIG_DCA) +// dca_register_notify(&dca_notifier); +//#endif + + return ret; +} + +static void __exit grtnic_exit(void) +{ +//#if IS_ENABLED(CONFIG_DCA) +// dca_unregister_notify(&dca_notifier); +//#endif + pci_unregister_driver(&grtnic_pci_driver); +#ifdef GRTNIC_PROCFS + grtnic_procfs_topdir_exit(); +#endif + destroy_workqueue(grtnic_wq); +} + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) +static int grtnic_notify_dca(struct notifier_block __always_unused *nb, unsigned long event, void __always_unused *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&grtnic_pci_driver.driver, NULL, &event, + __grtnic_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif + +const struct grt_gigeth_info grt_902eff_info = { + .type = board_902E_GRT_FF, + .dma_channel_max = 1, + .port_type = 0, + .port_speed = 0, +}; + +const struct grt_gigeth_info grt_902tff_info = { + .type = board_902T_GRT_FF, + .dma_channel_max = 1, + .port_type = 1, + .port_speed = 0, +}; + +const struct grt_gigeth_info grt_901elr_info = { + .type = board_901ELR_GRT_FF, + .dma_channel_max = 1, + .port_type = 0, + .port_speed = 0, +}; + +const struct grt_gigeth_info grt_1002eff_info = { + .type = board_1002E_GRT_FF, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +const struct grt_gigeth_info grt_1001eff_info = { + .type = board_1001E_GRT_FF, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +const struct grt_gigeth_info qm_1001eff_info = { + .type = board_1001E_QM_FF, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +const struct grt_gigeth_info grt_1005efx_info = { + .type = board_1005E_GRT_FX, + .dma_channel_max = 8, + .port_type = 0, + .port_speed = 1, +}; + +module_init(grtnic_init); +module_exit(grtnic_exit); \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_netdev.c b/drivers/net/ethernet/guangruntong/grtnic_netdev.c new file mode 100755 index 0000000000000..368350456f6c0 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_netdev.c @@ -0,0 +1,3451 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include + +#include +#include +#include +#ifdef NETIF_F_TSO +#include +#endif +#include +#include + +#include "grtnic.h" +#include "grtnic_nvm.h" +#include "grtnic_macphy.h" + +/* only works for sizes that are powers of 2 */ +#define GRTNIC_ROUNDUP_SIZE(i, size) ( (size) - ((i) & ((size) - 1)) ) + +static void grtnic_clean_tx_ring(struct grtnic_ring *tx_ring); +static void grtnic_clean_rx_ring(struct grtnic_ring *rx_ring); + +#ifdef NETIF_F_RXHASH +static inline void grtnic_rx_hash(struct grtnic_ring *ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ + u16 rss_type; + + if (!(netdev_ring(ring)->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & 0x0f; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (rss_type & 0xc0) ? //tcp or udp + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} +#endif /* NETIF_F_RXHASH */ + +static int grtnic_desc_unused(struct grtnic_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +static inline void grtnic_release_rx_desc(struct grtnic_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; +#endif + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
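+ * The wmb() below orders the descriptor writes ahead of the tail register write, so the hardware never fetches a descriptor before its contents are visible in memory.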
+ */ + wmb(); + writel(val, rx_ring->tail); //rx_ring->tail, 这个地方别忘记设置,desc要在clean_rx_irq里面清0 +} + +///////////////////////////////////////////////////////////////////////////////////////////////////// +#ifdef CONFIG_DISABLE_PACKET_SPLIT +static bool grtnic_alloc_mapped_skb(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *buffer_info) +{ + struct sk_buff *skb = buffer_info->skb; + dma_addr_t dma = buffer_info->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), rx_ring->rx_buffer_len); + + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + buffer_info->skb = skb; + } + + dma = dma_map_single(rx_ring->dev, skb->data, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + buffer_info->dma = dma; + buffer_info->length = rx_ring->rx_buffer_len; + return true; +} + +#else /* CONFIG_DISABLE_PACKET_SPLIT */ + +static inline unsigned int grtnic_rx_offset(struct grtnic_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? GRTNIC_SKB_PAD : 0; +} + +static bool grtnic_alloc_mapped_page(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *buffer_info) +{ + struct page *page = buffer_info->page; + dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(grtnic_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, grtnic_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + buffer_info->dma = dma; + buffer_info->page = page; + buffer_info->page_offset = grtnic_rx_offset(rx_ring); +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + page_ref_add(page, USHRT_MAX - 1); + buffer_info->pagecnt_bias = USHRT_MAX; +#else + buffer_info->pagecnt_bias = 1; +#endif + rx_ring->rx_stats.alloc_rx_page++; +// buffer_info->length = grtnic_rx_bufsz(rx_ring); +// buffer_info->length = GRTNIC_RX_BUFSZ; //注意,这里告知asic缓冲区大小不是整个page,因为整个page可能有几个缓冲区 + +// printk("offset = %d, length = %d\n", buffer_info->page_offset, buffer_info->length); + + return true; +} +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ +///////////////////////////////////////////////////////////////////////////////////////////////////// + +void grtnic_alloc_rx_buffers(struct grtnic_ring *rx_ring, u16 cleaned_count) +{ + union grtnic_rx_desc *rx_desc; + struct grtnic_rx_buffer *buffer_info; + u16 i = rx_ring->next_to_use; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + u16 bufsz; +#endif + + /* nothing to do */ + if (!cleaned_count) + return; 
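+ /* The index is biased by -count below so the wrap check is a simple test against zero; each pass maps a fresh buffer and fully rewrites the descriptor, since device write-back clobbers it. */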
+ + rx_desc = GRTNIC_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + bufsz = grtnic_rx_bufsz(rx_ring); +#endif + + do { +#ifdef CONFIG_DISABLE_PACKET_SPLIT + if (!grtnic_alloc_mapped_skb(rx_ring, buffer_info)) + break; +#else + if (!grtnic_alloc_mapped_page(rx_ring, buffer_info)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, buffer_info->dma, + buffer_info->page_offset, bufsz, + DMA_FROM_DEVICE); +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + rx_desc->read.src_addr = cpu_to_le64(buffer_info->dma); + rx_desc->read.len_ctl.len = cpu_to_le16(buffer_info->length); +#else + rx_desc->read.src_addr = cpu_to_le64(buffer_info->dma + buffer_info->page_offset); + rx_desc->read.len_ctl.len = cpu_to_le16(bufsz); +#endif + rx_desc->read.len_ctl.desc_num = 0; + rx_desc->read.len_ctl.chl = 0; + rx_desc->read.len_ctl.cmp = 0; + rx_desc->read.len_ctl.sop = 0; + rx_desc->read.len_ctl.eop = 0; + + rx_desc++; + buffer_info++; + i++; + + if (unlikely(!i)) { + rx_desc = GRTNIC_RX_DESC(*rx_ring, 0); + buffer_info = &rx_ring->rx_buffer_info[0]; + i -= rx_ring->count; + } + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + grtnic_release_rx_desc(rx_ring, i); +} + +static inline bool grtnic_container_is_rx(struct grtnic_q_vector *q_vector, struct grtnic_ring_container *rc) +{ + return &q_vector->rx == rc; +} +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void grtnic_update_itr(struct grtnic_q_vector *q_vector, struct grtnic_ring_container *ring_container) +{ + unsigned int itr = ITR_ADAPTIVE_MIN_USECS | ITR_ADAPTIVE_LATENCY; + unsigned int avg_wire_size, packets, bytes; + unsigned long next_update = jiffies; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!ring_container->ring) + return; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, ring_container->next_update)) + goto clear_counts; + + packets = ring_container->total_packets; + bytes = ring_container->total_bytes; + + if (grtnic_container_is_rx(q_vector, ring_container)) { + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. 
+ */ + if (packets && packets < 24 && bytes < 12112) { + itr = ITR_ADAPTIVE_LATENCY; + avg_wire_size = (bytes + packets * 24) * 2; + avg_wire_size = clamp_t(unsigned int, avg_wire_size, 2560, 12800); + goto adjust_for_speed; + } + } + + /* Less than 48 packets we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 48) { + itr = (q_vector->itr >> 2) + ITR_ADAPTIVE_MIN_INC; + if (itr > ITR_ADAPTIVE_MAX_USECS) + itr = ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. + * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. + */ + if (packets < 8) + itr += ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & ITR_ADAPTIVE_LATENCY; + goto clear_counts; + } + + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. + */ + if (packets < 96) { + itr = q_vector->itr >> 2; + goto clear_counts; + } + + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = q_vector->itr >> 3; + if (itr < ITR_ADAPTIVE_MIN_USECS) + itr = ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. + */ + itr = ITR_ADAPTIVE_BULK; + + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 50k ints/sec */ + avg_wire_size = 5120; + } else if (avg_wire_size <= 316) { + /* 50K ints/sec to 16K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 2720; + } else if (avg_wire_size <= 1084) { + /* 16K ints/sec to 9.2K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 9.2K ints/sec to 8K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 8K ints/sec */ + avg_wire_size = 32256; + } + +adjust_for_speed: + /* Resultant value is 256 times larger than it needs to be. 
This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. + */ + + if (q_vector->adapter->speed == 1) //10G + itr += DIV_ROUND_UP(avg_wire_size, ITR_ADAPTIVE_MIN_INC * 256) * ITR_ADAPTIVE_MIN_INC; + else //1G + itr += DIV_ROUND_UP(avg_wire_size, ITR_ADAPTIVE_MIN_INC * 64) * ITR_ADAPTIVE_MIN_INC; + + /* In the case of a latency specific workload only allow us to + * reduce the ITR by at most 2us. By doing this we should dial + * in so that our number of interrupts is no more than 2x the number + * of packets for the least busy workload. So for example in the case + * of a TCP worload the ack packets being received would set the + * the interrupt rate as they are a latency specific workload. + */ + if ((itr & ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr) + itr = ring_container->itr - ITR_ADAPTIVE_MIN_INC; + +clear_counts: + /* write back value */ + ring_container->itr = itr; + + /* next update should occur within next jiffy */ + ring_container->next_update = next_update + 1; + + ring_container->total_bytes = 0; + ring_container->total_packets = 0; +} + +void grtnic_write_itr (struct grtnic_q_vector *q_vector) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & MAX_EITR; + + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_ITR*4), (v_idx<<16 | itr_reg), 1); +} + + +static void grtnic_set_itr(struct grtnic_q_vector *q_vector) +{ + u32 new_itr; + + grtnic_update_itr(q_vector, &q_vector->tx); + grtnic_update_itr(q_vector, &q_vector->rx); + + /* use the smallest value of new ITR delay calculations */ + new_itr = min(q_vector->rx.itr, q_vector->tx.itr); + + /* Clear latency flag if set, shift into correct position */ + new_itr &= ITR_ADAPTIVE_MASK_USECS; + new_itr <<= 2; + + if (new_itr != q_vector->itr) { + /* save the algorithm value here */ + q_vector->itr = new_itr; +// printk("new_itr = %d\n", new_itr); + grtnic_write_itr(q_vector); + } +} + +#ifdef CONFIG_DISABLE_PACKET_SPLIT +/** + * ixgbe_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the heads pointer to tail intact. + **/ +static inline struct sk_buff *grtnic_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = GRTNIC_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + GRTNIC_CB(tail)->head = NULL; + + return head; +} + +/** + * ixgbe_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet ad it's stats to the main + * skb. That is done via ixgbe_merge_active_tail. 
+ **/ +static inline void grtnic_add_active_tail(struct sk_buff *head, struct sk_buff *tail) +{ + struct sk_buff *old_tail = GRTNIC_CB(head)->tail; + + if (old_tail) { + grtnic_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + GRTNIC_CB(tail)->head = head; + GRTNIC_CB(head)->tail = tail; +} + +/** + * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool grtnic_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = GRTNIC_CB(head)->tail; + + if (!tail) + return false; + + grtnic_merge_active_tail(tail); + + GRTNIC_CB(head)->tail = NULL; + + return true; +} + +#endif + + +static void grtnic_process_skb_fields(struct grtnic_ring *rx_ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ + struct net_device *netdev = netdev_ring(rx_ring); + u8 TCPCS, UDPCS, IPCS, CSUM_OK, UDP_CSUM_FLAG; + +#ifdef NETIF_F_RXHASH + grtnic_rx_hash(rx_ring, rx_desc, skb); +#endif /* NETIF_F_RXHASH */ + + CSUM_OK = rx_desc->wb.upper.rx_info.csum_ok; + IPCS = rx_desc->wb.upper.rx_info.ipcs; + TCPCS = rx_desc->wb.upper.rx_info.tcpcs; + UDPCS = rx_desc->wb.upper.rx_info.udpcs; + UDP_CSUM_FLAG = rx_desc->wb.upper.rx_info.udp_csum_flag; + +// printk("CSUM_OK=%d, IPCS=%d, TCPCS=%d, UDPCS=%d, UDP_CSUM_FLAG=%d\n", CSUM_OK, IPCS, TCPCS, UDPCS, UDP_CSUM_FLAG); + + if((netdev->features & NETIF_F_RXCSUM) && IPCS) //is ip protocol + { + if((TCPCS & CSUM_OK) || (UDPCS & CSUM_OK & UDP_CSUM_FLAG)) //UDP_CSUM_FLAG means: udp checksum not is 0 + { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + else if(TCPCS || (UDPCS & UDP_CSUM_FLAG)) + { + printk("CSUM_OK=%d, IPCS=%d, TCPCS=%d, UDPCS=%d, UDP_CSUM_FLAG=%d\n", CSUM_OK, IPCS, TCPCS, UDPCS, UDP_CSUM_FLAG); + rx_ring->rx_stats.csum_err++; + } + } + + skb_record_rx_queue(skb, ring_queue_index(rx_ring)); + + skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); +} + + +void grtnic_rx_skb(struct grtnic_q_vector *q_vector, + struct grtnic_ring *rx_ring, + union grtnic_rx_desc *rx_desc, + struct sk_buff *skb) +{ +#ifdef HAVE_NDO_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (grtnic_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif + + napi_gro_receive(&q_vector->napi, skb); + +#ifndef NETIF_F_GRO + netdev_ring(rx_ring)->last_rx = jiffies; +#endif +} + + +static bool grtnic_is_non_eop(struct grtnic_ring *rx_ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + struct sk_buff *next_skb; +#endif + + u32 ntc = rx_ring->next_to_clean + 1; + + rx_desc->wb.upper.len_ctl.cmp = 0; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(GRTNIC_RX_DESC(*rx_ring, ntc)); + + if (likely(rx_desc->wb.upper.len_ctl.eop)) + return false; + + /* place skb in next buffer to be received */ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + next_skb = rx_ring->rx_buffer_info[ntc].skb; + + grtnic_add_active_tail(skb, next_skb); + GRTNIC_CB(next_skb)->head = skb; +#else + rx_ring->rx_buffer_info[ntc].skb = skb; +#endif + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +#ifdef CONFIG_DISABLE_PACKET_SPLIT +/* grtnic_clean_rx_irq -- * legacy */ +static int grtnic_clean_rx_irq(struct grtnic_q_vector *q_vector, int budget) +{ + struct grtnic_ring *rx_ring = q_vector->rx.ring; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +//#if IS_ENABLED(CONFIG_FCOE) +// int ddp_bytes; +// unsigned int mss = 0; +//#endif /* CONFIG_FCOE */ + u16 len = 0; + u16 cleaned_count = grtnic_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + struct grtnic_rx_buffer *rx_buffer; + union grtnic_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= GRTNIC_RX_BUFFER_WRITE) { + grtnic_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = GRTNIC_RX_DESC(*rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!rx_desc->wb.upper.len_ctl.cmp) + break; + +// printk("rx len = %d, desc_num = %d, chl = %d, cmp = %d, rs = %d, irq = %d, eop = %d, sop = %d\n", rx_desc->len_ctl.len, +// rx_desc->len_ctl.desc_num,rx_desc->len_ctl.chl,rx_desc->len_ctl.cmp,rx_desc->len_ctl.rs,rx_desc->len_ctl.irq, +// rx_desc->len_ctl.eop,rx_desc->len_ctl.sop); + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + len = le16_to_cpu(rx_desc->wb.upper.len_ctl.len); + /* pull the header of the skb in */ + __skb_put(skb, len); + +// printk("rx len = %d\n", len); + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. 
Only unmap it when EOP is reached + */ + if (!GRTNIC_CB(skb)->head) { + GRTNIC_CB(skb)->dma = rx_buffer->dma; + } else { + skb = grtnic_merge_active_tail(skb); + dma_unmap_single(rx_ring->dev, rx_buffer->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + } + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (grtnic_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + dma_unmap_single(rx_ring->dev, GRTNIC_CB(skb)->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + GRTNIC_CB(skb)->dma = 0; + + if (grtnic_close_active_frag_list(skb) && !GRTNIC_CB(skb)->append_cnt) { + /* if we got here without RSC the packet is invalid */ + dev_kfree_skb_any(skb); + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + grtnic_process_skb_fields(rx_ring, rx_desc, skb); + + grtnic_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (cleaned_count) + grtnic_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_rx_packets; +} + +#else /* CONFIG_DISABLE_PACKET_SPLIT */ + +static void grtnic_reuse_rx_page(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *old_buff) +{ + struct grtnic_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static inline bool grtnic_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool grtnic_can_reuse_rx_page(struct grtnic_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(grtnic_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +#else + if (unlikely((page_count(page) - pagecnt_bias) > 1)) +#endif + return false; +#else + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define GRTNIC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - GRTNIC_RXBUFFER_3K) + if (rx_buffer->page_offset > GRTNIC_LAST_OFFSET) + return false; +#endif + +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } +#else + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } +#endif + + return true; +} + +static void grtnic_add_rx_frag(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *rx_buffer, struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = grtnic_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(GRTNIC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static void grtnic_dma_sync_frag(struct grtnic_ring *rx_ring, struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(GRTNIC_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, GRTNIC_CB(skb)->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + GRTNIC_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + GRTNIC_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + } +} + + +/////////////////////////////////////////////////////////////// + +static struct grtnic_rx_buffer *grtnic_get_rx_buffer(struct grtnic_ring *rx_ring, + union grtnic_rx_desc *rx_desc, struct sk_buff **skb, const unsigned int size) +{ + struct grtnic_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + if (!likely(rx_desc->wb.upper.len_ctl.eop)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + grtnic_dma_sync_frag(rx_ring, *skb); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void grtnic_put_rx_buffer(struct grtnic_ring *rx_ring, struct grtnic_rx_buffer *rx_buffer, struct sk_buff *skb) +{ +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + if (grtnic_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + grtnic_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (!IS_ERR(skb) && GRTNIC_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + GRTNIC_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + } + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} + +static struct sk_buff *grtnic_construct_skb(struct grtnic_ring *rx_ring, + struct grtnic_rx_buffer *rx_buffer, + union grtnic_rx_desc *rx_desc, + unsigned int size) +{ + + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = grtnic_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(GRTNIC_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, GRTNIC_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + if (size > GRTNIC_RX_HDR_SIZE) { + if (!likely(rx_desc->wb.upper.len_ctl.eop)) + GRTNIC_CB(skb)->dma = rx_buffer->dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +static struct sk_buff *grtnic_build_skb(struct grtnic_ring *rx_ring, + struct grtnic_rx_buffer *rx_buffer, + union grtnic_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = grtnic_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(GRTNIC_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - GRTNIC_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, GRTNIC_SKB_PAD); + 
__skb_put(skb, size); + + /* record DMA address if this is the start of a chain of buffers */ + if (!likely(rx_desc->wb.upper.len_ctl.eop)) + GRTNIC_CB(skb)->dma = rx_buffer->dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ + +static void grtnic_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, GRTNIC_RX_HDR_SIZE); +// pull_len = eth_get_headlen(va, GRTNIC_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static bool grtnic_cleanup_headers(struct grtnic_ring *rx_ring, union grtnic_rx_desc *rx_desc, struct sk_buff *skb) +{ + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + grtnic_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/* grtnic_clean_rx_irq -- * packet split */ +static int grtnic_clean_rx_irq(struct grtnic_q_vector *q_vector, int budget) +{ + struct grtnic_ring *rx_ring = q_vector->rx.ring; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +//#if IS_ENABLED(CONFIG_FCOE) +// int ddp_bytes; +// unsigned int mss = 0; +//#endif /* CONFIG_FCOE */ + u16 cleaned_count = grtnic_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + union grtnic_rx_desc *rx_desc; + struct grtnic_rx_buffer *rx_buffer; + struct sk_buff *skb; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= GRTNIC_RX_BUFFER_WRITE) { + grtnic_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = GRTNIC_RX_DESC(*rx_ring, rx_ring->next_to_clean); + if (!rx_desc->wb.upper.len_ctl.cmp) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + size = le16_to_cpu(rx_desc->wb.upper.len_ctl.len); + rx_buffer = grtnic_get_rx_buffer(rx_ring, rx_desc, &skb, size); + + /* retrieve a buffer from the ring */ + if (skb) { + grtnic_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = grtnic_build_skb(rx_ring, rx_buffer, rx_desc, size); +#endif + } else { + skb = grtnic_construct_skb(rx_ring, rx_buffer, rx_desc, size); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + grtnic_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (grtnic_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if 
(grtnic_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + grtnic_process_skb_fields(rx_ring, rx_desc, skb); + + grtnic_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifdef HAVE_NDO_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int grtnic_busy_poll_recv(struct napi_struct *napi) +{ + struct grtnic_q_vector *q_vector = + container_of(napi, struct grtnic_q_vector, napi); + struct grtnic_adapter *adapter = q_vector->adapter; + int found = 0; + + if (test_bit(__GRTNIC_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!grtnic_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + found = grtnic_clean_rx_irq(q_vector, 4); +#ifdef BP_EXTENDED_STATS + if (found) + q_vector->rx.ring->stats.cleaned += found; + else + q_vector->rx.ring->stats.misses++; +#endif +// if (found) +// break; + + grtnic_qv_unlock_poll(q_vector); + + return found; +} + +#endif /* HAVE_NDO_BUSY_POLL */ + +static bool grtnic_clean_tx_irq_reg(struct grtnic_q_vector *q_vector, int napi_budget) +{ + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_ring *tx_ring = q_vector->tx.ring; + struct grtnic_tx_buffer *tx_buffer; + union grtnic_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__GRTNIC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = GRTNIC_TX_DESC(*tx_ring, i); + i -= tx_ring->count; + + do { + union grtnic_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + if (!eop_desc->wb.len_ctl.cmp) + break; + +// printk("tx len = %d, desc_num = %d, chl = %d, cmp = %d, rs = %d, irq = %d, eop = %d, sop = %d\n", tx_desc->len_ctl.len, +// tx_desc->len_ctl.desc_num,tx_desc->len_ctl.chl,tx_desc->len_ctl.cmp,tx_desc->len_ctl.rs,tx_desc->len_ctl.irq, +// tx_desc->len_ctl.eop,tx_desc->len_ctl.sop); + + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + 
dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + +// printk("next_to_clean = %d\n", i); + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(netdev_ring(tx_ring)) && + (grtnic_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(netdev_ring(tx_ring), + ring_queue_index(tx_ring)) + && !test_bit(__GRTNIC_DOWN, &q_vector->adapter->state)) { + netif_wake_subqueue(netdev_ring(tx_ring), + ring_queue_index(tx_ring)); + ++tx_ring->tx_stats.restart_queue; + } +#else + if (netif_queue_stopped(netdev_ring(tx_ring)) && + !test_bit(__GRTNIC_DOWN, &q_vector->adapter->state)) { + netif_wake_queue(netdev_ring(tx_ring)); + ++tx_ring->tx_stats.restart_queue; + } +#endif + } + + return !!budget; +} + + +/** + * grtnic_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +int grtnic_poll(struct napi_struct *napi, int budget) +{ + struct grtnic_q_vector *q_vector = container_of(napi, struct grtnic_q_vector, napi); + struct grtnic_adapter *adapter = q_vector->adapter; + struct grtnic_hw *hw = &adapter->hw; + int work_done = 0; + bool clean_complete = true; + u32 var; + +// bool clean_complete = true; +// int work_done = 0; +// int cleaned = 0; + +#if 0 +//#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & GRTNIC_FLAG_DCA_ENABLED) + grtnic_update_dca(q_vector); +#endif /* CONFIG_DCA */ + + + if (q_vector->tx.ring) + { + if(!grtnic_clean_tx_irq_reg(q_vector, budget)) + clean_complete = false; + } + +#ifdef HAVE_NDO_BUSY_POLL + if (test_bit(NAPI_STATE_NPSVC, &napi->state)) + return budget; + + /* Exit if we are called by netpoll or busy polling is active */ + if ((budget <= 0) || !grtnic_qv_lock_napi(q_vector)) + return budget; +#else + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; +#endif + + if (q_vector->rx.ring) + { + int cleaned = grtnic_clean_rx_irq(q_vector, budget); + work_done += cleaned; + + if (cleaned >= budget) + clean_complete = false; + } + + +#ifdef HAVE_NDO_BUSY_POLL + grtnic_qv_unlock_napi(q_vector); +#endif +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; +#endif + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + /* all work done, exit the polling mode */ + if (likely(napi_complete_done(napi, work_done))) { + if (adapter->rx_itr_setting == 1) + grtnic_set_itr(q_vector); + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) + { + if 
(adapter->flags & GRTNIC_FLAG_MSIX_ENABLED)
+ var = q_vector->eims_value;
+ else
+ var = ~0;
+
+ GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), var, 1);
+ }
+ }
+ return min(work_done, budget - 1);
+}
+
+
+static void grtnic_trigger_lsc(struct grtnic_adapter *adapter)
+{
+ struct grtnic_hw *hw = &adapter->hw;
+ GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_ICS*4), adapter->eims_other, 1); //trigger user interrupt
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+int grtnic_setup_tx_resources(struct grtnic_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int node = -1;
+ int size;
+
+ size = sizeof(struct grtnic_tx_buffer) * tx_ring->count;
+
+ if (tx_ring->q_vector)
+ node = tx_ring->q_vector->node;
+
+ tx_ring->tx_buffer_info = vmalloc_node(size, node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vmalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err_tx_buffer;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union grtnic_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ set_dev_node(dev, node);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!tx_ring->desc)
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc)
+ goto err_tx_ring_dma;
+
+
+ set_dev_node(dev, node);
+ tx_ring->desc_wb = dma_alloc_coherent(dev, sizeof(struct grtnic_desc_wb), &tx_ring->desc_wb_dma, GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!tx_ring->desc_wb)
+ tx_ring->desc_wb = dma_alloc_coherent(dev, sizeof(struct grtnic_desc_wb), &tx_ring->desc_wb_dma, GFP_KERNEL);
+
+ if (!tx_ring->desc_wb)
+ goto err_tx_ring_wb;
+
+ ((struct grtnic_desc_wb *) tx_ring->desc_wb)->desc_hw_ptr = 0;
+
+// tx_ring->next_to_use = 0; //check: these are already set elsewhere, so they are not needed here
+// tx_ring->next_to_clean = 0;
+//
+//#ifndef CONFIG_DISABLE_PACKET_SPLIT
+// tx_ring->next_to_alloc = 0;
+//#endif
+
+ return 0;
+err_tx_ring_wb:
+ dma_free_coherent(dev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+err_tx_ring_dma:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+err_tx_buffer:
+ printk("Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+int grtnic_setup_rx_resources(struct grtnic_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int node = -1;
+ int size;
+
+ size = sizeof(struct grtnic_rx_buffer) * rx_ring->count;
+
+ if (rx_ring->q_vector)
+ node = rx_ring->q_vector->node;
+
+ rx_ring->rx_buffer_info = vmalloc_node(size, node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vmalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err_rx_buffer;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union grtnic_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ set_dev_node(dev, node);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!rx_ring->desc)
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc)
+ goto err_rx_ring_dma;
+
+// rx_ring->next_to_clean = 0; //check: these are already set elsewhere, so they are not needed here
+// rx_ring->next_to_use = 0;
+//
+//#ifndef CONFIG_DISABLE_PACKET_SPLIT
+// rx_ring->next_to_alloc = 0;
+//#endif
+
+ return 0;
+
+err_rx_ring_dma:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+err_rx_buffer:
+ printk("Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+}
+
+
+void grtnic_free_tx_resources(struct grtnic_ring *tx_ring)
+{
+ grtnic_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+
+ dma_free_coherent(tx_ring->dev, sizeof(struct grtnic_desc_wb),
+ tx_ring->desc_wb, tx_ring->desc_wb_dma);
+ tx_ring->desc_wb = NULL;
+}
+
+void grtnic_free_rx_resources(struct grtnic_ring *rx_ring)
+{
+ grtnic_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * grtnic_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int grtnic_setup_all_tx_resources(struct grtnic_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = grtnic_setup_tx_resources(adapter->tx_ring[i]);
+ if (!err)
+ continue;
+
+ e_err(probe, "Allocation for Tx Queue %u failed\n", i);
+ goto err_setup_tx;
+ }
+
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ grtnic_free_tx_resources(adapter->tx_ring[i]);
+ return err;
+}
+
+/**
+ * grtnic_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int grtnic_setup_all_rx_resources(struct grtnic_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = grtnic_setup_rx_resources(adapter->rx_ring[i]);
+ if (!err)
+ continue;
+
+ e_err(probe, "Allocation for Rx Queue %u failed\n", i);
+ goto err_setup_rx;
+ }
+
+ return 0;
+
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ grtnic_free_rx_resources(adapter->rx_ring[i]);
+ return err;
+}
+
+
+/**
+ * grtnic_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void grtnic_free_all_tx_resources(struct grtnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ grtnic_free_tx_resources(adapter->tx_ring[i]);
+}
+
+
+/**
+ * grtnic_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void grtnic_free_all_rx_resources(struct grtnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ grtnic_free_rx_resources(adapter->rx_ring[i]);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/**
+ * grtnic_configure_tx_ring - Configure Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void grtnic_configure_tx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring)
+{
+ struct grtnic_hw *hw = &adapter->hw;
+ u32 w;
+ u32 txdctl = (1u << 25); /* LWTHRESH */
+ u8 reg_idx = ring->reg_idx;
+
+ /* flush pending descriptor writebacks to memory */
+// GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (TX_INT_DELAY | GRTNIC_TIDV_FPD), 1);
+ /* execute the writes immediately */
+ GRTNIC_WRITE_FLUSH(hw);
+
+ /* write lower 32-bit of bus address of transfer first descriptor */
+ w = cpu_to_le32(PCI_DMA_L(ring->dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_ADDRLO*4), w, 1);
+ /* write upper 32-bit of bus address of transfer first descriptor */
+ w = cpu_to_le32(PCI_DMA_H(ring->dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_ADDRHI*4), w, 1);
+ /* write lower 32-bit of bus address of desc write back address*/
+ w = cpu_to_le32(PCI_DMA_L(ring->desc_wb_dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_WBADDRLO*4), w, 1);
+ /* write upper 32-bit of bus address of desc write back address*/
+ w = cpu_to_le32(PCI_DMA_H(ring->desc_wb_dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_WBADDRHI*4), w, 1);
+
+ /* setup max SG num */
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_SG_MAXNUM*4), ring->count, 1);
+// /* Set the Tx Interrupt Delay register TIDV */ /* already programmed above for the flush, so not repeated here */
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), TX_INT_DELAY, 1);
+// write_register(tx_int_delay, adapter->dma_bar+ (TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4);
+
+ ring->tail = hw->dma_bar + (TARGET_H2C<<12) + (reg_idx <<8) + (ADDR_SG_SWPT*4);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ /*
+ * set WTHRESH to encourage burst writeback, it should not be set
+ * higher than 1 when:
+ * - ITR is 0 as it could cause false TX hangs
+ * - ITR is set to > 100k int/sec and BQL is enabled
+ *
+ * In order to avoid issues WTHRESH + PTHRESH should always be equal
+ * to or less than the number of on chip descriptors, which is
+ * currently 40.
+ */
+ if (!ring->q_vector || (ring->q_vector->itr < GRTNIC_100K_ITR))
+ txdctl |= (1 << 16); /* WTHRESH = 1 */
+ else
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /*
+ * Setting PTHRESH to 32 both improves performance
+ * and avoids a TX hang with DFP enabled
+ */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
+
+ //PTHRESH=32, HTHRESH=1, WTHRESH=1, LWTHRESH=1: in prefetch mode the descriptor fetch is only issued once everything else is idle
+// write_register(GRTNIC_TXDCTL_DMA_BURST_ENABLE, adapter->dma_bar+ (TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4);
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4), txdctl, 1);
+// GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4), GRTNIC_TXDCTL_DMA_BURST_ENABLE, 1);
+
+ /* initialize XPS */
+ if (!test_and_set_bit(__GRTNIC_TX_XPS_INIT_DONE, &ring->state)) {
+ struct grtnic_q_vector *q_vector = ring->q_vector;
+
+ if (q_vector)
+ netif_set_xps_queue(adapter->netdev, get_cpu_mask(reg_idx % adapter->rss_queues), ring->queue_index);
+// netif_set_xps_queue(adapter->netdev, &q_vector->affinity_mask, ring->queue_index);
+ }
+
+ clear_bit(__GRTNIC_HANG_CHECK_ARMED, &ring->state);
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct grtnic_tx_buffer) * ring->count);
+
+ /* TX dma engine start */
+ GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x01, 1);
+}
+
+/**
+ * grtnic_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void grtnic_configure_tx(struct grtnic_adapter *adapter)
+{
+ u32 i;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ grtnic_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+
+void grtnic_configure_rx_ring(struct grtnic_adapter *adapter, struct grtnic_ring *ring)
+{
+ struct grtnic_hw *hw = &adapter->hw;
+ u32 w;
+ u32 rxdctl = (1u << 25); /* LWTHRESH */
+ u8 reg_idx = ring->reg_idx;
+
+ union grtnic_rx_desc *rx_desc;
+
+ /* flush pending descriptor writebacks to memory */
+// GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (RX_INT_DELAY | GRTNIC_RDTR_FPD), 1);
+ /* execute the writes immediately */
+ GRTNIC_WRITE_FLUSH(hw);
+
+ w = cpu_to_le32(PCI_DMA_L(ring->dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_SG_ADDRLO*4), w, 1);
+ /* write upper 32-bit of bus address of transfer first descriptor */
+ w = cpu_to_le32(PCI_DMA_H(ring->dma));
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_SG_ADDRHI*4), w, 1);
+
+ /* setup max SG num */
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_SG_MAXNUM*4), ring->count, 1);
+// /* set the Receive Delay Timer Register RDTR #define BURST_RDTR 0x20 */ /* already executed above for the flush, so not repeated here */
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), RX_INT_DELAY, 1);
+// write_register(rx_int_delay, adapter->dma_bar+ (TARGET_C2H<<12) + (channel<<8) + ADDR_INT_DELAY*4);
+
+ ring->tail = hw->dma_bar + (TARGET_C2H<<12) + (reg_idx <<8) + (ADDR_SG_SWPT*4);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+#ifndef CONFIG_DISABLE_PACKET_SPLIT
+ ring->next_to_alloc = 0;
+#endif
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct grtnic_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = GRTNIC_RX_DESC(*ring, 0);
+ rx_desc->wb.upper.len_ctl.cmp = 0;
+
+ rxdctl = GRTNIC_RXDCTL_DMA_BURST_ENABLE;
+ //PTHRESH=32, HTHRESH=4, WTHRESH=4, LWTHRESH=1: unlike the Intel behaviour, when the descriptor count drops below LWTHRESH the descriptor fetch gets the highest priority and is issued immediately regardless of whether the bus is busy; otherwise descriptors are prefetched at the lowest priority
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_DESC_CTRL*4), rxdctl, 1);
+
+ /* RX dma engine start */
+ GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x01, 1);
+
+ grtnic_alloc_rx_buffers(ring, grtnic_desc_unused(ring));
+}
+
+
+static void grtnic_set_rx_buffer_len(struct grtnic_adapter *adapter)
+{
+ struct grtnic_ring *rx_ring;
+ int i;
+
+#if defined(CONFIG_DISABLE_PACKET_SPLIT) || (defined (HAVE_SWIOTLB_SKIP_CPU_SYNC) && (PAGE_SIZE < 8192))
+ int max_frame = adapter->max_frame_size;
+#endif
+
+#ifdef CONFIG_DISABLE_PACKET_SPLIT
+ max_frame += VLAN_HLEN;
+ if(max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+ max_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ max_frame = ALIGN(max_frame, 1024);
+#endif
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring = adapter->rx_ring[i];
+
+#ifndef CONFIG_DISABLE_PACKET_SPLIT
+ clear_bit(__GRTNIC_RX_3K_BUFFER, &rx_ring->state);
+ clear_bit(__GRTNIC_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+ rx_ring->rx_buffer_len = GRTNIC_RXBUFFER_2K;
+
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+ set_bit(__GRTNIC_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+ if (GRTNIC_2K_TOO_SMALL_WITH_PADDING || (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+ {
+ set_bit(__GRTNIC_RX_3K_BUFFER, &rx_ring->state);
+ rx_ring->rx_buffer_len = GRTNIC_RXBUFFER_3K;
+ }
+
+#endif /* PAGE_SIZE < 8192*/
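+
+ /*
+  * Rx buffer sizing, in brief: 2K buffers with half-page reuse by
+  * default; 3K buffers whenever build_skb() padding (GRTNIC_SKB_PAD
+  * plus skb_shared_info) or a max_frame larger than a standard
+  * Ethernet frame would no longer fit in 2K; with packet split
+  * disabled the buffer length is derived directly from max_frame.
+  */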
+#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + rx_ring->rx_buffer_len = max_frame; +#endif /*!CONFIG_DISABLE_PACKET_SPLIT*/ + } +} + +/** + * grtnic_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void grtnic_configure_rx(struct grtnic_adapter *adapter) +{ + int i; + + /* Program registers for the distribution of queues */ + grtnic_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + grtnic_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + grtnic_configure_rx_ring(adapter, adapter->rx_ring[i]); + + /* enable all receives */ + grtnic_SetRx(adapter->netdev, 1); //start rx +} + + +static void grtnic_configure(struct grtnic_adapter *adapter) +{ + +//#if IS_ENABLED(CONFIG_DCA) +// /* configure DCA */ +// if (adapter->flags & FLAG_DCA_CAPABLE) +// grtnic_setup_dca(adapter); +//#endif + + grtnic_configure_tx(adapter); + grtnic_configure_rx(adapter); +} + +static void grtnic_up_complete(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + u32 phy_addr = hw->phy_addr; + u16 temp; + +// ixgbe_get_hw_control(adapter); +// ixgbe_setup_gpie(adapter); + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) + grtnic_configure_msix(adapter); + else + grtnic_configure_msi_and_legacy(adapter); + + if(adapter->ei->type == board_902T_GRT_FF) + { + grtnic_PhyRead(adapter->netdev, phy_addr, 0x00, &temp); //prtad_reg + grtnic_PhyWrite(adapter->netdev, phy_addr, 0x00, temp | PHY_RESET); //rst phy + } + else + { + /* enable the optics for 82599 SFP+ fiber */ + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x00, 0); //enable laser; + } + + smp_mb__before_atomic(); + clear_bit(__GRTNIC_DOWN, &adapter->state); + grtnic_napi_enable_all(adapter); +//#ifndef IXGBE_NO_LLI +// grtnic_configure_lli(adapter); +//#endif + + /* clear any pending interrupts, may auto mask */ + GRTNIC_READ_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_VECTOR*4), 1); + grtnic_irq_enable(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= GRTNIC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + +// +// ixgbe_clear_vf_stats_counters(adapter); +// /* Set PF Reset Done bit so PF/VF Mail Ops can work */ +// ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); +// ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; +// IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); +// +// /* update setting rx tx for all active vfs */ +// ixgbe_set_all_vfs(adapter); +} + +void grtnic_reset(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + GRTNIC_READ_REG(hw, ((TARGET_CONFIG<<12) + ADDR_FUNC_RST*4), 1); //function reset; +} + +int grtnic_open(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__GRTNIC_TESTING, &adapter->state)) + return -EBUSY; + + grtnic_SetRx(netdev, 0); //stop rx + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = grtnic_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = 
grtnic_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + grtnic_configure(adapter); + + err = grtnic_request_irq(adapter); + if (err) + goto err_req_irq; + + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + + grtnic_up_complete(adapter); + + grtnic_SetTx(netdev, 1); //start tx + grtnic_trigger_lsc(adapter); //Fire a link status change interrupt to start the watchdog. + + return GRTNIC_SUCCESS; + + +err_set_queues: + grtnic_free_irq(adapter); +err_req_irq: + grtnic_free_all_rx_resources(adapter); +err_setup_rx: + grtnic_free_all_tx_resources(adapter); +err_setup_tx: + grtnic_reset(adapter); + + return err; +} +/////////////////////////////////////////////////////////////////////////////// + + +void grtnic_disable_rx_queue(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int i; + + /* disable receives */ + grtnic_SetRx(netdev, 0); //stop rx + + /* disable all enabled Rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct grtnic_ring *ring = adapter->rx_ring[i]; + u8 reg_idx = ring->reg_idx; + /* flush pending descriptor writebacks to memory */ + GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (RX_INT_DELAY | GRTNIC_RDTR_FPD), 1); + /* channel stop */ + GRTNIC_WRITE_REG(hw, ((TARGET_C2H<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x00, 1); + } +} + +void grtnic_disable_tx_queue(struct grtnic_adapter *adapter) +{ + struct grtnic_hw *hw = &adapter->hw; + int i; + + /* disable all enabled Tx queues */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *ring = adapter->tx_ring[i]; + u8 reg_idx = ring->reg_idx; + + /* flush pending descriptor writebacks to memory */ + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_INT_DELAY*4), (TX_INT_DELAY | GRTNIC_TIDV_FPD), 1); + /* channel stop */ + GRTNIC_WRITE_REG(hw, ((TARGET_H2C<<12) + (reg_idx<<8) + ADDR_ENGINE_CTRL*4), 0x00, 1); + } +} + +/** + * grtnic_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void grtnic_clean_tx_ring(struct grtnic_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct grtnic_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + unsigned int size; + + + while (i != tx_ring->next_to_use) { + union grtnic_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = GRTNIC_TX_DESC(*tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue 
*/ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct grtnic_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + + +/** + * grtnic_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void grtnic_clean_rx_ring(struct grtnic_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct grtnic_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + unsigned int size; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + + /* Free all the Rx ring sk_buffs */ +#ifdef CONFIG_DISABLE_PACKET_SPLIT + while (i != rx_ring->next_to_use) { +#else + while (i != rx_ring->next_to_alloc) { +#endif + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; +#ifndef CONFIG_DISABLE_PACKET_SPLIT + if (GRTNIC_CB(skb)->page_released) + dma_unmap_page_attrs(rx_ring->dev, + GRTNIC_CB(skb)->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif +#else + /* We need to clean up RSC frag lists */ + skb = grtnic_merge_active_tail(skb); + if (grtnic_close_active_frag_list(skb)) + dma_unmap_single(rx_ring->dev, GRTNIC_CB(skb)->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + GRTNIC_CB(skb)->dma = 0; +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + grtnic_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + grtnic_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + GRTNIC_RX_DMA_ATTR); +#endif + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); +#else /* CONFIG_DISABLE_PACKET_SPLIT */ + if (rx_buffer->dma) { + dma_unmap_single(rx_ring->dev, rx_buffer->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } +#endif /* CONFIG_DISABLE_PACKET_SPLIT */ + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + size = sizeof(struct grtnic_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + +#ifndef CONFIG_DISABLE_PACKET_SPLIT + rx_ring->next_to_alloc = 0; +#endif + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * grtnic_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void grtnic_clean_all_tx_rings(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + grtnic_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * grtnic_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void grtnic_clean_all_rx_rings(struct grtnic_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + grtnic_clean_rx_ring(adapter->rx_ring[i]); +} + + +void grtnic_down(struct grtnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct grtnic_hw *hw = &adapter->hw; + u32 phy_addr = hw->phy_addr; + u16 temp; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__GRTNIC_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + /* Shut off incoming Tx traffic */ + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + + /* Disable Rx */ + grtnic_disable_rx_queue(adapter); + + grtnic_irq_disable(adapter); + + grtnic_napi_disable_all(adapter); + + clear_bit(__GRTNIC_RESET_REQUESTED, &adapter->state); + adapter->flags &= ~GRTNIC_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + + /* disable transmits in the hardware now that interrupts are off */ + grtnic_disable_tx_queue(adapter); + +#ifdef HAVE_PCI_ERS + if (!pci_channel_offline(adapter->pdev)) +#endif + grtnic_reset(adapter); + + if(adapter->ei->type == board_902T_GRT_FF) + { + grtnic_PhyRead(netdev, phy_addr, 0x00, &temp); //prtad_reg + grtnic_PhyWrite(netdev, phy_addr, 0x00, temp | PHY_POWER_DOWN); //power down + } + else + { + /* power down the optics for 82599 SFP+ fiber */ + GRTNIC_WRITE_REG(hw, PHY_TX_DISABLE, 0x01, 0); //disable laser; + } + + grtnic_clean_all_tx_rings(adapter); + grtnic_clean_all_rx_rings(adapter); +} + + +void grtnic_up(struct grtnic_adapter *adapter) +{ + + /* hardware has been reset, we need to reload some things */ + grtnic_configure(adapter); + + grtnic_up_complete(adapter); +} + +void grtnic_reinit_locked(struct grtnic_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE + 
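In the copper (board_902T_GRT_FF) branch of grtnic_down() above, the driver reads PHY register 0x00 and writes it back with PHY_POWER_DOWN set. Register 0 is the IEEE 802.3 clause-22 Basic Mode Control Register, so PHY_POWER_DOWN presumably corresponds to the standard power-down bit (bit 11, 0x0800). A sketch of the same operation written against the generic MII definitions, for illustration only; the driver keeps its own accessors and constants:

	#include <linux/mii.h>

	static void example_phy_power_down(struct net_device *netdev, u32 phy_addr)
	{
		u16 bmcr;

		grtnic_PhyRead(netdev, phy_addr, MII_BMCR, &bmcr);	/* register 0x00 */
		grtnic_PhyWrite(netdev, phy_addr, MII_BMCR, bmcr | BMCR_PDOWN);
	}

The SFP+ branch instead drops the transmit laser through the PHY_TX_DISABLE register, which is the usual way to quiesce a fiber port.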
netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif + + while (test_and_set_bit(__GRTNIC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + grtnic_down(adapter); + grtnic_up(adapter); + clear_bit(__GRTNIC_RESETTING, &adapter->state); +} + +void grtnic_do_reset(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + grtnic_reinit_locked(adapter); + else + grtnic_reset(adapter); +} + +/** + * grtnic_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +void grtnic_close_suspend(struct grtnic_adapter *adapter) +{ + grtnic_down(adapter); + grtnic_free_irq(adapter); + + grtnic_free_all_rx_resources(adapter); + grtnic_free_all_tx_resources(adapter); +} + + +int grtnic_close(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + if (netif_device_present(netdev)) + grtnic_close_suspend(adapter); + + return 0; +} + +static int __grtnic_maybe_stop_tx(struct grtnic_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(grtnic_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int grtnic_maybe_stop_tx(struct grtnic_ring *tx_ring, u16 size) +{ + if (likely(grtnic_desc_unused(tx_ring) >= size)) + return 0; + + return __grtnic_maybe_stop_tx(tx_ring, size); +} + + +netdev_tx_t grtnic_xmit_frame_ring (struct sk_buff *skb, + struct grtnic_adapter __maybe_unused *adapter, + struct grtnic_ring *tx_ring) + +{ + struct grtnic_tx_buffer *first, *tx_buffer; + union grtnic_tx_desc *tx_desc; + unsigned int i, f; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + unsigned int csum_info = 0; + +//////////////////////////////////////////////////////// + + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f])); + + if (grtnic_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + i = tx_ring->next_to_use; + first = &tx_ring->tx_buffer_info[i]; + + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; +/////////////////////////////////////////////////////////////////// + tx_desc = GRTNIC_TX_DESC(*tx_ring, i); + + memset(&tx_desc->read.len_ctl, 0, sizeof(tx_desc->read.len_ctl)); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + { + unsigned int csum_start = skb_checksum_start_offset(skb); + unsigned int csum_offset = skb->csum_offset; + + if (csum_start > 255 || csum_offset > 127) + { + if (skb_checksum_help(skb)) //soft calc csum + 
csum_info = 0; //disable hw csum + } + else + { + csum_info = (csum_offset << 8) | (csum_start); + } + } + else + { + csum_info = 0; + } + + tx_desc->read.len_ctl.sop = 1; + tx_desc->read.tx_info.csum_info = csum_info; + +////////////////////////////////////////////////////////////////////////////// + size = skb_headlen(skb); + data_len = skb->data_len; + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + tx_buffer->tx_flags = 0; + + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.src_addr = cpu_to_le64(dma); + + while (unlikely(size > GRTNIC_MAX_DATA_PER_TXD)) { + tx_desc->read.len_ctl.len = cpu_to_le32(GRTNIC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + i = 0; + } + memset(&tx_desc->read.len_ctl, 0, sizeof(tx_desc->read.len_ctl)); +// tx_desc->read.olinfo_status = 0; + + dma += GRTNIC_MAX_DATA_PER_TXD; + size -= GRTNIC_MAX_DATA_PER_TXD; + + tx_desc->read.src_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.len_ctl.len = cpu_to_le32(size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = GRTNIC_TX_DESC(*tx_ring, 0); + i = 0; + } + memset(&tx_desc->read.len_ctl, 0, sizeof(tx_desc->read.len_ctl)); +// tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + tx_desc->read.len_ctl.eop = 1; + tx_desc->read.len_ctl.irq = 1; + tx_desc->read.len_ctl.rs = 1; + tx_desc->read.len_ctl.len = cpu_to_le32(size); + + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + +#ifndef HAVE_TRANS_START_IN_QUEUE + netdev_ring(tx_ring)->trans_start = first->time_stamp; +#endif + + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + grtnic_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + +// printk("next_to_use = %d\n", i); + +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + + /* The following mmiowb() is required on certain + * architechtures (IA64/Altix in particular) in order to + * synchronize the I/O calls with respect to a spin lock. This + * is because the wmb() on those architectures does not + * guarantee anything for posted I/O writes. + * + * Note that the associated spin_unlock() is not within the + * driver code, but in the networking core stack. 
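For reference, the csum_info word prepared near the top of this function packs the checksum start and the checksum field offset as (csum_offset << 8) | csum_start, and the driver falls back to skb_checksum_help() whenever either value would not fit (csum_start > 255 or csum_offset > 127). Worked out for a plain untagged IPv4/TCP frame, using only the standard header sizes as an illustration:

	unsigned int csum_start  = ETH_HLEN + sizeof(struct iphdr);	/* 14 + 20 = 34 */
	unsigned int csum_offset = offsetof(struct tcphdr, check);	/* 16 */
	unsigned int csum_info   = (csum_offset << 8) | csum_start;	/* 0x1022 */

Both values fit the limits checked above, so such frames take the hardware checksum path.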
+ */ + mmiowb(); +#endif /* SPIN_UNLOCK_IMPLIES_MMIOWB */ + } + + return 0; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static netdev_tx_t grtnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_ring *tx_ring; +#ifdef HAVE_TX_MQ + unsigned int r_idx = skb->queue_mapping; +#endif + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb_put_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + +#ifdef HAVE_TX_MQ + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; +#else + tx_ring = adapter->tx_ring[0]; +#endif + + return grtnic_xmit_frame_ring(skb, adapter, tx_ring); + +} + +static void grtnic_check_lsc(struct grtnic_adapter *adapter) +{ + adapter->lsc_int++; +// printk("lsc = %d\n", adapter->lsc_int); + adapter->flags |= GRTNIC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) + grtnic_service_event_schedule(adapter); +} + + +irqreturn_t grtnic_isr (int __always_unused irq, void *data) +{ + struct grtnic_adapter *adapter = data; + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_q_vector *q_vector = adapter->q_vector[0]; + u32 irq_vector; + + /* read ICR disables interrupts using IAM */ + irq_vector = GRTNIC_READ_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_VECTOR*4), 1); + + if (!(adapter->flags & GRTNIC_FLAG_MSI_CAPABLE)) //legacy int + { + if(!(irq_vector & (1<<31))) + return IRQ_NONE; /* Not our interrupt */ + } + + if (irq_vector & adapter->eims_other) //link status change + grtnic_check_lsc(adapter); + + else if (((irq_vector & 0x7FFFFFFF) & ~(adapter->eims_other)) == 0) + { + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), ~0, 1); //打开所有的中断 + goto exit_int; + } + + /* would disable interrupts here but EIAM disabled it */ + napi_schedule_irqoff(&q_vector->napi); + +exit_int: + return IRQ_HANDLED; +} + +irqreturn_t grtnic_msix_other(int __always_unused irq, void *data) +{ + struct grtnic_adapter *adapter = data; + struct grtnic_hw *hw = &adapter->hw; + + grtnic_check_lsc(adapter); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__GRTNIC_DOWN, &adapter->state)) + GRTNIC_WRITE_REG(hw, ((TARGET_IRQ<<12) + ADDR_INTR_IMS*4), adapter->eims_other, 1); //打开相应的中断,user_interrupt + + return IRQ_HANDLED; +} + +irqreturn_t grtnic_msix_ring(int __always_unused irq, void *data) +{ + struct grtnic_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_mdio_read(struct net_device *netdev, int prtad, int devad, + u16 addr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + u16 value; + + if (prtad 
!= hw->phy_addr) + return -EINVAL; + + if(adapter->speed) //10G + { + grtnic_SetPhyAddr(netdev, prtad, devad, addr); //only for 10G phy + grtnic_PhyRead(netdev, prtad, devad, &value); + } + else + { + grtnic_PhyRead(netdev, prtad, addr, &value); + } + + return value; +} + +static int grtnic_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + + if (prtad != hw->phy_addr) + return -EINVAL; + + if(adapter->speed) //10G + { + grtnic_SetPhyAddr(netdev, prtad, devad, addr); //only for 10G phy + grtnic_PhyWrite(netdev, prtad, devad, value); + } + else + { + grtnic_PhyWrite(netdev, prtad, addr, value); + } + return 0; +} + +static int grtnic_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; + int prtad, devad, ret; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = grtnic_mdio_read(netdev, prtad, devad, mii->reg_num); + if (ret < 0) + return ret; + mii->val_out = ret; + return 0; + } else { + return grtnic_mdio_write(netdev, prtad, devad, mii->reg_num, + mii->val_in); + } +} + +static int grtnic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + case SIOCGMIIREG: + case SIOCSMIIREG: + return grtnic_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_set_mac(struct net_device *netdev, void *p) +{ +// struct xdmanet_port *xdmanet_port = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return (-EADDRNOTAVAIL); +// memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); + + grtnic_SetMacAddress(netdev, netdev->dev_addr); //added + + grtnic_SetMacPauseAddress(netdev, addr->sa_data); + + write_flash_macaddr(netdev); + + return 0; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); +//#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +//#endif + +#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > GRTNIC_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + +#endif + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + adapter->max_frame_size = max_frame; + + grtnic_SetMaxFrameLen(netdev, max_frame); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + grtnic_reinit_locked(adapter); + + return 0; +} + + +static u32 hash_mc_addr(struct net_device *netdev, u8 *mc_addr) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + + u32 hash_value, hash_mask; + u8 bit_shift = 0; + +// printk("add=%02x:%02x:%02x:%02x:%02x:%02x\n", mc_addr[5],mc_addr[4],mc_addr[3],mc_addr[2],mc_addr[1],mc_addr[0]); + + /* Register count multiplied by bits per register */ + hash_mask = (mac->mta_reg_count * 32) - 1; + + /* For a mc_filter_type 
of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ +/* switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + }*/ + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +void update_mc_addr_list(struct net_device *netdev, u8 *mc_addr_list, u32 mc_addr_count) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + struct grtnic_hw *hw = &adapter->hw; + struct grtnic_mac_info *mac = &hw->mac; + + u32 hash_value, hash_bit, hash_reg; + int i; + + mac->mta_reg_count = 128; + + /* clear mta_shadow */ + memset(&mac->mta_shadow, 0, sizeof(mac->mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = hash_mc_addr(netdev, mc_addr_list); + + hash_reg = (hash_value >> 5) & (mac->mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mac->mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + GRTNIC_WRITE_REG(hw, MAC_HASH_TABLE_START, 0, 0); + + /* replace the entire MTA table */ + for (i = 0; i< mac->mta_reg_count; i++) + GRTNIC_WRITE_REG(hw, MAC_HASH_TABLE_WR, mac->mta_shadow[i], 0); +} + + + +static int grtnic_write_mc_addr_list(struct net_device *netdev) +{ + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *ha; +#endif + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + update_mc_addr_list(netdev, NULL, 0); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* update_mc_addr_list expects a packed array of only addresses. 
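The hash computed above can be checked against the worked example already given in the comment. For the multicast address 01:AA:00:12:34:56 and the 128-register (4096-bit) table that update_mc_addr_list() above programs, the arithmetic is:

	hash_mask = 128 * 32 - 1 = 0xFFF
	bit_shift = 4				/* smallest shift with 0xFFF >> shift == 0xFF */
	hash      = 0xFFF & ((0x34 >> (8 - 4)) | (0x56 << 4))
	          = 0x003 | 0x560 = 0x563
	hash_reg  = (0x563 >> 5) & 127 = 43
	hash_bit  =  0x563 & 0x1F      = 3

so this address sets bit 3 of MTA shadow register 43, matching the case 0 value (0x563) in the comment.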
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) +#ifdef NETDEV_HW_ADDR_T_MULTICAST + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); +#else + memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN); +#endif + + update_mc_addr_list(netdev, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// +static void grtnic_set_rx_mode(struct net_device *netdev) +{ + int count; + u32 rctl, multicast_mode, all_multicast_mode, promisc_mode; + + promisc_mode = 1; + all_multicast_mode = 2; + multicast_mode = 4; + + rctl = grtnic_GetAdrsFilter(netdev); + rctl &= 0x0000000f; + + /* clear the affected bits */ + rctl &= ~(multicast_mode | all_multicast_mode| promisc_mode); //muliticast & all multicast & promisc + + /* Check for Promiscuous and All Multicast modes */ + + if (netdev->flags & IFF_PROMISC) + { + rctl |= promisc_mode; //promisc + } + + else + { + if (netdev->flags & IFF_ALLMULTI) + { + rctl |= all_multicast_mode; + } + else if (!netdev_mc_empty(netdev)) + { + count = netdev_mc_count(netdev); + rctl |= multicast_mode; + count = grtnic_write_mc_addr_list(netdev); + if (count < 0) + rctl |= all_multicast_mode; + } + } + grtnic_SetAdrsFilter(netdev, rctl); +} + + + +/** + * grtnic_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ + +void grtnic_update_stats(struct grtnic_adapter *adapter) +{ +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *net_stats = &adapter->netdev->stats; +#else + struct net_device_stats *net_stats = &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct grtnic_hw_stats *hwstats = &adapter->stats; + u32 temp_val; + + u32 i; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 alloc_rx_page = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + + if (test_bit(__GRTNIC_DOWN, &adapter->state) || + test_bit(__GRTNIC_RESETTING, &adapter->state)) + return; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct grtnic_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page = alloc_rx_page; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + temp_val = GRTNIC_READ_REG(&adapter->hw, 
MAC_RX_OVERFLOW_FRAME, 0); + if(temp_val < hwstats->mpc) + hwstats->mpc = 0x100000000 + temp_val; + else + hwstats->mpc = temp_val; + + net_stats->rx_missed_errors = hwstats->mpc; + + hwstats->ruc = grtnic_get_statistics_cnt(adapter, 0x210, hwstats->ruc); + hwstats->roc = grtnic_get_statistics_cnt(adapter, 0x250, hwstats->roc); + hwstats->rfc = grtnic_get_statistics_cnt(adapter, 0x218, hwstats->rfc); //crc error(<64) + hwstats->crcerrs = grtnic_get_statistics_cnt(adapter, 0x298, hwstats->crcerrs); //crc error(>=64) + hwstats->rlec = grtnic_get_statistics_cnt(adapter, 0x2B8, hwstats->rlec); + hwstats->badopcode = grtnic_get_statistics_cnt(adapter, 0x2D0, hwstats->badopcode); + hwstats->algnerrc = grtnic_get_statistics_cnt(adapter, 0x340, hwstats->algnerrc); + + net_stats->rx_errors = hwstats->rfc + + hwstats->crcerrs + + hwstats->algnerrc + + hwstats->ruc + + hwstats->roc + + hwstats->rlec + + hwstats->badopcode; + + net_stats->rx_length_errors = hwstats->ruc + hwstats->roc + hwstats->rlec; + net_stats->rx_crc_errors = hwstats->rfc + hwstats->crcerrs; + net_stats->rx_frame_errors = hwstats->algnerrc; + + hwstats->ecol = grtnic_get_statistics_cnt(adapter, 0x330, hwstats->ecol); + hwstats->latecol = grtnic_get_statistics_cnt(adapter, 0x328, hwstats->latecol); + hwstats->tx_underrun = grtnic_get_statistics_cnt(adapter, 0x2F0, hwstats->tx_underrun); + + net_stats->tx_errors = hwstats->ecol + hwstats->latecol + hwstats->tx_underrun; + net_stats->tx_aborted_errors = hwstats->ecol; + net_stats->tx_window_errors = hwstats->latecol; + net_stats->tx_carrier_errors = hwstats->tx_underrun; +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + hwstats->gprc = grtnic_get_statistics_cnt(adapter, 0x290, hwstats->gprc); + hwstats->gorc = grtnic_get_statistics_cnt(adapter, 0x200, hwstats->gorc); + hwstats->bprc = grtnic_get_statistics_cnt(adapter, 0x2A0, hwstats->bprc); + hwstats->mprc = grtnic_get_statistics_cnt(adapter, 0x2A8, hwstats->mprc); + + hwstats->prc64 = grtnic_get_statistics_cnt(adapter, 0x220, hwstats->prc64); + hwstats->prc127 = grtnic_get_statistics_cnt(adapter, 0x228, hwstats->prc127); + hwstats->prc255 = grtnic_get_statistics_cnt(adapter, 0x230, hwstats->prc255); + hwstats->prc511 = grtnic_get_statistics_cnt(adapter, 0x238, hwstats->prc511); + hwstats->prc1023 = grtnic_get_statistics_cnt(adapter, 0x240, hwstats->prc1023); + hwstats->prc1522 = grtnic_get_statistics_cnt(adapter, 0x248, hwstats->prc1522); + hwstats->prcoversize = grtnic_get_statistics_cnt(adapter, 0x250, hwstats->prcoversize); + + hwstats->scc = grtnic_get_statistics_cnt(adapter, 0x310, hwstats->scc); + hwstats->mcc = grtnic_get_statistics_cnt(adapter, 0x318, hwstats->mcc); + hwstats->dc = grtnic_get_statistics_cnt(adapter, 0x320, hwstats->dc); + hwstats->rxpause = grtnic_get_statistics_cnt(adapter, 0x2C8, hwstats->rxpause); + hwstats->txpause = grtnic_get_statistics_cnt(adapter, 0x308, hwstats->txpause); + + hwstats->gptc = grtnic_get_statistics_cnt(adapter, 0x2D8, hwstats->gptc); + hwstats->gotc = grtnic_get_statistics_cnt(adapter, 0x208, hwstats->gotc); + hwstats->bptc = grtnic_get_statistics_cnt(adapter, 0x2E0, hwstats->bptc); + hwstats->mptc = grtnic_get_statistics_cnt(adapter, 0x2E8, hwstats->mptc); + + hwstats->ptc64 = grtnic_get_statistics_cnt(adapter, 0x258, hwstats->ptc64); + hwstats->ptc127 = grtnic_get_statistics_cnt(adapter, 0x260, hwstats->ptc127); + hwstats->ptc255 = grtnic_get_statistics_cnt(adapter, 0x268, 
hwstats->ptc255); + hwstats->ptc511 = grtnic_get_statistics_cnt(adapter, 0x270, hwstats->ptc511); + hwstats->ptc1023 = grtnic_get_statistics_cnt(adapter, 0x278, hwstats->ptc1023); + hwstats->ptc1522 = grtnic_get_statistics_cnt(adapter, 0x280, hwstats->ptc1522); + hwstats->ptcoversize = grtnic_get_statistics_cnt(adapter, 0x288, hwstats->ptcoversize); +} + + +#ifdef HAVE_NDO_GET_STATS64 +static void grtnic_get_ring_stats64(struct rtnl_link_stats64 *stats, struct grtnic_ring *ring) +{ + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } +} + +/** + * grtnic_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces ixgbe_get_stats for kernels which support it. + */ +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void grtnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 * +grtnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#endif +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct grtnic_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct grtnic_ring *ring = READ_ONCE(adapter->tx_ring[i]); + + grtnic_get_ring_stats64(stats, ring); + } + + rcu_read_unlock(); + + /* following stats updated by grtnic_watchdog_task() */ +// stats->multicast = netdev->stats.multicast; +// stats->rx_errors = netdev->stats.rx_errors; +// stats->rx_length_errors = netdev->stats.rx_length_errors; +// stats->rx_crc_errors = netdev->stats.rx_crc_errors; +// stats->rx_missed_errors = netdev->stats.rx_missed_errors; +#ifndef HAVE_VOID_NDO_GET_STATS64 + + return stats; +#endif +} + +#else + +/** + * grtnic_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. 
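The fetch_begin/fetch_retry loops above are the reader side of the kernel's u64_stats_sync protocol, which lets 32-bit hosts read a consistent 64-bit packets/bytes pair without taking a lock. The writer side is not part of this hunk; the rings' clean/poll paths are assumed to bump the counters roughly like this (illustrative sketch only):

	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets += total_packets;
	ring->stats.bytes   += total_bytes;
	u64_stats_update_end(&ring->syncp);

On 64-bit kernels the seqcount side of this essentially compiles away, so the annotation costs nothing there.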
+ **/ +static struct net_device_stats *grtnic_get_stats(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + /* update the stats data */ + grtnic_update_stats(adapter); + +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + /* only return the current stats */ + return &netdev->stats; +#else + /* only return the current stats */ + return &adapter->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +} +#endif //HAVE_NDO_GET_STATS64 + +///////////////////////////////////////////////////////////////////////////////////////////////// +#ifdef HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_USER_QUEUE + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused struct net_device *sb_dev, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 grtnic_select_queue(struct net_device *dev, struct sk_buff *skb) +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +{ + struct grtnic_adapter *adapter = netdev_priv(dev); + int card_type = adapter->ei->type; + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index = -1; + + if (queue_index < 0 || skb->ooo_okay || queue_index >= dev->real_num_tx_queues) + { + if (skb_rx_queue_recorded(skb)) + { + new_index = skb_get_rx_queue(skb); + while (unlikely(new_index >= dev->real_num_tx_queues)) + new_index -= dev->real_num_tx_queues; + + if (queue_index != new_index && sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, new_index); + + return new_index; + } + } + +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) + return netdev_pick_tx(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) + return fallback(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) + return fallback(dev, skb); +#else + return __netdev_pick_tx(dev, skb); +#endif +} +#endif /* CONFIG_USER_QUEUE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +///////////////////////////////////////////////////////////////////////////////////////////////// + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void grtnic_netpoll(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + + /* if interface is down do nothing */ + if (test_bit(__GRTNIC_DOWN, &adapter->state)) + return; + + if (adapter->flags & GRTNIC_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->netpoll_rx = true; + grtnic_msix_ring(0, adapter->q_vector[i]); + adapter->q_vector[i]->netpoll_rx = false; + } + } else { + grtnic_isr(0, adapter); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + +///////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifdef HAVE_NET_DEVICE_OPS +static const struct net_device_ops grtnic_netdev_ops = { + .ndo_open = grtnic_open, + .ndo_stop = grtnic_close, + .ndo_start_xmit = grtnic_xmit_frame, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = grtnic_get_stats64, +#else + .ndo_get_stats = grtnic_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = grtnic_netpoll, +#endif +#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + .ndo_busy_poll = grtnic_busy_poll_recv, +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ + +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = grtnic_change_mtu, +#else + .ndo_change_mtu = grtnic_change_mtu, +#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */ + +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = grtnic_ioctl, +#else + .ndo_do_ioctl = grtnic_ioctl, +#endif /* HAVE_NDO_ETH_IOCTL */ + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT +/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the + * function get_ndo_ext to retrieve offsets for extended fields from with the + * net_device_ops struct and ndo_size is checked to determine whether or not + * the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + + .ndo_set_rx_mode = grtnic_set_rx_mode, + .ndo_set_mac_address= grtnic_set_mac, +#ifdef HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_USER_QUEUE + .ndo_select_queue = grtnic_select_queue, +#else +#ifndef HAVE_MQPRIO + .ndo_select_queue = __netdev_pick_tx, +#endif +#endif /* CONFIG_USER_QUEUE */ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +// .ndo_tx_timeout = xdmanet_tx_timeout, +}; +#endif /* HAVE_NET_DEVICE_OPS */ + +///////////////////////////////////////////////////////////////////////////////////////////////// + +void grtnic_assign_netdev_ops(struct net_device *netdev) +{ +#ifdef HAVE_NET_DEVICE_OPS + netdev->netdev_ops = &grtnic_netdev_ops; +#else + netdev->open = &grtnic_open; + netdev->stop = &grtnic_close; + netdev->hard_start_xmit = &grtnic_xmit_frame; + netdev->get_stats = &grtnic_get_stats; +#ifdef HAVE_SET_RX_MODE + netdev->set_rx_mode = &grtnic_set_rx_mode; +#endif + netdev->set_multicast_list = &grtnic_set_rx_mode; + + netdev->set_mac_address = &grtnic_set_mac; + netdev->change_mtu = &grtnic_change_mtu; +// netdev->tx_timeout = &xdmanet_tx_timeout; + netdev->do_ioctl = &grtnic_ioctl; + +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = &grtnic_netpoll; +#endif + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_USER_QUEUE + netdev->select_queue = &grtnic_select_queue; +#else + netdev->select_queue = &__netdev_pick_tx; +#endif /*CONFIG_USER_QUEUE*/ +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /*HAVE_NET_DEVICE_OPS*/ + +#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED +#ifdef HAVE_NDO_BUSY_POLL + netdev_extended(netdev)->ndo_busy_poll = grtnic_busy_poll_recv; +#endif /* HAVE_NDO_BUSY_POLL */ +#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + + grtnic_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; +} \ No newline at end of file diff --git a/drivers/net/ethernet/guangruntong/grtnic_nvm.c b/drivers/net/ethernet/guangruntong/grtnic_nvm.c new file mode 100755 index 0000000000000..035d1071b7a9f --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_nvm.c @@ -0,0 +1,125 @@ +#include "grtnic.h" +#include "grtnic_nvm.h" + +///////////////////////////////////////////////////////////////////////////////////////////// +int erase_sector_flash(struct grtnic_adapter *adapter, u32 offset) //erase 0x10000(64k) every time +{ + struct grtnic_hw *hw = &adapter->hw; + int status = 0; + + writel( (SPI_CMD_ADDR(offset) | SPI_CMD_CMD(SECTOR_ERASE_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, + SPI_STATUS_OPDONE, SPI_STATUS_OPDONE, + SPI_ERASE_TIMEOUT, 0); + + if (status) { + printk("FLASH erase timed out\n"); + } + + return status; +} + + +int erase_subsector_flash(struct grtnic_adapter *adapter, u32 offset) //erase 0x1000(4k) every time +{ + struct grtnic_hw *hw = &adapter->hw; + int status = 0; + + writel( (SPI_CMD_ADDR(offset) | SPI_CMD_CMD(SUBSECTOR_ERASE_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, SPI_STATUS_OPDONE, SPI_STATUS_OPDONE, SPI_ERASE_TIMEOUT, 0); + if (status) { + printk("FLASH erase timed out\n"); + } + + return status; +} + +/** + * ngbe_read_flash_buffer - Read FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to read + * @dwords: number of dwords + * @data: dword(s) read from the EEPROM + * + * Retrieves 32 bit dword(s) read from EEPROM + **/ +int write_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data) +{ + struct grtnic_hw *hw = 
&adapter->hw; + int status = 0; + u32 i; + + for (i = 0; i < dwords; i++) { + writel(be32(data[i]), hw->user_bar + SPI_DATA); + writel( (SPI_CMD_ADDR(offset + (i << 2)) | SPI_CMD_CMD(PAGE_PROG_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, + SPI_STATUS_OPDONE, SPI_STATUS_OPDONE, + SPI_TIMEOUT, 0); + if (status) { + printk("FLASH write timed out\n"); + break; + } + } + + return status; +} + +/** + * ngbe_write_flash_buffer - Write FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to write + * @dwords: number of dwords + * @data: dword(s) write from to EEPROM + * + **/ +int read_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data) +{ + struct grtnic_hw *hw = &adapter->hw; + int status = 0; + u32 i; + + for (i = 0; i < dwords; i++) { + writel( (SPI_CMD_ADDR(offset + (i << 2)) | SPI_CMD_CMD(PAGE_READ_CMD)), hw->user_bar + SPI_CMD); + + status = po32m(hw->user_bar, SPI_STATUS, + SPI_DATA_OP_DONE, SPI_DATA_OP_DONE, + SPI_TIMEOUT, 0); + if (status != 0) { + printk("FLASH read timed out\n"); + break; + } + + data[i] = readl(hw->user_bar + SPI_DATA); + } + + return status; +} + +void write_flash_macaddr(struct net_device *netdev) +{ + struct grtnic_adapter *adapter = netdev_priv(netdev); + u32 *temp; + + int firmware_offset = adapter->speed; + int port = adapter->func; + u32 offset = VPD_OFFSET - (firmware_offset * 0x100000); + + temp = vmalloc(FLASH_SUBSECTOR_SIZE); + memset(temp, 0x00, FLASH_SUBSECTOR_SIZE); + + read_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, temp); //subsector is 4K + erase_subsector_flash(adapter, offset); + + temp[(MAC_ADDR_OFFSET>>2) + port*2] = (netdev->dev_addr[2] << 24 | netdev->dev_addr[3] << 16 | netdev->dev_addr[4] << 8 | netdev->dev_addr[5]); + temp[(MAC_ADDR_OFFSET>>2) + port*2+1] = (netdev->dev_addr[0] << 8 | netdev->dev_addr[1]); + + write_flash_buffer(adapter, offset, FLASH_SUBSECTOR_SIZE>>2, temp); + vfree(temp); +} diff --git a/drivers/net/ethernet/guangruntong/grtnic_nvm.h b/drivers/net/ethernet/guangruntong/grtnic_nvm.h new file mode 100755 index 0000000000000..5fb68361aecb6 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_nvm.h @@ -0,0 +1,72 @@ +#ifndef GRTNIC_NVM_H +#define GRTNIC_NVM_H + +#define be32(x) ((x<<24 & 0xff000000) | (x<<8 & 0x00ff0000) | (x>>8 & 0x0000ff00) | (x>>24 & 0x000000ff)) +#define be16(x) ((x<<8 & 0xff00) | (x>>8 & 0x00ff)) + +////////////////////////////////////////////////// +#define PAGE_READ_CMD 0x00 +#define SECTOR_ERASE_CMD 0x01 +#define SUBSECTOR_ERASE_CMD 0x02 +#define PAGE_PROG_CMD 0x03 + + +#define MAX_FLASH_LOAD_POLL_TIME 10 + +#define SPI_CMD 0x0400 +#define SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) + +#define SPI_DATA 0x0404 + +#define SPI_STATUS 0x0408 +#define SPI_STATUS_OPDONE ((0x1)) +#define SPI_DATA_OP_DONE ((0x2)) + +#define SPI_ERASE_TIMEOUT 2000000 +#define SPI_TIMEOUT 20000 + +//////////////////////////////////////////////////////////////// +#define VPD_OFFSET 0xEFF000 +#define MAC_ADDR_OFFSET 0x100 +#define VERSION_OFFSET 0x200 + +#define PXE_OFFSET 0xE00000 + +#define FLASH_SECTOR_SIZE 0x10000 //64k +#define FLASH_SUBSECTOR_SIZE 0x1000 //4k +//////////////////////////////////////////////////////////////// + +static inline int po32m(uint8_t* hw, u32 reg, u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? 
(usecs + loop - 1) / loop : 0); + + count = loop; +// printf("loop = %d, usecs = %d\n", loop, usecs); + do { + u32 value = readl(hw + reg); + + if ((value & mask) == (field & mask)) { + break; + } + + if (loop-- <= 0) + break; + +// udelay(20); + usleep_range(20,20); + } while (1); + + return (count - loop <= count ? 0 : 1); +} + +int erase_sector_flash(struct grtnic_adapter *adapter, u32 offset); +int erase_subsector_flash(struct grtnic_adapter *adapter, u32 offset); +int write_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data); +int read_flash_buffer(struct grtnic_adapter *adapter, u32 offset, u32 dwords, u32 *data); +void write_flash_macaddr(struct net_device *netdev); + +#endif /* GRTNIC_NVM_H */ diff --git a/drivers/net/ethernet/guangruntong/grtnic_param.c b/drivers/net/ethernet/guangruntong/grtnic_param.c new file mode 100755 index 0000000000000..7a16cf48137cd --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_param.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2021 Intel Corporation. */ + +#include +#include + +#include "grtnic.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define GRTNIC_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define GRTNIC_PARAM_INIT { [0 ... GRTNIC_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when ixgbe_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
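The GRTNIC_PARAM_INIT initializer referred to in the comment above uses GCC's designated range ("labelled elements") syntax so that every per-board slot starts out as OPTION_UNSET, letting the driver distinguish "not specified" from an explicit value. For a hypothetical four-board limit it expands to the equivalent of:

	static int IntMode[4 + 1] = { [0 ... 4] = -1 };	/* { -1, -1, -1, -1, -1 } */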
+ */ + +#define GRTNIC_PARAM(X, desc) \ + static const int __devinitdata X[GRTNIC_MAX_NIC+1] = GRTNIC_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(GRTNIC_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define GRTNIC_PARAM(X, desc) \ + static int X[GRTNIC_MAX_NIC+1] = GRTNIC_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif //module_param_array + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +GRTNIC_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define GRTNIC_INT_LEGACY 0 +#define GRTNIC_INT_MSI 1 +#define GRTNIC_INT_MSIX 2 + +GRTNIC_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); + + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 956-488281 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR 1 +GRTNIC_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,956-488281), default 1"); +#define MAX_ITR 488281 +#define MIN_ITR 956 + + +GRTNIC_PARAM(csum_tx_mode, "Disable or enable tx hecksum offload, default 1"); +GRTNIC_PARAM(csum_rx_mode, "Disable or enable rx hecksum offload, default 1"); + + + +struct grtnic_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct grtnic_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int grtnic_validate_option(struct net_device *netdev, + unsigned int *value, + struct grtnic_option *opt) +{ + if (*value == OPTION_UNSET) { + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || + *value == opt->def) { + if (opt->msg) + netdev_info(netdev, "%s set to %d, %s\n", + opt->name, *value, opt->msg); + else + netdev_info(netdev, "%s set to %d\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + + for (i = 0; i < opt->arg.l.nr; i++) { + const struct grtnic_opt_list *ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(netdev, "%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) +#define PSTR_LEN 10 + +/** + * grtnic_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
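Because each option above is declared through module_param_array_named(), values are supplied at load time as a comma-separated list, one entry per board in probe order, and boards without an entry keep the default. A usage illustration (the module name grtnic_xgb, the file name and the chosen values are assumptions, not recommendations), e.g. in /etc/modprobe.d/grtnic.conf:

	options grtnic_xgb IntMode=0,2 InterruptThrottleRate=0,956

which would give the first board legacy interrupts with throttling off and the second board MSI-X at 956 interrupts per second.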
+ **/ +void grtnic_check_options(struct grtnic_adapter *adapter) +{ + int bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + + if (bd >= GRTNIC_MAX_NIC) { + netdev_notice(adapter->netdev, + "Warning: no configuration for board #%d\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); +#ifndef module_param_array + bd = GRTNIC_MAX_NIC; +#endif + } + + + { /* Interrupt Mode */ + unsigned int int_mode; + static struct grtnic_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of " __MODULE_STRING(GRTNIC_INT_MSIX), + .def = GRTNIC_INT_MSIX, + .arg = { .r = { .min = GRTNIC_INT_LEGACY, + .max = GRTNIC_INT_MSIX} } + }; + +#ifdef module_param_array + if (num_IntMode > bd || num_InterruptType > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + grtnic_validate_option(adapter->netdev, + &int_mode, &opt); + switch (int_mode) { + case GRTNIC_INT_MSIX: + if (!(*aflags & GRTNIC_FLAG_MSIX_CAPABLE)) + netdev_info(adapter->netdev, + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case GRTNIC_INT_MSI: + if (!(*aflags & GRTNIC_FLAG_MSI_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~GRTNIC_FLAG_MSIX_CAPABLE; + } + break; + case GRTNIC_INT_LEGACY: + default: + *aflags &= ~GRTNIC_FLAG_MSIX_CAPABLE; + *aflags &= ~GRTNIC_FLAG_MSI_CAPABLE; + break; + } +#ifdef module_param_array + } else { + /* default settings */ + if (*aflags & GRTNIC_FLAG_MSIX_CAPABLE) { + *aflags |= GRTNIC_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~GRTNIC_FLAG_MSIX_CAPABLE; + *aflags &= ~GRTNIC_FLAG_MSI_CAPABLE; + } + } +#endif + } + + { /* Interrupt Throttling Rate */ + static struct grtnic_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + grtnic_validate_option(adapter->netdev, + &itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (1000000/itr) << 2; + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; +#ifdef module_param_array + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } +#endif + } + + { /* Tx Checksum Support */ + static struct grtnic_option opt = { + .type = enable_option, + .name = "Tx checksum Enable", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_csum_tx_mode > bd) { +#endif + unsigned int csum_tx = csum_tx_mode[bd]; + grtnic_validate_option(adapter->netdev, &csum_tx, &opt); + if (csum_tx) + *aflags |= GRTNIC_FLAG_TXCSUM_CAPABLE; + else + *aflags &= ~GRTNIC_FLAG_TXCSUM_CAPABLE; +#ifdef module_param_array + } else { + *aflags |= GRTNIC_FLAG_TXCSUM_CAPABLE; + } +#endif + } + + { /* Rx Checksum Support */ + static struct grtnic_option opt = { + .type = enable_option, + .name = "Rx checksum Enable", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_csum_rx_mode > bd) { +#endif + 
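For the non-special InterruptThrottleRate values handled above, the stored setting is the interrupt gap in microseconds shifted left by two, since (per the in-line comment) the lowest bits of the ITR field are reserved for control. Worked out for the two ends of the accepted range, purely as an illustration:

	InterruptThrottleRate = 956    -> 1000000 / 956    = 1046 us -> rx_itr_setting = 1046 << 2 = 4184
	InterruptThrottleRate = 488281 -> 1000000 / 488281 = 2 us    -> rx_itr_setting = 2 << 2    = 8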
unsigned int csum_rx = csum_rx_mode[bd]; + grtnic_validate_option(adapter->netdev, &csum_rx, &opt); + if (csum_rx) + *aflags |= GRTNIC_FLAG_RXCSUM_CAPABLE; + else + *aflags &= ~GRTNIC_FLAG_RXCSUM_CAPABLE; +#ifdef module_param_array + } else { + *aflags |= GRTNIC_FLAG_RXCSUM_CAPABLE; + } +#endif + } + + +} diff --git a/drivers/net/ethernet/guangruntong/grtnic_proc.c b/drivers/net/ethernet/guangruntong/grtnic_proc.c new file mode 100755 index 0000000000000..ae0e2f43fe986 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/grtnic_proc.c @@ -0,0 +1,233 @@ +#include +//#include +#include "grtnic.h" +#ifdef GRTNIC_PROCFS + +struct proc_dir_entry *grtnic_top_dir = NULL; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) ) +//ssize_t update_firmware(struct file *file, const char __user *buffer, size_t count, loff_t *pos); + +//static int grtnic_driver_generic_read(struct seq_file *m, void *v) +//{ +// return 0; +//} + +//static int grtnic_driver_generic_open(struct inode *inode, struct file *file) +//{ +// return single_open(file, NULL, PDE_DATA(inode)); +//} + +//////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_driver_pktcnt_read(struct seq_file *m, void *v) +{ + struct grtnic_adapter *adapter = (struct grtnic_adapter *)m->private; + + seq_printf(m, "tx_count0 = %u, tx_count1 = %u, rx_count = %u\n", adapter->tx_count0, adapter->tx_count1, adapter->rx_count); + return 0; +} + +static int grtnic_driver_pktcnt_open(struct inode *inode, struct file *file) +{ + return single_open(file, grtnic_driver_pktcnt_read, PDE_DATA(inode)); +} +//////////////////////////////////////////////////////////////////////////////////////////////// +static int grtnic_hardware_pktcnt_read(struct seq_file *m, void *v) +{ + u32 h2c_cnt, c2h_cnt, rx_cnt; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)m->private; + struct grtnic_hw *hw = &adapter->hw; + + h2c_cnt = readl(hw->user_bar + 0x604); + c2h_cnt = readl(hw->user_bar + 0x608); + rx_cnt = readl(hw->user_bar + 0x60C); + + seq_printf(m, "h2c_count = %u, c2h_count = %u, rx_count = %u\n", h2c_cnt, c2h_cnt, rx_cnt); + return 0; +} + +static int grtnic_hardware_pktcnt_open(struct inode *inode, struct file *file) +{ + return single_open(file, grtnic_hardware_pktcnt_read, PDE_DATA(inode)); +} +//////////////////////////////////////////////////////////////////////////////////////////////// + +static int grtnic_hardware_error(struct seq_file *m, void *v) +{ + u32 var; + int read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)m->private; + struct grtnic_hw *hw = &adapter->hw; + + var = readl(hw->user_bar + 0x600); + + read_count_error = (var >> 31) & 0x01; + sgtxfifo_error = (var >> 16) & 0xff; + sgrxfifo_error = (var >> 8) & 0xff; + mainfifo_error = (var >> 0) & 0xff; + seq_printf(m, "read_count_error = %d, sgtxfifo_error = %d, sgrxfifo_error = %d, mainfifo_error = %d\n", read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error); + return 0; +} + +static int grtnic_hardware_error_open(struct inode *inode, struct file *file) +{ + return single_open(file, grtnic_hardware_error, PDE_DATA(inode)); +} + +//////////////////////////////////////////////////////////////////////////////////////////////// +struct grtnic_proc_type { + char *name; + int (*open)(struct inode *inode, struct file *file); + int (*read)(struct seq_file *m, void *v); + ssize_t (*write)(struct file *file, const char __user *buffer, size_t count, loff_t 
*pos); +}; + +struct grtnic_proc_type grtnic_proc_entries[] = { + {"pktcnt", &grtnic_driver_pktcnt_open, NULL, NULL}, + {"fpktcnt", &grtnic_hardware_pktcnt_open, NULL, NULL}, + {"fharderr", &grtnic_hardware_error_open, NULL, NULL}, +// {"update", &grtnic_driver_generic_open, NULL, &update_firmware}, + {NULL, NULL, NULL, NULL} +}; + +//////////////////////////////////////////////////////////////////////////////////////////////// + + +#else //LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) +//int update_firmware(struct file *file, const char *buffer, unsigned long count, void *data); + +static int grtnic_driver_pktcnt_read(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct grtnic_adapter *adapter = (struct grtnic_adapter *)data; + + return snprintf(page, count, "tx_count0 = %u, tx_count1 = %u, rx_count = %d\n", adapter->tx_count0, adapter->tx_count1, adapter->rx_count); +} + +static int grtnic_hardware_pktcnt_read(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + u32 h2c_cnt, c2h_cnt, rx_cnt; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)data; + struct grtnic_hw *hw = &adapter->hw; + + h2c_cnt = readl(hw->user_bar + 0x604); + c2h_cnt = readl(hw->user_bar + 0x608); + rx_cnt = readl(hw->user_bar + 0x60C); + + return snprintf(page, count, "h2c_count = %u, c2h_count = %u, rx_count = %u\n", h2c_cnt, c2h_cnt, rx_cnt); +} + +static int grtnic_hardware_error(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + u32 var; + int read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error; + struct grtnic_adapter *adapter = (struct grtnic_adapter *)data; + struct grtnic_hw *hw = &adapter->hw; + + var = readl(hw->user_bar + 0x600); + + read_count_error = (var >> 31) & 0x01; + sgtxfifo_error = (var >> 16) & 0xff; + sgrxfifo_error = (var >> 8) & 0xff; + mainfifo_error = (var >> 0) & 0xff; + + return snprintf(page, count, "read_count_error = %d, sgtxfifo_error = %d, sgrxfifo_error = %d, mainfifo_error = %d\n", read_count_error, sgtxfifo_error, sgrxfifo_error, mainfifo_error); +} + +struct grtnic_proc_type { + char *name; + int (*read)(char *page, char **start, off_t off, int count, int *eof, void *data); + int (*write)(struct file *file, const char *buffer, unsigned long count, void *data); +}; + +struct grtnic_proc_type grtnic_proc_entries[] = { + {"pktcnt", &grtnic_driver_pktcnt_read, NULL}, + {"fpktcnt", &grtnic_hardware_pktcnt_read, NULL}, + {"fharderr", &grtnic_hardware_error, NULL}, +// {"update", NULL, &update_firmware}, + {NULL, NULL, NULL} +}; + +#endif + + +int grtnic_procfs_topdir_init() +{ + grtnic_top_dir = proc_mkdir("driver/grtnic", NULL); + if (grtnic_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void grtnic_procfs_topdir_exit() +{ + remove_proc_entry("driver/grtnic", NULL); +} + + +int grtnic_procfs_init(struct grtnic_adapter *adapter) +{ + int index; +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) ) + struct file_operations *fops; +#else + struct proc_dir_entry *p; + mode_t mode = 0; +#endif + + adapter->proc_dir = proc_mkdir(pci_name(adapter->pdev), grtnic_top_dir); + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) ) + for (index = 0; grtnic_proc_entries[index].name; index++) { + fops = kmalloc(sizeof(struct file_operations), GFP_KERNEL); + fops->open = grtnic_proc_entries[index].open; + fops->read = seq_read; + fops->write = grtnic_proc_entries[index].write; + fops->llseek = seq_lseek; + fops->release = single_release; + proc_create_data(grtnic_proc_entries[index].name, 0644, 
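Once these entries are registered, each adapter gets a directory named after its PCI address under the driver/grtnic root created by grtnic_procfs_topdir_init(), so the counters above are readable from user space, for example (the PCI address shown is of course device specific):

	cat /proc/driver/grtnic/0000:01:00.0/pktcnt
	cat /proc/driver/grtnic/0000:01:00.0/fharderr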
adapter->proc_dir, fops, adapter); + } + +#else + for (index = 0; grtnic_proc_entries[index].name; index++) { + if (grtnic_proc_entries[index].read) + mode = S_IFREG | S_IRUGO; + if (grtnic_proc_entries[index].write) + mode |= S_IFREG | S_IWUSR; + + p = create_proc_entry(grtnic_proc_entries[index].name, mode, adapter->proc_dir); + p->read_proc = grtnic_proc_entries[index].read; + p->write_proc = grtnic_proc_entries[index].write; + p->data = adapter; + } +#endif + + return 0; +} + + +void grtnic_del_proc_entries(struct grtnic_adapter *adapter) +{ + int index; + + if (grtnic_top_dir == NULL) + return; + + for (index = 0; ; index++) + { + if(grtnic_proc_entries[index].name == NULL) + break; + remove_proc_entry(grtnic_proc_entries[index].name, adapter->proc_dir); + } + + if (adapter->proc_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), grtnic_top_dir); +} + + +void grtnic_procfs_exit(struct grtnic_adapter *adapter) +{ + grtnic_del_proc_entries(adapter); +} + +#endif //GRTNIC_PROCFS diff --git a/drivers/net/ethernet/guangruntong/kcompat-generator.sh b/drivers/net/ethernet/guangruntong/kcompat-generator.sh new file mode 100755 index 0000000000000..3bd5ff8ae7c34 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat-generator.sh @@ -0,0 +1,438 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 1999 - 2024 Intel Corporation + +set -Eeuo pipefail + +# This file generates HAVE_ and NEED_ defines for current kernel +# (or KSRC if provided). +# +# It does so by 'gen' function calls (see body of 'gen-devlink' for examples). +# 'gen' could look for various kinds of declarations in provided kernel headers, +# eg look for an enum in one of files specified and check if given enumeration +# (single value) is present. See 'Documentation' or comment above the 'gen' fun +# in the kcompat-lib.sh. + +# Why using bash/awk instead of an old/legacy approach? +# +# The aim is to replicate all the defines provided by human developers +# in the past. Additional bonus is the fact, that we no longer need to care +# about backports done by OS vendors (RHEL, SLES, ORACLE, UBUNTU, more to come). +# We will even work (compile) with only part of backports provided. +# +# To enable smooth transition, especially in time of late fixes, "old" method +# of providing flags should still work as usual. + +# End of intro. +# Find info about coding style/rules at the end of file. +# Most of the implementation is in kcompat-lib.sh, here are actual 'gen' calls. + +export LC_ALL=C +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +ORIG_CWD="$(pwd)" +trap 'rc=$?; echo >&2 "$(realpath "$ORIG_CWD/${BASH_SOURCE[0]}"):$LINENO: failed with rc: $rc"' ERR + +# shellcheck source=kcompat-lib.sh +source "$SCRIPT_DIR"/kcompat-lib.sh + +ARCH=$(uname -m) +IS_ARM= +if [ "$ARCH" == aarch64 ]; then + IS_ARM=1 +fi + +# DO NOT break gen calls below (via \), to make our compat code more grep-able, +# keep them also grouped, first by feature (like DEVLINK), then by .h filename +# finally, keep them sorted within a group (sort by flag name) + +# handy line of DOC copy-pasted form kcompat-lib.sh: +# gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in + +function gen-aux() { + ah='include/linux/auxiliary_bus.h' + mh='include/linux/mod_devicetable.h' + if config_has CONFIG_AUXILIARY_BUS; then + gen HAVE_AUXILIARY_DRIVER_INT_REMOVE if method remove of auxiliary_driver matches 'int' in "$ah" + fi + + # generate HAVE_AUXILIARY_DEVICE_ID only for cases when it's disabled in .config + if ! 
config_has CONFIG_AUXILIARY_BUS; then + gen HAVE_AUXILIARY_DEVICE_ID if struct auxiliary_device_id in "$mh" + fi +} + +function gen-bitfield() { + bf='include/linux/bitfield.h' + gen HAVE_INCLUDE_BITFIELD if macro FIELD_PREP in "$bf" + gen NEED_BITFIELD_FIELD_FIT if macro FIELD_FIT absent in "$bf" + gen NEED_BITFIELD_FIELD_MASK if fun field_mask absent in "$bf" + gen NEED_BITFIELD_FIELD_MAX if macro FIELD_MAX absent in "$bf" +} + +function gen-device() { + dh='include/linux/device.h' + dph='include/linux/dev_printk.h' + gen NEED_BUS_FIND_DEVICE_CONST_DATA if fun bus_find_device lacks 'const void \\*data' in "$dh" + gen NEED_DEV_LEVEL_ONCE if macro dev_level_once absent in "$dh" "$dph" + gen NEED_DEVM_KASPRINTF if fun devm_kasprintf absent in "$dh" + gen NEED_DEVM_KFREE if fun devm_kfree absent in "$dh" + gen NEED_DEVM_KVASPRINTF if fun devm_kvasprintf absent in "$dh" + gen NEED_DEVM_KZALLOC if fun devm_kzalloc absent in "$dh" +} + +function gen-devlink() { + dh='include/net/devlink.h' + gen HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY if fun devlink_flash_update_begin_notify in "$dh" + gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS if struct devlink_flash_update_params in "$dh" + gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW if struct devlink_flash_update_params matches 'struct firmware \\*fw' in "$dh" + gen HAVE_DEVLINK_HEALTH if enum devlink_health_reporter_state in "$dh" + gen HAVE_DEVLINK_HEALTH_OPS_EXTACK if method dump of devlink_health_reporter_ops matches extack in "$dh" + gen HAVE_DEVLINK_INFO_DRIVER_NAME_PUT if fun devlink_info_driver_name_put in "$dh" + gen HAVE_DEVLINK_PARAMS if method validate of devlink_param matches extack in "$dh" + gen HAVE_DEVLINK_PARAMS_PUBLISH if fun devlink_params_publish in "$dh" + gen HAVE_DEVLINK_PORT_NEW if method port_new of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_OPS if struct devlink_port_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT if method port_split of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT if method port_split of devlink_port_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_EXTACK if method port_split of devlink_ops matches extack in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_EXTACK if method port_split of devlink_port_ops matches extack in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_IN_OPS if method port_split of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_IN_PORT_OPS if method port_split of devlink_port_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT if method port_split of devlink_ops matches devlink_port in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT if method port_split of devlink_port_ops matches devlink_port in "$dh" + gen HAVE_DEVLINK_PORT_TYPE_ETH_HAS_NETDEV if fun devlink_port_type_eth_set matches 'struct net_device' in "$dh" + gen HAVE_DEVLINK_RATE_NODE_CREATE if fun devl_rate_node_create in "$dh" + # keep devlink_region_ops body in variable, to not look 4 times for + # exactly the same thing in big file + # please consider it as an example of "how to speed up if needed" + REGION_OPS="$(find-struct-decl devlink_region_ops "$dh")" + gen HAVE_DEVLINK_REGIONS if struct devlink_region_ops in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT if fun snapshot in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS if fun snapshot matches devlink_region_ops in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGISTER_SETS_DEV if fun devlink_register matches 'struct device' in "$dh" + gen HAVE_DEVLINK_RELOAD_ENABLE_DISABLE if fun devlink_reload_enable in "$dh" + gen HAVE_DEVLINK_SET_FEATURES if fun devlink_set_features in "$dh" + gen 
HAVE_DEVL_HEALTH_REPORTER_DESTROY if fun devl_health_reporter_destroy in "$dh" + gen HAVE_DEVL_PORT_REGISTER if fun devl_port_register in "$dh" + gen NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER if fun devlink_health_reporter_create matches auto_recover in "$dh" + gen NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE if fun devlink_resources_unregister matches 'struct devlink_resource \\*' in "$dh" + gen NEED_DEVLINK_TO_DEV if fun devlink_to_dev absent in "$dh" + gen NEED_DEVLINK_UNLOCKED_RESOURCE if fun devl_resource_size_get absent in "$dh" + + gen HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF if enum devlink_port_flavour matches DEVLINK_PORT_FLAVOUR_PCI_SF in include/uapi/linux/devlink.h + gen HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT if enum devlink_reload_action matches DEVLINK_RELOAD_ACTION_FW_ACTIVATE in include/uapi/linux/devlink.h +} + +function gen-dma() { + dma='include/linux/dma-mapping.h' + gen NEED_DMA_ATTRS_PTR if struct dma_attrs in include/linux/dma-attrs.h + gen NEED_DMA_ATTRS if fun dma_map_page_attrs absent in "$dma" +} + +function gen-dpll() { + dh='include/linux/dpll.h' + gen HAVE_DPLL_LOCK_STATUS_ERROR if method lock_status_get of dpll_device_ops matches status_error in "$dh" + gen HAVE_DPLL_PHASE_OFFSET if method phase_offset_get of dpll_pin_ops in "$dh" + gen NEED_DPLL_NETDEV_PIN_SET if fun dpll_netdev_pin_set absent in "$dh" +} + +function gen-ethtool() { + eth='include/linux/ethtool.h' + ueth='include/uapi/linux/ethtool.h' + gen HAVE_ETHTOOL_COALESCE_EXTACK if method get_coalesce of ethtool_ops matches 'struct kernel_ethtool_coalesce \\*' in "$eth" + gen HAVE_ETHTOOL_EXTENDED_RINGPARAMS if method get_ringparam of ethtool_ops matches 'struct kernel_ethtool_ringparam \\*' in "$eth" + gen HAVE_ETHTOOL_KEEE if struct ethtool_keee in "$eth" + gen HAVE_ETHTOOL_RXFH_PARAM if struct ethtool_rxfh_param in "$eth" + gen NEED_ETHTOOL_SPRINTF if fun ethtool_sprintf absent in "$eth" + gen HAVE_ETHTOOL_FLOW_RSS if macro FLOW_RSS in "$ueth" +} + +function gen-filter() { + fh='include/linux/filter.h' + gen NEED_NO_NETDEV_PROG_XDP_WARN_ACTION if fun bpf_warn_invalid_xdp_action lacks 'struct net_device \\*' in "$fh" + gen NEED_XDP_DO_FLUSH if fun xdp_do_flush absent in "$fh" +} + +function gen-flow-dissector() { + gen HAVE_FLOW_DISSECTOR_KEY_PPPOE if enum flow_dissector_key_id matches FLOW_DISSECTOR_KEY_PPPOE in include/net/flow_dissector.h include/net/flow_keys.h + # following HAVE ... CVLAN flag is mistakenly named after an enum key, + # but guards code around function call that was introduced later + gen HAVE_FLOW_DISSECTOR_KEY_CVLAN if fun flow_rule_match_cvlan in include/net/flow_offload.h +} + +function gen-gnss() { + cdh='include/linux/cdev.h' + clh='include/linux/device/class.h' + dh='include/linux/device.h' + gh='include/linux/gnss.h' + th='include/uapi/linux/types.h' + fh='include/linux/fs.h' + + gen HAVE_CDEV_DEVICE if fun cdev_device_add in "$cdh" + gen HAVE_DEV_UEVENT_CONST if method dev_uevent of class matches '(const|RH_KABI_CONST) struct device' in "$clh" "$dh" + gen HAVE_STREAM_OPEN if fun stream_open in "$fh" + # There can be either macro class_create or a function + gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if fun class_create matches 'owner' in "$clh" "$dh" + gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if macro class_create in "$clh" "$dh" + + if ! 
config_has CONFIG_SUSE_KERNEL; then + gen HAVE_GNSS_MODULE if struct gnss_device in "$gh" + fi + + gen HAVE_POLL_T if typedef __poll_t in "$th" +} + +function gen-mdev() { + mdevh='include/linux/mdev.h' + + gen HAVE_DEV_IN_MDEV_API if method probe of mdev_driver matches 'struct device \\*' in "$mdevh" + gen HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE if method create of mdev_parent_ops matches 'struct kobject \\*' in "$mdevh" +} + +function gen-netdevice() { + ndh='include/linux/netdevice.h' + gen HAVE_NDO_ETH_IOCTL if fun ndo_eth_ioctl in "$ndh" + gen HAVE_NDO_EXTENDED_SET_TX_MAXRATE if method ndo_set_tx_maxrate of net_device_ops_extended in "$ndh" + gen HAVE_NDO_FDB_ADD_VID if method ndo_fdb_del of net_device_ops matches 'u16 vid' in "$ndh" + gen HAVE_NDO_FDB_DEL_EXTACK if method ndo_fdb_del of net_device_ops matches extack in "$ndh" + gen HAVE_NDO_GET_DEVLINK_PORT if method ndo_get_devlink_port of net_device_ops in "$ndh" + gen HAVE_NDO_UDP_TUNNEL_CALLBACK if method ndo_udp_tunnel_add of net_device_ops in "$ndh" + gen HAVE_NETDEV_EXTENDED_MIN_MAX_MTU if struct net_device_extended matches min_mtu in "$ndh" + gen HAVE_NETDEV_MIN_MAX_MTU if struct net_device matches min_mtu in "$ndh" + gen HAVE_NETIF_SET_TSO_MAX if fun netif_set_tso_max_size in "$ndh" + gen HAVE_SET_NETDEV_DEVLINK_PORT if macro SET_NETDEV_DEVLINK_PORT in "$ndh" + gen NEED_NETIF_NAPI_ADD_NO_WEIGHT if fun netif_napi_add matches 'int weight' in "$ndh" + gen NEED_NET_PREFETCH if fun net_prefetch absent in "$ndh" + gen NEED_XDP_FEATURES if enum netdev_xdp_act absent in include/uapi/linux/netdev.h +} + +function gen-pci() { + pcih='include/linux/pci.h' + gen HAVE_PCI_MSIX_ALLOC_IRQ_AT if fun pci_msix_alloc_irq_at in "$pcih" + gen HAVE_PCI_MSIX_CAN_ALLOC_DYN if fun pci_msix_can_alloc_dyn in "$pcih" + gen HAVE_PCI_MSIX_FREE_IRQ if fun pci_msix_free_irq in "$pcih" + gen HAVE_PER_VF_MSIX_SYSFS if method sriov_set_msix_vec_count of pci_driver in "$pcih" + gen HAVE_STRUCT_PCI_DEV_PTM_ENABLED if struct pci_dev matches ptm_enabled in "$pcih" + gen NEED_PCIE_FLR if fun pcie_flr absent in "$pcih" + gen NEED_PCIE_FLR_RETVAL if fun pcie_flr lacks 'int pcie_flr' in "$pcih" + gen NEED_PCIE_PTM_ENABLED if fun pcie_ptm_enabled absent in "$pcih" + gen NEED_PCI_ENABLE_PTM if fun pci_enable_ptm absent in "$pcih" +} + +function gen-stddef() { + stddef='include/linux/stddef.h' + ustddef='include/uapi/linux/stddef.h' + gen HAVE_STDDEF_OFFSETTOEND if macro offsetofend in "$stddef" + gen NEED_DECLARE_FLEX_ARRAY if macro DECLARE_FLEX_ARRAY absent in "$stddef" + gen NEED_STRUCT_GROUP if macro struct_group absent in "$stddef" + gen NEED___STRUCT_GROUP if macro __struct_group absent in "$ustddef" +} + +function gen-vfio() { + # PASID_SUPPORT depends on multiple different functions existing + PASID_FUNC1="$(find-fun-decl mdev_set_iommu_device include/linux/mdev.h)" + PASID_FUNC2="$(find-fun-decl vfio_group_iommu_domain include/linux/vfio.h)" + + gen HAVE_PASID_SUPPORT if string "${PASID_FUNC1:+1}${PASID_FUNC2:+1}" equals 11 + gen HAVE_VFIO_FREE_DEV if fun vfio_free_device in include/linux/vfio.h + gen HAVE_LMV1_SUPPORT if macro VFIO_REGION_TYPE_MIGRATION in include/uapi/linux/vfio.h +} + +function gen-other() { + pciaerh='include/linux/aer.h' + ush='include/linux/u64_stats_sync.h' + gen HAVE_X86_STEPPING if struct cpuinfo_x86 matches x86_stepping in arch/x86/include/asm/processor.h + gen HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING if fun pci_enable_pcie_error_reporting in "$pciaerh" + gen NEED_PCI_AER_CLEAR_NONFATAL_STATUS if fun pci_aer_clear_nonfatal_status absent in 
"$pciaerh" + gen NEED_BITMAP_COPY_CLEAR_TAIL if fun bitmap_copy_clear_tail absent in include/linux/bitmap.h + gen NEED_BITMAP_FROM_ARR32 if fun bitmap_from_arr32 absent in include/linux/bitmap.h + gen NEED_BITMAP_TO_ARR32 if fun bitmap_to_arr32 absent in include/linux/bitmap.h + gen NEED_ASSIGN_BIT if fun assign_bit absent in include/linux/bitops.h + gen NEED_STATIC_ASSERT if macro static_assert absent in include/linux/build_bug.h + gen NEED_CLEANUP_API if macro __free absent in include/linux/cleanup.h + gen NEED___STRUCT_SIZE if macro __struct_size absent in include/linux/compiler_types.h include/linux/fortify-string.h + gen HAVE_COMPLETION_RAW_SPINLOCK if struct completion matches 'struct swait_queue_head' in include/linux/completion.h + gen NEED_IS_CONSTEXPR if macro __is_constexpr absent in include/linux/const.h include/linux/minmax.h include/linux/kernel.h + gen NEED_DEBUGFS_LOOKUP if fun debugfs_lookup absent in include/linux/debugfs.h + gen NEED_DEBUGFS_LOOKUP_AND_REMOVE if fun debugfs_lookup_and_remove absent in include/linux/debugfs.h + gen NEED_ETH_HW_ADDR_SET if fun eth_hw_addr_set absent in include/linux/etherdevice.h + gen NEED_FIND_NEXT_BIT_WRAP if fun find_next_bit_wrap absent in include/linux/find.h + gen HAVE_FILE_IN_SEQ_FILE if struct seq_file matches 'struct file' in include/linux/fs.h + gen NEED_FS_FILE_DENTRY if fun file_dentry absent in include/linux/fs.h + gen HAVE_HWMON_DEVICE_REGISTER_WITH_INFO if fun hwmon_device_register_with_info in include/linux/hwmon.h + gen NEED_HWMON_CHANNEL_INFO if macro HWMON_CHANNEL_INFO absent in include/linux/hwmon.h + gen NEED_ETH_TYPE_VLAN if fun eth_type_vlan absent in include/linux/if_vlan.h + gen HAVE_IOMMU_DEV_FEAT_AUX if enum iommu_dev_features matches IOMMU_DEV_FEAT_AUX in include/linux/iommu.h + gen NEED_READ_POLL_TIMEOUT if macro read_poll_timeout absent in include/linux/iopoll.h + gen NEED_DEFINE_STATIC_KEY_FALSE if macro DEFINE_STATIC_KEY_FALSE absent in include/linux/jump_label.h + gen NEED_STATIC_BRANCH_LIKELY if macro static_branch_likely absent in include/linux/jump_label.h + gen HAVE_STRUCT_STATIC_KEY_FALSE if struct static_key_false in include/linux/jump_label.h include/linux/jump_label_type.h + gen NEED_DECLARE_STATIC_KEY_FALSE if macro DECLARE_STATIC_KEY_FALSE absent in include/linux/jump_label.h include/linux/jump_label_type.h + gen NEED_LOWER_16_BITS if macro lower_16_bits absent in include/linux/kernel.h + gen NEED_UPPER_16_BITS if macro upper_16_bits absent in include/linux/kernel.h + gen NEED_LIST_COUNT_NODES if fun list_count_nodes absent in include/linux/list.h + + # On aarch64 RHEL systems, mul_u64_u64_div_u64 appears to be declared + # in math64 header, but is not provided by kernel + # so on these systems, set it to need anyway. 
+ if [ "$IS_ARM" ]; then + NEED_MUL_STR=1 + else + MUL_U64_U64_DIV_U64_FUNC="$(find-fun-decl mul_u64_u64_div_u64 include/linux/math64.h)" + NEED_MUL_STR="${MUL_U64_U64_DIV_U64_FUNC:-1}" + fi + gen NEED_MUL_U64_U64_DIV_U64 if string "${NEED_MUL_STR}" equals 1 + + gen HAVE_MDEV_GET_DRVDATA if fun mdev_get_drvdata in include/linux/mdev.h + gen HAVE_MDEV_REGISTER_PARENT if fun mdev_register_parent in include/linux/mdev.h + gen HAVE_VM_FLAGS_API if fun vm_flags_init in include/linux/mm.h + gen HAVE_NL_SET_ERR_MSG_FMT if macro NL_SET_ERR_MSG_FMT in include/linux/netlink.h + gen NEED_DEV_PM_DOMAIN_ATTACH if fun dev_pm_domain_attach absent in include/linux/pm_domain.h include/linux/pm.h + gen NEED_DEV_PM_DOMAIN_DETACH if fun dev_pm_domain_detach absent in include/linux/pm_domain.h include/linux/pm.h + gen NEED_PTP_CLASSIFY_RAW if fun ptp_classify_raw absent in include/linux/ptp_classify.h + gen NEED_PTP_PARSE_HEADER if fun ptp_parse_header absent in include/linux/ptp_classify.h + gen HAVE_PTP_CLOCK_INFO_ADJFINE if method adjfine of ptp_clock_info in include/linux/ptp_clock_kernel.h + gen NEED_DIFF_BY_SCALED_PPM if fun diff_by_scaled_ppm absent in include/linux/ptp_clock_kernel.h + gen NEED_PTP_SYSTEM_TIMESTAMP if fun ptp_read_system_prets absent in include/linux/ptp_clock_kernel.h + gen NEED_RADIX_TREE_EMPTY if fun radix_tree_empty absent in include/linux/radix-tree.h + gen NEED_SCHED_PARAM if struct sched_param absent in include/linux/sched.h + gen NEED_SET_SCHED_FIFO if fun sched_set_fifo absent in include/linux/sched.h + gen NEED_RT_H if macro MAX_RT_PRIO absent in include/linux/sched/prio.h + gen NEED_DEV_PAGE_IS_REUSABLE if fun dev_page_is_reusable absent in include/linux/skbuff.h + gen NEED_NAPI_BUILD_SKB if fun napi_build_skb absent in include/linux/skbuff.h + gen NEED_KREALLOC_ARRAY if fun krealloc_array absent in include/linux/slab.h + gen NEED_SYSFS_MATCH_STRING if macro sysfs_match_string absent in include/linux/string.h + gen NEED_SYSFS_EMIT if fun sysfs_emit absent in include/linux/sysfs.h + gen HAVE_TRACE_ENABLED_SUPPORT if implementation of macro __DECLARE_TRACE matches 'trace_##name##_enabled' in include/linux/tracepoint.h + gen HAVE_TTY_OP_WRITE_SIZE_T if method write of tty_operations matches size_t in include/linux/tty_driver.h + gen HAVE_U64_STATS_FETCH_BEGIN_IRQ if fun u64_stats_fetch_begin_irq in "$ush" + gen HAVE_U64_STATS_FETCH_RETRY_IRQ if fun u64_stats_fetch_retry_irq in "$ush" + gen NEED_U64_STATS_READ if fun u64_stats_read absent in "$ush" + gen NEED_U64_STATS_SET if fun u64_stats_set absent in "$ush" + gen HAVE_NET_RPS_H if macro RPS_NO_FILTER in include/net/rps.h +} + +# all the generations, extracted from main() to keep normal code and various +# prep separated +function gen-all() { + if config_has CONFIG_NET_DEVLINK; then + gen-devlink + fi + gen-netdevice + # code above is covered by unit_tests/test_gold.sh + if [ -n "${JUST_UNIT_TESTING-}" ]; then + return + fi + gen-aux + gen-bitfield + gen-device + gen-dma + gen-dpll + gen-ethtool + gen-filter + gen-flow-dissector + gen-gnss + gen-mdev + gen-pci + gen-stddef + gen-vfio + gen-other +} + +function main() { + if ! [ -d "${KSRC-}" ]; then + echo >&2 "env KSRC=${KSRC-} does not exist or is not a directory" + exit 11 + fi + + # we need some flags from .config or (autoconf.h), try to find it + if [ -z ${CONFIG_FILE-} ]; then + find_config_file + + if [ -z ${CONFIG_FILE-} ]; then + echo >&2 "unable to locate a config file at KSRC=${KSRC}. please set CONFIG_FILE to the kernel configuration file." 
+ exit 10 + fi + fi + + if [ ! -f "${CONFIG_FILE-}" ]; then + echo >&2 ".config passed in by env CONFIG_FILE=${CONFIG_FILE} does not exist or is not a file" + exit 9 + fi + CONFIG_FILE=$(realpath "${CONFIG_FILE-}") + + # check if caller (like our makefile) wants to redirect output to file + if [ -n "${OUT-}" ]; then + + # in case OUT exists, we don't want to overwrite it, instead + # write to a temporary copy. + if [ -s "${OUT}" ]; then + TMP_OUT="$(mktemp "${OUT}.XXX")" + trap "rm -f '${TMP_OUT}'" EXIT + + REAL_OUT="${OUT}" + OUT="${TMP_OUT}" + fi + + exec > "$OUT" + # all stdout goes to OUT since now + echo "/* Autogenerated for KSRC=${KSRC-} via $(basename "$0") */" + fi + + cd "${KSRC}" + + # check if KSRC was ok/if we are in proper place to look for headers + if [ -z "$(filter-out-bad-files include/linux/kernel.h)" ]; then + echo >&2 "seems that there are no kernel includes placed in KSRC=${KSRC} + pwd=$(pwd); ls -l:" + ls -l >&2 + exit 8 + fi + + if [ -z ${UNIFDEF_MODE-} ]; then + echo "#ifndef _KCOMPAT_GENERATED_DEFS_H_" + echo "#define _KCOMPAT_GENERATED_DEFS_H_" + fi + + gen-all + + if [ -z ${UNIFDEF_MODE-} ]; then + echo "#endif /* _KCOMPAT_GENERATED_DEFS_H_ */" + fi + + if [ -n "${OUT-}" ]; then + cd "$ORIG_CWD" + + # Compare and see if anything changed. This avoids updating + # mtime of the file. + if [ -n "${REAL_OUT-}" ]; then + if cmp --silent "${REAL_OUT}" "${TMP_OUT}"; then + # exit now, skipping print of the output since + # there were no changes. the trap should + # cleanup TMP_OUT + exit 0 + fi + + mv -f "${TMP_OUT}" "${REAL_OUT}" + OUT="${REAL_OUT}" + fi + + # dump output, will be visible in CI + if [ -n "${JUST_UNIT_TESTING-}${QUIET_COMPAT-}" ]; then + return + fi + cat -n "$OUT" >&2 + fi +} + +main + +# Coding style: +# - rely on `set -e` handling as much as possible, so: +# - do not use <(bash process substitution) - it breaks error handling; +# - do not put substantial logic in `if`-like statement - it disables error +# handling inside of the conditional (`if big-fun call; then` is substantial) +# - make shellcheck happy - https://www.shellcheck.net +# +# That enables us to move processing out of `if` or `... && ...` statements, +# what finally means that bash error handling (`set -e`) would break on errors. diff --git a/drivers/net/ethernet/guangruntong/kcompat-lib.sh b/drivers/net/ethernet/guangruntong/kcompat-lib.sh new file mode 100755 index 0000000000000..bcc3cc94f9915 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat-lib.sh @@ -0,0 +1,403 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 1999 - 2024 Intel Corporation + +# to be sourced + +# General shell helpers + +# exit with non-zero exit code; if there is only one param: +# exit with msg $1 and exit code from last command (or 99 if = 0) +# otherwise, exit with $1 and use remaining arguments as msg +function die() { + rc=$? + if [ $# -gt 1 ]; then + rc="$1" + shift + fi + [ "$rc" -ne 0 ] || rc=99 + echo >&2 "$@" + exit $rc +} + +# filter out paths that are not files +# input $@, output via echo; +# note: pass `-` for stdin +# note: outputs nothing if all input files are "bad" (eg. not existing), but it +# is left for caller to decide if this is an erorr condition; +# note: whitespaces are considered "bad" as part of filename, it's an error. 
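+# example (mirrors how find-decl() below consumes the output):
+#   files="$(filter-out-bad-files include/net/devlink.h)" || die
+#   [ -n "$files" ] || return 0   # nothing usable to scan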
+function filter-out-bad-files() { + if [[ $# = 1 && "$1" = '-' ]]; then + echo - + return 0 + fi + if [ $# = 0 ]; then + die 10 "no files passed, use '-' when reading from pipe (|)" + fi + local any=0 diagmsgs=/dev/stderr re=$'[\t \n]' + [ -n "${QUIET_COMPAT-}" ] && diagmsgs=/dev/null + for x in "$@"; do + if [ -e "$x" ]; then + if [[ "$x" =~ $re ]]; then + die 11 "err: filename contains whitespaces: $x." + fi + echo "$x" + any=1 + else + echo >&"$diagmsgs" filtering "$x" out + fi + done + if [ $any = 0 ]; then + echo >&"$diagmsgs" 'all files (for given query) filtered out' + fi +} + +# Basics of regexp explained, as a reference for mostly-C programmers: +# (bash) "regexp-$VAR-regexp" - bash' VARs are placed into "QUOTED" strings +# /\);?$/ - match end of function declaration, $ is end of string +# ^[ \t]* - (heuristic), anything but comment, eg to exclude function docs +# /STH/, /END/ - (awk), print all lines sice STH matched, up to END, inclusive + +# "Whitespace only" +WB='[ \t\n]' + +# Helpers below print the thing that is looked for, for further grep'ping/etc. +# That simplifies process of excluding comments or spares us state machine impl. +# +# We take advantage of current/common linux codebase formatting here. +# +# Functions in this section require input file/s passed as args +# (usually one, but more could be supplied in case of renames in kernel), +# '-' could be used as an (only) file argument to read from stdin/pipe. + +# wrapper over find-something-decl() functions below, to avoid repetition +# pass $what as $1, $end as $2, and $files to look in as rest of args +function find-decl() { + test $# -ge 3 # ensure that there are at least 3 params + local what end files + what="$1" + end="$2" + shift 2 + files="$(filter-out-bad-files "$@")" || die + if [ -z "$files" ]; then + return 0 + fi + # shellcheck disable=SC2086 + awk " + /^$WB*\*/ {next} + $what, $end + " $files +} + +# yield $1 function declaration (signature), don't pass return type in $1 +# looks only in files specified ($2, $3...) +function find-fun-decl() { + test $# -ge 2 + local what end + what="/$WB*([(]\*)?$1$WB*($|[()])/" + end='/\);?$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield $1 enum declaration (type/body) +function find-enum-decl() { + test $# -ge 2 + local what end + what="/^$WB*enum$WB+$1"' \{$/' + end='/\};$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield $1 struct declaration (type/body) +function find-struct-decl() { + test $# -ge 2 + local what end + what="/^$WB*struct$WB+$1"' \{$/' + end='/^\};$/' # that's (^) different from enum-decl + shift + find-decl "$what" "$end" "$@" +} + +# yield first line of $1 macro definition +function find-macro-decl() { + test $# -ge 2 + local what end + # only unindented defines, only whole-word match + what="/^#define$WB+$1"'([ \t\(]|$)/' + end=1 # only first line; use find-macro-implementation-decl for full body + shift + find-decl "$what" "$end" "$@" +} + +# yield full macro implementation +function find-macro-implementation-decl() { + test $# -ge 2 + local what end + # only unindented defines, only whole-word match + what="/^#define$WB+$1"'([ \t\(]|$)/' + # full implementation, until a line not ending in a backslash. + # Does not handle macros with comments embedded within the definition. 
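+	# e.g. for a hypothetical macro
+	#   #define FOO(x) \
+	#           do_something(x); \
+	#           bar(x)
+	# the awk range closes on the "bar(x)" line, the first one without a
+	# trailing backslash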
+ end='/[^\\]$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield first line of $1 typedef definition (simple typedefs only) +# this probably won't handle typedef struct { \n int foo;\n}; +function find-typedef-decl() { + test $# -ge 2 + local what end + what="/^typedef .* $1"';$/' + end=1 + shift + find-decl "$what" "$end" "$@" +} + +# gen() - DSL-like function to wrap around all the other +# +# syntax: +# gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in +# gen DEFINE if string "actual" equals "expected" + +# where: +# DEFINE is HAVE_ or NEED_ #define to print; +# `if` is there to just read it easier and made syntax easier to check; +# +# NAME is the name for what we are looking for; +# +# `if string` can be used to check if a provided string matches an expected +# value. The define will only be generated if the strings are exactly +# equal. Otherwise, the define will not be generated. When operating in +# UNIFDEF_MODE, -DDEFINE is output when the strings are equal, while +# -UDEFINE is output when the strings are not equal. This is intended +# for cases where a more complex conditional is required, such as +# generating a define when multiple different functions exist. +# +# Ex: +# +# FUNC1="$(find-fun-decl devlink_foo1 devlink.h)" +# FUNC2="$(find-fun-decl devlink_foo2 devlink.h)" +# gen HAVE_FOO_12 if string "${FUNC1:+1}${FUNC2:+1}" equals "11" +# +# KIND specifies what kind of declaration/definition we are looking for, +# could be: fun, enum, struct, method, macro, typedef, +# 'implementation of macro' +# for KIND=method, we are looking for function ptr named METHOD in struct +# named NAME (two optional args are then necessary (METHOD & of)); +# +# for KIND='implementation of macro' we are looking for the full +# implementation of the macro, not just its first line. This is usually +# combined with "matches" or "lacks". +# +# next [optional] args could be used: +# matches PATTERN - use to grep for the PATTERN within definition +# (eg, for ext_ack param) +# lacks - use to add #define only if there is no match of the PATTERN, +# *but* the NAME is *found* +# absent - the NAME that we grep for must be not found +# (ie: function not exisiting) +# +# without this optional params, behavior is the same as with +# `matches .` - use to grep just for existence of NAME; +# +# `in` is there to ease syntax, similar to `if` before. +# +# is just space-separate list of files to look in, +# single (-) for stdin. +# +# PATTERN is an awk pattern, will be wrapped by two slashes (/) +# +# The usual output is a list of "#define " lines for each flag that has +# a matched definition. When UNIFDEF_MODE is set to a non-zero string, the +# output is instead a sequence of "-D" for each matched definition, and +# "-U" for each definition which didn't match. 
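+# One more worked example, using a flag/header pair that appears in
+# kcompat-generator.sh, purely to illustrate the output format:
+#
+#   gen HAVE_NDO_ETH_IOCTL if fun ndo_eth_ioctl in include/linux/netdevice.h
+#
+# prints "#define HAVE_NDO_ETH_IOCTL" when the declaration is found and
+# nothing otherwise; with UNIFDEF_MODE set it prints -DHAVE_NDO_ETH_IOCTL
+# or -UHAVE_NDO_ETH_IOCTL respectively.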
+function gen() { + test $# -ge 4 || die 20 "too few arguments, $# given, at least 4 needed" + local define if_kw kind name in_kw # mandatory + local of_kw method_name operator pattern # optional + local src_line="${BASH_SOURCE[0]}:${BASH_LINENO[0]}" + define="$1" + if_kw="$2" + kind="$3" + local orig_args_cnt=$# + shift 3 + [ "$if_kw" != if ] && die 21 "$src_line: 'if' keyword expected, '$if_kw' given" + case "$kind" in + string) + local actual_str expect_str equals_kw missing_fmt found_fmt + + test $# -ge 3 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 6 needed" + + actual_str="$1" + equals_kw="$2" + expect_str="$3" + shift 3 + + if [ -z ${UNIFDEF_MODE:+1} ]; then + found_fmt="#define %s\n" + missing_fmt="" + else + found_fmt="-D%s\n" + missing_fmt="-U%s\n" + fi + + if [ "${actual_str}" = "${expect_str}" ]; then + printf -- "$found_fmt" "$define" + else + printf -- "$missing_fmt" "$define" + fi + + return + ;; + fun|enum|struct|macro|typedef) + test $# -ge 3 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 6 needed" + name="$1" + shift + ;; + method) + test $# -ge 5 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed" + method_name="$1" + of_kw="$2" + name="$3" + shift 3 + [ "$of_kw" != of ] && die 23 "$src_line: 'of' keyword expected, '$of_kw' given" + ;; + implementation) + test $# -ge 5 || die 28 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed" + of_kw="$1" + kind="$2" + name="$3" + shift 3 + [ "$of_kw" != of ] && die 29 "$src_line: 'of' keyword expected, '$of_kw' given" + [ "$kind" != macro ] && die 30 "$src_line: implementation only supports 'macro', '$kind' given" + kind=macro-implementation + ;; + *) die 24 "$src_line: unknown KIND ($kind) to look for" ;; + esac + operator="$1" + case "$operator" in + absent) + pattern='.' + in_kw="$2" + shift 2 + ;; + matches|lacks) + pattern="$2" + in_kw="$3" + shift 3 + ;; + in) + operator=matches + pattern='.' 
+ in_kw=in + shift + ;; + *) die 25 "$src_line: unknown OPERATOR ($operator) to look for" ;; + esac + [ "$in_kw" != in ] && die 26 "$src_line: 'in' keyword expected, '$in_kw' given" + test $# -ge 1 || die 27 "$src_line: too few arguments, at least one filename expected" + + local first_decl= + if [ "$kind" = method ]; then + first_decl="$(find-struct-decl "$name" "$@")" || exit 40 + # prepare params for next lookup phase + set -- - # overwrite $@ to be single dash (-) + name="$method_name" + kind=fun + elif [[ $# = 1 && "$1" = '-' ]]; then + # avoid losing stdin provided to gen() due to redirection (<<<) + first_decl="$(cat -)" + fi + + local unifdef + unifdef=${UNIFDEF_MODE:+1} + + # lookup the NAME + local body + body="$(find-$kind-decl "$name" "$@" <<< "$first_decl")" || exit 41 + awk -v define="$define" -v pattern="$pattern" -v "$operator"=1 -v unifdef="$unifdef" ' + BEGIN { + # prepend "identifier boundary" to pattern, also append + # it, but only for patterns not ending with such already + # + # eg: "foo" -> "\bfoo\b" + # "struct foo *" -> "\bstruct foo *" + + # Note that mawk does not support "\b", so we have our + # own approximation, NI + NI = "[^A-Za-z0-9_]" # "Not an Indentifier" + + if (!match(pattern, NI "$")) + pattern = pattern "(" NI "|$)" + pattern = "(^|" NI ")" pattern + } + /./ { not_empty = 1 } + $0 ~ pattern { found = 1 } + END { + if (unifdef) { + found_fmt="-D%s\n" + missing_fmt="-U%s\n" + } else { + found_fmt="#define %s\n" + missing_fmt="" + } + + if (lacks && !found && not_empty || matches && found || absent && !found) + printf(found_fmt, define) + else if (missing_fmt) + printf(missing_fmt, define) + } + ' <<< "$body" +} + +# tell if given flag is enabled in .config +# return 0 if given flag is enabled, 1 otherwise +# inputs: +# $1 - flag to check (whole word, without _MODULE suffix) +# env flag $CONFIG_FILE +# +# there are two "config" formats supported, to ease up integrators lifes +# .config (without leading #~ prefix): +#~ # CONFIG_ACPI_EC_DEBUGFS is not set +#~ CONFIG_ACPI_AC=y +#~ CONFIG_ACPI_VIDEO=m +# and autoconf.h, which would be: +#~ #define CONFIG_ACPI_AC 1 +#~ #define CONFIG_ACPI_VIDEO_MODULE 1 +function config_has() { + grep -qE "^(#define )?$1((_MODULE)? 1|=m|=y)$" "$CONFIG_FILE" +} + +# try to locate a suitable config file from KSRC +# +# On success, the CONFIG_FILE variable will be updated to reflect the full +# path to a configuration file. +# +# Depends on KSRC being set +function find_config_file() { + local -a CSP + local file + local diagmsgs=/dev/stderr + + [ -n "${QUIET_COMPAT-}" ] && diagmsgs=/dev/null + + if ! 
[ -d "${KSRC-}" ]; then + return + fi + + CSP=( + "$KSRC/include/generated/autoconf.h" + "$KSRC/include/linux/autoconf.h" + "$KSRC/.config" + ) + + for file in "${CSP[@]}"; do + if [ -f $file ]; then + echo >&"$diagmsgs" "using CONFIG_FILE=$file" + CONFIG_FILE=$file + return + fi + done +} diff --git a/drivers/net/ethernet/guangruntong/kcompat.c b/drivers/net/ethernet/guangruntong/kcompat.c new file mode 100755 index 0000000000000..301ea08fe25f7 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat.c @@ -0,0 +1,3075 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#include "grtnic.h" +#include "kcompat.h" + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? '0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. 
*/ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= _kc_LEFT; goto repeat; + case '+': flags |= _kc_PLUS; goto repeat; + case ' ': flags |= _kc_SPACE; goto repeat; + case '#': flags |= _kc_SPECIAL; goto repeat; + case '0': flags |= _kc_ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt+1)) { + str = get_mac(str, end, va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 +_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, + size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void +_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +int +_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int +_kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { + pci_release_regions(dev); + 
return -EBUSY; + } + } + } + return 0; +} + +void +_kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device * +_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int +_kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { 0, }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +int +_kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int +_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t) high << 32; + rem -= (uint64_t) (high*base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b+b; + d = d+d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? (size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. 
*/ + if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && + !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + 
size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] + : '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, + const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +int ixgbe_dcb_netlink_register(void) +{ + return 0; +} + +int ixgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter, int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = container_of(napi, + struct adapter_q_vector, + napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, 
work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + +int +_kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int +_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +static void __kc_pci_set_main(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus DMA control\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_main(struct pci_dev *dev) +{ + __kc_pci_set_main(dev, false); +} +#endif /* < 2.6.29 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if 
(skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16) skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16) (((u64) hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; 
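+
+	/* Group config-space offsets by the register block they belong to and
+	 * defer to the helpers above, which know whether that block exists
+	 * for this device's port type and PCIe capability version.
+	 */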
+ + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
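+ *
+ * This mirrors the kernel's private Tx-queue attribute layout so that the
+ * XPS backport below can locate the "xps_cpus" attribute by name and store
+ * a CPU mask through its sysfs ->store() handler on kernels that lack
+ * netif_set_xps_queue().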
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ + struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, + cpumask_bits(mask), _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16) skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = map->queues[ + ((u64)hash * map->len) >> 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = + rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
+ */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + int ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", pci_domain_nr(bus), + bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ + u32 
v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* nexthdr; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), + &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, + 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, + 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, + 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, + 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, + 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int 
(*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += 
sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, + int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fille _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. 
+ * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a allowlist of supported fields the SKB can + * parse. If new functionality is added make sure to keep this format (i.e. only + * check for fields that are explicity wanted). + * + * Current allowlist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and returns true. + */ +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 3 protocol %04x\n", __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg(skb->dev, "%s: Unsupported/unimplemented layer 4 protocol %02x\n", __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +int _kc_kstrtobool(const char *s, bool *res) +{ + if (!s) + return -EINVAL; + + switch (s[0]) { + case 'y': + case 'Y': + case '1': + *res = true; + return 0; + case 'n': + case 'N': + case '0': + *res = false; + return 0; + case 'o': + case 'O': + switch (s[1]) { + case 'n': + case 'N': + *res = true; + return 0; + case 'f': + case 'F': + *res = false; + return 0; + default: + break; + } + break; + default: + break; + } + + return -EINVAL; +} +#endif /* < 4.6.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif +#ifdef SPEED_200000 + case SPEED_200000: + return "200Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) ) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i/2] = (unsigned long) buf[i]; + if (++i < halfwords) + bitmap[i/2] |= ((unsigned long) buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. 
*/ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \ + 0) + +static u32 +_kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. 
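+	 * Devices that predate PCIe r3.0 do not implement Link Capabilities 2
+	 * and read it back as zero, which is why a zero lnkcap2 falls through
+	 * to the Link Capabilities path below.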
+ */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) { + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? 
pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older kernels do not */ + switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !RHEL >= 8.2 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ + +#ifdef NEED_DEVM_KASPRINTF +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) +{ + unsigned int len; + char *p; + va_list aq; + + va_copy(aq, ap); + len = vsnprintf(NULL, 0, fmt, aq); + va_end(aq); + + p = devm_kmalloc(dev, len + 1, gfp); + if (!p) + return NULL; + + vsnprintf(p, len + 1, fmt, ap); + + return p; +} + +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) +{ + va_list ap; + char *p; + + va_start(ap, fmt); + p = devm_kvasprintf(dev, gfp, fmt, ap); + va_end(ap); + + return p; +} +#endif /* NEED_DEVM_KASPRINTF */ + +#ifdef NEED_PCI_IOV_VF_ID +#ifdef CONFIG_PCI_IOV +/* + * Below function needs to access pci_sriov offset and stride. Since + * pci_sriov structure is defined in drivers/pci/pci.h which can not + * be included as linux kernel header file, the structure definition + * is not globally visible. + * As a result, one copy of structure definition is added. Since the + * definition is a copy, you need to make sure the kernel you want + * to backport must have exactly the same pci_sriov definition as the + * copy, otherwise you'll access wrong field offset and value. 
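+ * Only the 'offset' and 'stride' members are read by _kc_pci_iov_vf_id()
+ * below, but their byte offsets depend on every member declared before
+ * them, so the whole layout must match the target kernel.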
+ */ + +/* Single Root I/O Virtualization */ +struct pci_sriov { + int pos; /* Capability position */ + int nres; /* Number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_VFs; /* Total VFs associated with the PF */ + u16 initial_VFs; /* Initial VFs associated with the PF */ + u16 num_VFs; /* Number of VFs available */ + u16 offset; /* First VF Routing ID offset */ + u16 stride; /* Following VF stride */ + u16 vf_device; /* VF device ID */ + u32 pgsz; /* Page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + u8 max_VF_buses; /* Max buses consumed by VFs */ + u16 driver_max_VFs; /* Max num VFs driver supports */ + struct pci_dev *dev; /* Lowest numbered PF */ + struct pci_dev *self; /* This PF */ + u32 cfg_size; /* VF config space size */ + u32 class; /* VF device */ + u8 hdr_type; /* VF header type */ + u16 subsystem_vendor; /* VF subsystem vendor */ + u16 subsystem_device; /* VF subsystem device */ + resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ + bool drivers_autoprobe; /* Auto probing of VFs by driver */ +}; + +int _kc_pci_iov_vf_id(struct pci_dev *dev) +{ + struct pci_dev *pf; + + if (!dev->is_virtfn) + return -EINVAL; + + pf = pci_physfn(dev); + return (((dev->bus->number << 8) + dev->devfn) - + ((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) / + pf->sriov->stride; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* NEED_PCI_IOV_VF_ID */ + +#ifdef NEED_MUL_U64_U64_DIV_U64 +u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + /* can a * b overflow ? */ + if (ilog2(a) + ilog2(b) > 62) { + /* + * (b * a) / c is equal to + * + * (b / c) * a + + * (b % c) * a / c + * + * if nothing overflows. Can the 1st multiplication + * overflow? Yes, but we do not care: this can only + * happen if the end result can't fit in u64 anyway. 
+ * + * So the code below does + * + * res = (b / c) * a; + * b = b % c; + */ + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif /* NEED_MUL_U64_U64_DIV_U64 */ diff --git a/drivers/net/ethernet/guangruntong/kcompat.h b/drivers/net/ethernet/guangruntong/kcompat.h new file mode 100755 index 0000000000000..ae377eccf08c7 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat.h @@ -0,0 +1,7193 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#include "kcompat_gcc.h" +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct ixgbe_adapter +#define adapter_q_vector ixgbe_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) do {} while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) do {} while (0) +#define msi_remove_pci_irq_vectors(a) do {} while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + 
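+/* Illustrative sketch (not part of this header): with the stubs above, a
+ * driver may request MSI unconditionally and fall back to legacy INTx when
+ * CONFIG_PCI_MSI is compiled out, e.g.:
+ *
+ *	if (pci_enable_msi(pdev))	// expands to -ENOTSUPP without MSI
+ *		dev_info(&pdev->dev, "using legacy interrupts\n");
+ *	else
+ *		adapter->msi_enabled = true;	// 'msi_enabled' is hypothetical
+ */
+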
+/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
*/ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) ({0;}) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a,b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#ifdef __CHECKER__ +/* cppcheck-suppress preprocessorErrorDirective */ +#endif /* __CHECKER__ */ +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
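+ * As a worked example of the encoding defined below, the 3.13.0-45 kernel
+ * shown here has LINUX_VERSION_CODE 0x030d00, so UBUNTU_VERSION_CODE works
+ * out to ((0x030d00 & ~0xFF) << 8) + 45 = 0x030d002d.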
+ * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + * + * This also lets us store an ABI value up to 65535, since it can take the + * space that would use the lower byte of the Linux version code. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 65535 +#error UTS_UBUNTU_RELEASE_ABI is larger than 65535... +#endif /* UTS_UBUNTU_RELEASE_ABI > 65535 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif /* <= 3.0.0 */ +#endif /* !UTS_UBUNTU_RELEASE_ABI */ + +/* We ignore the 3rd digit since we want to give precedence to the additional + * ABI value provided by Ubuntu. + */ +#define UBUNTU_VERSION(a,b,c,d) (((a) << 24) + ((b) << 16) + (d)) + +/* SLE_VERSION is used to generate a 3-digit encoding that can order SLE + * kernels based on their major release, service pack, and a possible + * maintenance release. + */ +#define SLE_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) + +/* The SLE_LOCALVERSION_CODE comes from a 3-digit code added as part of the + * Linux kernel version. It is extracted by the driver Makefile. This macro is + * used to generate codes for making comparisons below. + */ +#define SLE_LOCALVERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) + +#ifdef CONFIG_SUSE_KERNEL +/* Starting since at least SLE 12sp4 and SLE 15, the SUSE kernels have + * provided CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL and + * CONFIG_SUSE_AUXRELEASE. Use these to generate SLE_VERSION if available. + * Only fall back to the manual table otherwise. We expect all future versions + * of SLE kernels to include these values, so the table will remain only for + * the older releases. + */ +#ifdef CONFIG_SUSE_VERSION +#ifndef CONFIG_SUSE_PATCHLEVEL +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_PATCHLEVEL is missing" +#endif +#ifndef CONFIG_SUSE_AUXRELEASE +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_AUXRELEASE is missing" +#endif +#define SLE_VERSION_CODE SLE_VERSION(CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL, CONFIG_SUSE_AUXRELEASE) +#else +/* If we do not have the CONFIG_SUSE_VERSION configuration values, fall back + * to the following table for older releases. 
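+ * Either way the result is a plain integer, so, for example, the table's
+ * SLE_VERSION(11,4,0) entry encodes to (11 << 16) + (4 << 8) + 0 = 0x0b0400,
+ * which compares greater than SLE_VERSION(11,3,0) = 0x0b0300, keeping the
+ * service packs in order.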
+ */ +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#else +#error "This looks like a SUSE kernel, but it has an unrecognized local version code." +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* !CONFIG_SUSE_VERSION */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +/* Include definitions from the new kcompat layout */ +#include "kcompat_defs.h" + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. 
Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. + */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #undef __TC_MQPRIO_MODE_MAX + #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + + +#ifdef __KLOCWORK__ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif + +#ifdef WRITE_ONCE +#undef WRITE_ONCE +#define WRITE_ONCE(x, val) ((x) = (val)) +#endif /* WRITE_ONCE */ + +#ifdef wait_event_interruptible_timeout +#undef wait_event_interruptible_timeout +#define wait_event_interruptible_timeout(wq_head, condition, timeout) ({ \ + long ret; \ + if ((condition)) \ + ret = timeout; \ + else \ + ret = 0; \ + ret; \ +}) +#endif /* wait_event_interruptible_timeout */ + +#ifdef max_t +#undef max_t +#define max_t(type, x, y) ({ \ +type __x = (x); \ +type __y = (y); \ +__x > __y ? __x : __y; \ +}) +#endif /* max_t */ + +#ifdef min_t +#undef min_t +#define min_t(type, x, y) ({ \ +type __x = (x); \ +type __y = (y); \ +__x < __y ? __x : __y; \ +}) +#endif /* min_t */ +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. 
Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) ({ \ + int err; \ + __diag_push(); \ + __diag(ignored "-Wformat-nonliteral"); \ + err = __trace_bprintk(ip, fmt, ##args); \ + __diag_pop(); \ + err; \ +}) +#endif /* GCC_VERSION < 5.1.0 */ + +/* Newer kernels removed */ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \ + (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))))) +#define HAVE_PCI_ASPM_H +#endif + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +struct net_device * _kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +int _kc_is_valid_ether_addr(u8 *addr); +#endif + +/**************************************/ +/* MISCELLANEOUS */ + +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. 
*/ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. */ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) pci_write_config_word(X, \ + PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define 
netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+ pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+ pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = NULL;
+ n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.17 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+ struct __kc_callback_head *next;
+ void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef 
CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT +#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { 
set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. */ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. 
This is needed to support + * Node module parameter of ixgbe. + */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + 
const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) \ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + 
INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + 
memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do { } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + 
int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. + */ +struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. */ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) 
\
+	printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n) \
+	__builtin_constant_p(n) ? ( \
+		(n == 1) ? 0 : \
+		(1UL << ilog2(n))) : \
+		(1UL << (fls_long(n) - 1))
+#endif
+
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+	set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+	remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+	modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+	if (value != PM_QOS_DEFAULT_VALUE) { \
+		printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+			pci_name(adapter->pdev)); \
+	} \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
+{
+	/* adapted from strict_strtoul() in 2.6.25 */
+	char *tail;
+	long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(buf);
+	if (!len)
+		return -EINVAL;
+	val = simple_strtol(buf, &tail, base);
+	if (tail == buf)
+		return -EINVAL;
+	if ((*tail == '\0') ||
+	    ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#ifndef IXGBE_PROCFS
+#define IXGBE_PROCFS
+#endif /* IXGBE_PROCFS */
+#endif /* >= 2.6.0 */
+
+#else /* < 2.6.25 */
+
+#if IS_ENABLED(CONFIG_SYSFS)
+#ifndef IXGBE_SYSFS
+#define IXGBE_SYSFS
+#endif /* IXGBE_SYSFS */
+#endif /* CONFIG_SYSFS */
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef IXGBE_HWMON
+#define IXGBE_HWMON
+#endif /* IXGBE_HWMON */
+#endif /* CONFIG_HWMON */
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({ \
+	type __val = (val); \
+	type __min = (min); \
+	type __max = (max); \
+	__val = __val < __min ? __min : __val; \
+	__val > __max ?
__max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) 
+#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_main(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_main(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef 
HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef 
netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state 
!= NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */ + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) do { if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define 
device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define 
IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) 
+#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE 
&& SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#ifndef NO_PTP_SUPPORT +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* CONFIG_PTP_1588_CLOCK */ +#endif /* !NO_PTP_SUPPORT */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 0 
+#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#include +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. + */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. 
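+ *
+ * For example, with the mapping implemented below,
+ *	ethtool_adv_to_mmd_eee_adv_t(ADVERTISED_100baseT_Full |
+ *				     ADVERTISED_1000baseT_Full)
+ * returns MDIO_EEE_100TX | MDIO_EEE_1000T.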
+ */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... 
((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) ) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid compatibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in the interrupt context.
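+ *
+ * Illustrative use (the names below are hypothetical driver members, not
+ * part of this patch):
+ *
+ *	mod_delayed_work(adapter->wq, &adapter->service_task, HZ);
+ *
+ * On these older kernels this expands to cancel_delayed_work() followed by
+ * queue_delayed_work(), so it is not atomic the way the upstream helper is.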
+ */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, + void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + 
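+/* Note on the bridge-mode fallbacks above: BRIDGE_MODE_VEB keeps VF-to-VF
+ * traffic looped back inside the adapter (the default), while
+ * BRIDGE_MODE_VEPA (802.1Qbg) sends it out to the external switch; the
+ * values mirror the upstream uapi definitions.
+ */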
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ +#else /* RHEL >= 8.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif /* RHEL >= 8.0 */ +#endif /* RHEL >= 7.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* <RHEL6.7 */ +#else /* >= 3.12.0 */ +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#define 
dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, (cnt) * (size), flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +bool _kc_pci_device_is_present(struct pci_dev *pdev); +#define pci_device_is_present _kc_pci_device_is_present +#endif /* <RHEL7.0 */ +#else /* >= 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,0)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12,1,0))) +/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright + * original authors. 
+ */ +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} +#endif /* <=EL7.0 || <=SLES 12.1 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZE_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZE_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#define HAVE_SKBUFF_RXHASH +#endif /* >= 2.6.35 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else /* >= 3.15.0 */ +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(3,16,0) ) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. 
+ * declare it as zero on older kernels so that if it gets or'd in + * it won't affect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \ + ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) ) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8<RHEL7.0 || RHEL7.2+) */ + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +static inline void ktime_get_real_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_real()); +} + +static inline void ktime_get_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get()); +} +#endif + +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#endif /* RHEL_RELEASE_CODE < RHEL7.5 */ + +#if RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,3) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include +#define HAVE_RHASHTABLE +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +#include +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char 
*data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
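+ *
+ * Illustrative use in a transmit path (not taken from this driver): pad
+ * runt frames before handing them to the hardware, e.g.
+ *
+ *	if (skb_put_padto(skb, ETH_ZLEN))
+ *		return NETDEV_TX_OK;
+ *
+ * and do not touch the skb afterwards on failure, since it has already
+ * been freed.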
+ */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef 
NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE +#define HAVE_NDO_SET_TX_MAXRATE +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#ifdef HAVE_RHASHTABLE +#define rhashtable_loopup_fast(ht, key, params) \ + do { \ + (void)params; \ + rhashtable_lookup((ht), (key)); \ + } while (0) + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj), GFP_KERNEL); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj), GFP_KERNEL); \ + } while (0) + +#else /* >= 3,19,0 */ +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj)); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj)); \ + } while (0) + +#endif /* 3,19,0 */ +#endif /* HAVE_RHASHTABLE */ +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + 
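+/* Illustration (the cookie value is only an example): ring_cookie packs the
+ * destination queue in its low 32 bits and the VF selector in bits 39:32,
+ * so for 0x0000000200000003ULL the helper above returns 3 and the helper
+ * below returns 2.
+ */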
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +/** + * _kc_flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * _kc_flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct _kc_flow_dissector_key_addrs { + union { + struct _kc_flow_dissector_key_ipv4_addrs v4addrs; + struct _kc_flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + +/** + * _kc_flow_dissector_key_tp_ports: + * @ports: port numbers of Transport header + * src: source port number + * dst: destination port number + */ +struct _kc_flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +/** + * _kc_flow_dissector_key_basic: + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. TCP/UDP) + * @padding: padding for alignment + */ +struct _kc_flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct _kc_flow_keys { + struct _kc_flow_dissector_key_basic basic; + struct _kc_flow_dissector_key_ports ports; + struct _kc_flow_dissector_key_addrs addrs; +}; + +/* These are all the include files for kernels inside this #ifdef block that + * have any reference to the in kernel definition of struct flow_keys. The + * reason for putting them here is to make 100% sure that these files do not get + * included after re-defining flow_keys to _kc_flow_keys. This is done to + * prevent any possible ABI issues that this structure re-definition could case. 
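+ *
+ * (Editorial note, not part of the original patch: the ordering matters
+ * because any header pulled in after the "#define flow_keys _kc_flow_keys"
+ * below would have its declarations rewritten against the replacement
+ * structure, so a later include that mentions struct flow_keys would no
+ * longer refer to the kernel's own definition.)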
+ */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \ + SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#include +#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0)) +#include +#endif /* 4.2.0 */ +#include +#include +#include +#include + +#define flow_keys _kc_flow_keys +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int __always_unused flags); +#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys +#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */ + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#include +#endif /* >= RHEL7.3 || >= SLE12sp2 */ +#else /* >= 4.3.0 */ +#include +#endif /* 4.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, (u8 *)addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#define HAVE_NETDEV_UPPER_INFO +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if(!defined(KYLIN_KERNEL44)) +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= 
UBUNTU_VERSION(4,4,0,21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#endif /*#if(!defined(KYLIN_KERNEL44))*/ + +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#ifndef kstrtobool +#define kstrtobool _kc_kstrtobool +int _kc_kstrtobool(const char *s, bool *res); +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#ifndef KYLIN_KERNEL +#define HAVE_TC_SETUP_CLSFLOWER +#endif /* KYLIN_KERNEL */ +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) &&\ + !defined(KYLIN_KERNEL44) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + 
return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* RHEL7.4+ || SLES12sp3+ */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0)))) +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || = RHEL_RELEASE_VERSION(7,4)) +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* RHEL7.4+ */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} +#endif /* = RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \ + !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16)) && !defined(KYLIN_KERNEL44)) +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): +#ifdef ETH_P_8021AD + case htons(ETH_P_8021AD): +#endif + return true; + default: + return false; + } +} +#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.3.0-16 */ +#else /* >=4.9 */ +#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* KERNEL_VERSION(4.9.0) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +/* SLES 12.3 and RHEL 7.5 backported this interface */ +#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done2(struct napi_struct *napi, + int __always_unused work_done) +{ + /* it was really hard to get napi_complete_done to be safe to call + * recursively without running into our own kcompat, so just use + * napi_complete + */ + napi_complete(napi); + + /* true means that the stack is telling the driver to go-ahead and + * re-enable interrupts + */ + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done2 +#endif /* sles and rhel exclusion for < 4.10 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#endif /* RHEL7.4+ */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == 
SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ + +/* If kernel is older than 4.10 but distro is RHEL >= 7.5 || SLES > 12SP4, + * it does have support for NAPI_STATE + */ +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) ||\ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))) +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#endif /* RHEL >= 7.5 || SLES >=12.4 */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. 
If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +/* If kernel is older than 4.12 but distro is RHEL >= 7.5 || SLES > 12SP4, + * it does have support for MIN_NAPI_ID + */ +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))) +#define HAVE_MIN_NAPI_ID +#endif /* RHEL >= 7.5 || SLES >= 12.4 */ +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#else /* >= 4.12 */ +#define HAVE_MIN_NAPI_ID +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) + +#if(!defined(KYLIN_KERNEL44)) +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), 
(d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#endif + +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. 
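+ *
+ * Editorial illustration, not part of the original patch: with these macros a
+ * driver can fill the link-mode masks the same way on old and new kernels, e.g.
+ *
+ *	ethtool_link_ksettings_zero_link_mode(ks, supported);
+ *	ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full);
+ *
+ * where, on kernels without ETHTOOL_GLINKSETTINGS, the second line expands
+ * (via the ETHTOOL_LINK_NAME_supported() token pasting below) to roughly
+ *
+ *	*(ks->link_modes.supported) |= SUPPORTED_10000baseT_Full;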
+ */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
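+ *
+ * Editorial sketch, not part of the original patch: a legacy .get_settings
+ * wrapper built on this helper would look roughly like the following, where
+ * foo_get_link_ksettings() is a hypothetical new-style driver routine:
+ *
+ *	static int foo_get_settings(struct net_device *netdev,
+ *				    struct ethtool_cmd *cmd)
+ *	{
+ *		struct ethtool_link_ksettings ks;
+ *
+ *		foo_get_link_ksettings(netdev, &ks);
+ *		_kc_ethtool_ksettings_to_cmd(&ks, cmd);
+ *		return 0;
+ *	}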
+ */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. 
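+ *
+ * Editorial usage sketch, not part of the original patch: callers first
+ * range-check the untrusted index and then clamp it under speculation
+ * (adapter->num_rx_queues is a hypothetical bound):
+ *
+ *	if (queue >= adapter->num_rx_queues)
+ *		return -EINVAL;
+ *	queue = array_index_nospec(queue, adapter->num_rx_queues);
+ *	rx_ring = adapter->rx_ring[queue];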
+ */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +/* add a check for the Oracle UEK 4.14.35 kernel as + * it backported a version of this bitmap function + */ +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(4,14,35)) +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. + */ +#if BITS_PER_LONG == 64 +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#else /* >= 4.16 */ +#include +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#include "kcompat_overflow.h" + +#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) +#define firmware_request_nowarn request_firmware_direct +#endif /* SLES < 15.1 */ + +#else +#include +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + 
ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif + +static inline void __kc_metadata_dst_free(void *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#define HAVE_XDP_UMEM_PROPS +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open + +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) || defined(KYLIN_KERNEL)) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 
5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#endif /* RHEL < 8.1 */ +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (!defined KYLIN_KERNEL) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifndef eth_get_headlen +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* !eth_get_headlen */ +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,1)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* RHEL > 8.1 */ +#endif /*!defined KYLIN_KERNEL*/ + +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,2,0)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* < 6.2.0 */ +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_cls_offload tc_cls_flower_offload +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only); + +#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) \ + _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else 
/* RHEL >= 8.2 */ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#endif /* RHEL >= 8.2 */ + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif /* !ETH_P_LLDP */ + +#else /* >= 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +#define HAVE_XSK_UMEM_HAS_ADDRS +#endif /* SLE < 15.3 */ +#endif /* < 5.8.0*/ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#if IS_ENABLED(CONFIG_DIMLIB) +#define HAVE_CONFIG_DIMLIB +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +/* add a check for the Oracle UEK 5.4.17 kernel which + * backported the rename of the aer functions + */ +#if defined(NEED_ORCL_LIN_PCI_AER_CLEAR_NONFATAL_STATUS) || \ +!(SLE_VERSION_CODE > SLE_VERSION(15, 2, 0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ +(SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14, 0, 0))) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(5,4,17)) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif +#else /* >= 5.7.0 */ +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15,3,0)) +/* (RHEL < 8.4) || (SLE < 
15.3) */ +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#elif (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +/* RHEL >= 8.4 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#endif +#define flex_array_size(p, member, count) \ + array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) +#else /* >= 5.8.0 */ +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#endif /* (RHEL >= 8.4) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#define HAVE_UDP_TUNNEL_NIC_INFO +#endif /* 5.9.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,3))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,3,0))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ + +/*****************************************************************************/ +#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif /* HAVE_XDP_BUFF_IN_XDP_H */ +static inline int +_kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, + u32 queue_index, unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) +#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#ifdef HAVE_NAPI_BUSY_LOOP +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +static inline void +_kc_napi_busy_loop(unsigned int napi_id, + bool (*loop_end)(void *, unsigned long), void *loop_end_arg, + bool __always_unused prefer_busy_poll, + u16 __always_unused budget) +{ + napi_busy_loop(napi_id, loop_end, loop_end_arg); +} + +#define napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll, budget) \ + _kc_napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll, budget) +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#endif /* HAVE_NAPI_BUSY_LOOP */ +#endif /* <5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,12,0)) +#define HAVE_GRO_HEADER +#endif /* >=5.12.0 */ + +/*****************************************************************************/ + +/* + * Load the implementations file which actually defines kcompat backports. 
+ * Legacy backports still exist in this file, but all new backports must be
+ * implemented using kcompat_*defs.h and kcompat_impl.h
+ */
+#include "kcompat_impl.h"
+
+#endif /* _KCOMPAT_H_ */
diff --git a/drivers/net/ethernet/guangruntong/kcompat_defs.h b/drivers/net/ethernet/guangruntong/kcompat_defs.h
new file mode 100755
index 0000000000000..6594358eef821
--- /dev/null
+++ b/drivers/net/ethernet/guangruntong/kcompat_defs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 1999 - 2023 Intel Corporation */
+
+#ifndef _KCOMPAT_DEFS_H_
+#define _KCOMPAT_DEFS_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#endif /* LINUX_VERSION_CODE */
+
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) //sam
+#include <linux/utsrelease.h>
+#else
+#include <generated/utsrelease.h>
+#endif
+#endif
+
+/*
+ * Include the definitions file for HAVE/NEED flags for the standard upstream
+ * kernels.
+ *
+ * Then, based on the distribution we detect, load the distribution specific
+ * definitions file that customizes the definitions for the target
+ * distribution.
+ */
+#include "kcompat_std_defs.h"
+
+#ifdef CONFIG_SUSE_KERNEL
+#include "kcompat_sles_defs.h"
+#elif UBUNTU_VERSION_CODE
+#include "kcompat_ubuntu_defs.h"
+#elif RHEL_RELEASE_CODE
+#include "kcompat_rhel_defs.h"
+#elif defined(UEK_RELEASE_NUMBER)
+#include "kcompat_oracle_defs.h"
+#endif
+
+#endif /* _KCOMPAT_DEFS_H_ */
diff --git a/drivers/net/ethernet/guangruntong/kcompat_gcc.h b/drivers/net/ethernet/guangruntong/kcompat_gcc.h
new file mode 100755
index 0000000000000..a619fa3628c4d
--- /dev/null
+++ b/drivers/net/ethernet/guangruntong/kcompat_gcc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 1999 - 2021 Intel Corporation.
*/ + +#ifndef _KCOMPAT_GCC_H_ +#define _KCOMPAT_GCC_H_ + +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif /* __has_attribute(fallthrough) */ +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif /* __has_attribute */ + +#endif /* _KCOMPAT_GCC_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_generated_defs.h b/drivers/net/ethernet/guangruntong/kcompat_generated_defs.h new file mode 100644 index 0000000000000..e8abd5d1cdfb6 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_generated_defs.h @@ -0,0 +1,58 @@ +/* Autogenerated for KSRC=/lib/modules/6.6.7-amd64-desktop-hwe/build via kcompat-generator.sh */ +#ifndef _KCOMPAT_GENERATED_DEFS_H_ +#define _KCOMPAT_GENERATED_DEFS_H_ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_DEVLINK_HEALTH +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#define HAVE_DEVLINK_PARAMS +#define HAVE_DEVLINK_PORT_NEW +#define HAVE_DEVLINK_PORT_OPS +#define HAVE_DEVLINK_PORT_SPLIT +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#define HAVE_DEVLINK_PORT_SPLIT_IN_PORT_OPS +#define HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#define HAVE_DEVLINK_RATE_NODE_CREATE +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVL_HEALTH_REPORTER_DESTROY +#define HAVE_DEVL_PORT_REGISTER +#define HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_NDO_FDB_DEL_EXTACK +#define HAVE_NETDEV_MIN_MAX_MTU +#define HAVE_NETIF_SET_TSO_MAX +#define HAVE_SET_NETDEV_DEVLINK_PORT +#define HAVE_INCLUDE_BITFIELD +#define NEED_DPLL_NETDEV_PIN_SET +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_FLOW_RSS +#define HAVE_FLOW_DISSECTOR_KEY_PPPOE +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#define HAVE_CDEV_DEVICE +#define HAVE_DEV_UEVENT_CONST +#define HAVE_STREAM_OPEN +#define HAVE_GNSS_MODULE +#define HAVE_POLL_T +#define HAVE_PCI_MSIX_ALLOC_IRQ_AT +#define HAVE_PCI_MSIX_CAN_ALLOC_DYN +#define HAVE_PCI_MSIX_FREE_IRQ +#define HAVE_PER_VF_MSIX_SYSFS +#define HAVE_STRUCT_PCI_DEV_PTM_ENABLED +#define HAVE_STDDEF_OFFSETTOEND +#define HAVE_X86_STEPPING +#define HAVE_COMPLETION_RAW_SPINLOCK +#define HAVE_HWMON_DEVICE_REGISTER_WITH_INFO +#define HAVE_STRUCT_STATIC_KEY_FALSE +#define HAVE_MDEV_REGISTER_PARENT +#define HAVE_VM_FLAGS_API +#define HAVE_NL_SET_ERR_MSG_FMT +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#define NEED_SCHED_PARAM +#define HAVE_TRACE_ENABLED_SUPPORT +#define HAVE_TTY_OP_WRITE_SIZE_T +#endif /* _KCOMPAT_GENERATED_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_impl.h b/drivers/net/ethernet/guangruntong/kcompat_impl.h new file mode 100755 index 0000000000000..2700008ceab71 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_impl.h @@ -0,0 +1,2714 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2024 Intel Corporation */ + +#ifndef _KCOMPAT_IMPL_H_ +#define _KCOMPAT_IMPL_H_ + +/* devlink support */ +#if IS_ENABLED(CONFIG_NET_DEVLINK) + +/* + * This change is adding buffer in enum value for ice_devlink_param_id. + * + * In upstream / OOT compiled from source it is safe to use + * DEVLINK_PARAM_GENERIC_ID_MAX as first value for ice_devlink_param_id + * enum. 
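+ *
+ * (Editorial illustration, not part of the original patch: a driver-private
+ * parameter enum is normally anchored on the generic maximum, e.g.
+ *
+ *	enum foo_devlink_param_id {
+ *		FOO_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ *		FOO_DEVLINK_PARAM_ID_SOMETHING,
+ *	};
+ *
+ * so a kernel whose backports raise DEVLINK_PARAM_GENERIC_ID_MAX can collide
+ * with the fixed IDs baked into a pre-built module; the +32 padding below
+ * guards against that.)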
+ * + * In case of binary release (for Secure Boot purpose) this caused issue + * with supporting multiple kernels because backport made by SLES changed + * value of DEVLINK_PARAM_GENERIC_ID_MAX. This caused -EINVAL to + * be returned by devlink_params_register() because + * ICE_DEVLINK_PARAM_ID_FW_MGMT_MINSREV (compiled on older kernel) was equal + * to DEVLINK_PARAM_GENERIC_ID_MAX (in newer kernel). + */ +#define DEVLINK_PARAM_GENERIC_ID_MAX __KC_DEVLINK_PARAM_GENERIC_ID_MAX +#include +#undef DEVLINK_PARAM_GENERIC_ID_MAX +#define DEVLINK_PARAM_GENERIC_ID_MAX (__KC_DEVLINK_PARAM_GENERIC_ID_MAX + 32) +#endif /* CONFIG_DEVLINK */ + +/* This file contains implementations of backports from various kernels. It + * must rely only on NEED_ and HAVE_ checks. It must not make any + * checks to determine the kernel version when deciding whether to include an + * implementation. + * + * All new implementations must go in this file, and legacy implementations + * should be migrated to the new format over time. + */ + +/* The same kcompat code is used here and auxiliary module. To avoid + * duplication and functions redefitions in some scenarios, include the + * auxiliary kcompat implementation here. + */ +#include "auxiliary_compat.h" + +/* generic network stack functions */ + +/* NEED_NETDEV_TXQ_BQL_PREFETCH + * + * functions + * netdev_txq_bql_complete_prefetchw() + * netdev_txq_bql_enqueue_prefetchw() + * + * were added in kernel 4.20 upstream commit + * 535114539bb2 ("net: add netdev_txq_bql_{enqueue, complete}_prefetchw() + * helpers") + */ +#ifdef NEED_NETDEV_TXQ_BQL_PREFETCH +/** + * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their ndo_start_xmit(), + * to give appropriate hint to the CPU. + */ +static inline +void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.num_queued); +#endif +} + +/** + * netdev_txq_bql_complete_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their TX completion path, + * to give appropriate hint to the CPU. + */ +static inline +void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.limit); +#endif +} +#endif /* NEED_NETDEV_TXQ_BQL_PREFETCH */ + +/* NEED_NETDEV_TX_SENT_QUEUE + * + * __netdev_tx_sent_queue was added in kernel 4.20 upstream commit + * 3e59020abf0f ("net: bql: add __netdev_tx_sent_queue()") + */ +#ifdef NEED_NETDEV_TX_SENT_QUEUE +/* Variant of netdev_tx_sent_queue() for drivers that are aware + * that they should not test BQL status themselves. + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last + * skb of a batch. + * Returns true if the doorbell must be used to kick the NIC. + */ +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, + bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} +#endif /* NEED_NETDEV_TX_SENT_QUEUE */ + +/* NEED_NET_PREFETCH + * + * net_prefetch was introduced by commit f468f21b7af0 ("net: Take common + * prefetch code structure into a function") + * + * This function is trivial to re-implement in full. 
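+ *
+ * Editorial usage sketch, not part of the original patch: Rx cleanup paths
+ * typically warm the packet headers before parsing them, e.g. (rx_buffer is a
+ * hypothetical driver structure):
+ *
+ *	net_prefetch(page_address(rx_buffer->page) + rx_buffer->page_offset);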
+ */ +#ifdef NEED_NET_PREFETCH +static inline void net_prefetch(void *p) +{ + prefetch(p); +#if L1_CACHE_BYTES < 128 + prefetch((u8 *)p + L1_CACHE_BYTES); +#endif +} +#endif /* NEED_NET_PREFETCH */ + +/* NEED_SKB_FRAG_OFF and NEED_SKB_FRAG_OFF_ADD + * + * skb_frag_off and skb_frag_off_add were added in upstream commit + * 7240b60c98d6 ("linux: Add skb_frag_t page_offset accessors") + * + * Implementing the wrappers directly for older kernels which still have the + * old implementation of skb_frag_t is trivial. + * + * LTS 4.19 backported the define for skb_frag_off in 4.19.201. + * d94d95ae0dd0 ("gro: ensure frag0 meets IP header alignment") + * Need to exclude defining skb_frag_off for 4.19.X where X > 200 + */ +#ifdef NEED_SKB_FRAG_OFF +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} +#endif /* NEED_SKB_FRAG_OFF */ +#ifdef NEED_SKB_FRAG_OFF_ADD +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif /* NEED_SKB_FRAG_OFF_ADD */ + +/* + * NEED_DMA_ATTRS, NEED_DMA_ATTRS_PTR and related functions + * + * dma_map_page_attrs and dma_unmap_page_attrs were added in upstream commit + * 0495c3d36794 ("dma: add calls for dma_map_page_attrs and + * dma_unmap_page_attrs") + * + * Implementing these calls in this way makes RHEL7.4 compile (which doesn't + * have these) and all newer kernels compile fine, while using only the new + * calls with no ifdeffery. + * + * In kernel 4.10 the commit ("dma-mapping: use unsigned long for dma_attrs") + * switched the argument from struct dma_attrs * to unsigned long. + * + * __page_frag_cache_drain was implemented in 2017, but __page_frag_drain came + * with the above series for _attrs, and seems to have been backported at the + * same time. + * + * Please note SLES12.SP3 and RHEL7.5 and newer have all three of these + * already. + * + * If need be in the future for some reason, we could make a separate NEED_ + * define for __page_frag_cache_drain, but not yet. 
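+ *
+ * Editorial usage sketch, not part of the original patch: with these wrappers
+ * a driver can call the attrs variant unconditionally, e.g. (rx_ring is a
+ * hypothetical driver structure, and DMA_ATTR_SKIP_CPU_SYNC is assumed to be
+ * defined on the target kernel):
+ *
+ *	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ *				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ *	if (dma_mapping_error(rx_ring->dev, dma))
+ *		return -ENOMEM;
+ *
+ * On kernels that predate the attrs calls this degrades to a plain
+ * dma_map_page(), dropping the attribute hints.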
+ *
+ * For clarity: there are three states:
+ * 1) no attrs
+ * 2) attrs but with a pointer to a struct dma_attrs
+ * 3) attrs but with unsigned long type
+ */
+#ifdef NEED_DMA_ATTRS
+static inline
+dma_addr_t __kc_dma_map_page_attrs(struct device *dev, struct page *page,
+				   size_t offset, size_t size,
+				   enum dma_data_direction dir,
+				   unsigned long __always_unused attrs)
+{
+	return dma_map_page(dev, page, offset, size, dir);
+}
+#define dma_map_page_attrs __kc_dma_map_page_attrs
+
+static inline
+void __kc_dma_unmap_page_attrs(struct device *dev,
+			       dma_addr_t addr, size_t size,
+			       enum dma_data_direction dir,
+			       unsigned long __always_unused attrs)
+{
+	dma_unmap_page(dev, addr, size, dir);
+}
+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs
+
+static inline void __page_frag_cache_drain(struct page *page,
+					    unsigned int count)
+{
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+	if (!page_ref_sub_and_test(page, count))
+		return;
+
+	init_page_count(page);
+#else
+	WARN_ON(count > 1);
+	if (!count)
+		return;
+#endif
+	__free_pages(page, compound_order(page));
+}
+#elif defined(NEED_DMA_ATTRS_PTR)
+static inline
+dma_addr_t __kc_dma_map_page_attrs_long(struct device *dev, struct page *page,
+					size_t offset, size_t size,
+					enum dma_data_direction dir,
+					unsigned long attrs)
+{
+	struct dma_attrs dmaattrs = {};
+
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &dmaattrs);
+
+	if (attrs & DMA_ATTR_WEAK_ORDERING)
+		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &dmaattrs);
+
+	return dma_map_page_attrs(dev, page, offset, size, dir, &dmaattrs);
+}
+#define dma_map_page_attrs __kc_dma_map_page_attrs_long
+/* there is a nasty macro buried in dma-mapping.h which reroutes dma_map_page
+ * and dma_unmap_page to attribute versions, so take control of that macro and
+ * fix it here. */
+#ifdef dma_map_page
+#undef dma_map_page
+#define dma_map_page(a,b,c,d,r) dma_map_page_attrs(a,b,c,d,r,0)
+#endif
+
+static inline
+void __kc_dma_unmap_page_attrs_long(struct device *dev,
+				    dma_addr_t addr, size_t size,
+				    enum dma_data_direction dir,
+				    unsigned long attrs)
+{
+	struct dma_attrs dmaattrs = {};
+
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &dmaattrs);
+
+	if (attrs & DMA_ATTR_WEAK_ORDERING)
+		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &dmaattrs);
+
+	dma_unmap_page_attrs(dev, addr, size, dir, &dmaattrs);
+}
+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs_long
+#ifdef dma_unmap_page
+#undef dma_unmap_page
+#define dma_unmap_page(a,b,c,r) dma_unmap_page_attrs(a,b,c,r,0)
+#endif
+#endif /* NEED_DMA_ATTRS_PTR */
+
+/*
+ * NETIF_F_HW_L2FW_DOFFLOAD related functions
+ *
+ * Support for NETIF_F_HW_L2FW_DOFFLOAD was first introduced upstream by
+ * commit a6cc0cfa72e0 ("net: Add layer 2 hardware acceleration operations for
+ * macvlan devices")
+ */
+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
+
+#include <linux/if_macvlan.h>
+
+/* NEED_MACVLAN_ACCEL_PRIV
+ *
+ * macvlan_accel_priv is an accessor function that replaced direct access to
+ * the macvlan->fwd_priv variable. It was introduced in commit 7d775f63470c
+ * ("macvlan: Rename fwd_priv to accel_priv and add accessor function")
+ *
+ * Implement the new wrapper name by simply accessing the older
+ * macvlan->fwd_priv name.
+ */ +#ifdef NEED_MACVLAN_ACCEL_PRIV +static inline void *macvlan_accel_priv(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->fwd_priv; +} +#endif /* NEED_MACVLAN_ACCEL_PRIV */ + +/* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD + * + * macvlan_release_l2fw_offload was introduced upstream by commit 53cd4d8e4dfb + * ("macvlan: Provide function for interfaces to release HW offload") + * + * Implementing this is straight forward, but we must be careful to use + * fwd_priv instead of accel_priv. Note that both the change to accel_priv and + * introduction of this function happened in the same release. + */ +#ifdef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +static inline int macvlan_release_l2fw_offload(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + macvlan->fwd_priv = NULL; + return dev_uc_add(macvlan->lowerdev, dev->dev_addr); +} +#endif /* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD */ + +/* NEED_MACVLAN_SUPPORTS_DEST_FILTER + * + * macvlan_supports_dest_filter was introduced upstream by commit 6cb1937d4eff + * ("macvlan: Add function to test for destination filtering support") + * + * The implementation doesn't rely on anything new and is trivial to backport + * for kernels that have NETIF_F_HW_L2FW_DOFFLOAD support. + */ +#ifdef NEED_MACVLAN_SUPPORTS_DEST_FILTER +static inline bool macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif /* NEED_MACVLAN_SUPPORTS_DEST_FILTER */ + +#endif /* NETIF_F_HW_L2FW_DOFFLOAD */ + +/* tc functions */ + +/* NEED_FLOW_INDR_BLOCK_CB_REGISTER + * + * __flow_indr_block_cb_register and __flow_indr_block_cb_unregister were + * added in upstream commit 4e481908c51b ("flow_offload: move tc indirect + * block to flow offload") + * + * This was a simple rename so we can just translate from the old + * naming scheme with a macro. + */ +#ifdef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif + +/* devlink support */ +#if IS_ENABLED(CONFIG_NET_DEVLINK) + +#ifdef HAVE_DEVLINK_REGIONS +/* NEED_DEVLINK_REGION_CREATE_OPS + * + * The ops parameter to devlink_region_create was added by commit e8937681797c + * ("devlink: prepare to support region operations") + * + * For older kernels, define _kc_devlink_region_create that takes an ops + * parameter, and calls the old implementation function by extracting the name + * from the structure. + */ +#ifdef NEED_DEVLINK_REGION_CREATE_OPS +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *data); +}; + +static inline struct devlink_region * +_kc_devlink_region_create(struct devlink *devlink, + const struct devlink_region_ops *ops, + u32 region_max_snapshots, u64 region_size) +{ + return devlink_region_create(devlink, ops->name, region_max_snapshots, + region_size); +} + +#define devlink_region_create _kc_devlink_region_create +#endif /* NEED_DEVLINK_REGION_CREATE_OPS */ +#endif /* HAVE_DEVLINK_REGIONS */ + +/* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY + * + * devlink_flash_update_status_notify, _begin_notify, and _end_notify were + * added by upstream commit 191ed2024de9 ("devlink: allow driver to update + * progress of flash update") + * + * For older kernels that lack the netlink messages, convert the functions + * into no-ops. 
+ */ +#ifdef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +static inline void +devlink_flash_update_begin_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_end_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_status_notify(struct devlink __always_unused *devlink, + const char __always_unused *status_msg, + const char __always_unused *component, + unsigned long __always_unused done, + unsigned long __always_unused total) +{ +} +#endif /* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY */ + +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +struct devlink_flash_update_params { + const char *file_name; + const char *component; + u32 overwrite_mask; +}; + +#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS +#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) +#endif + +#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS +#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) +#endif +#endif /* !HAVE_DEVLINK_FLASH_UPDATE_PARAMS */ + +/* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY + * + * devlink_flash_update_timeout_notify was added by upstream commit + * f92970c694b3 ("devlink: add timeout information to status_notify"). + * + * For older kernels, just convert timeout notifications into regular status + * notification messages without timeout information. + */ +#ifdef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +static inline void +devlink_flash_update_timeout_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long __always_unused timeout) +{ + devlink_flash_update_status_notify(devlink, status_msg, component, 0, 0); +} +#endif /* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY */ + +/* NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER + * + * Upstream commit ba7d16c77942 ("devlink: Implicitly set auto recover flag when + * registering health reporter") removed auto_recover param. + * CORE code does not need to bother about this param, we could simply provide + * it via compat. + */ +#ifdef NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +static inline struct devlink_health_reporter * +_kc_devlink_health_reporter_create(struct devlink *devlink, + const struct devlink_health_reporter_ops *ops, + u64 graceful_period, void *priv) +{ + return devlink_health_reporter_create(devlink, ops, graceful_period, + !!ops->recover, priv); +} +#define devlink_health_reporter_create _kc_devlink_health_reporter_create +#endif /* NEED_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER */ + +/* + * NEED_DEVLINK_PORT_ATTRS_SET_STRUCT + * + * HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR + * HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID + * + * devlink_port_attrs_set was introduced by commit b9ffcbaf56d3 ("devlink: + * introduce devlink_port_attrs_set") + * + * It's function signature has changed multiple times over several kernel + * releases: + * + * commit 5ec1380a21bb ("devlink: extend attrs_set for setting port + * flavours") added the ability to set port flavour. (Note that there is no + * official kernel release with devlink_port_attrs_set without the flavour + * argument, as they were introduced in the same series.) + * + * commit bec5267cded2 ("net: devlink: extend port attrs for switch ID") added + * the ability to set the switch ID (HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID) + * + * Finally commit 71ad8d55f8e5 ("devlink: Replace devlink_port_attrs_set + * parameters with a struct") refactored to pass devlink_port_attrs struct + * instead of individual parameters. 
(!NEED_DEVLINK_PORT_ATTRS_SET_STRUCT) + * + * We want core drivers to just use the latest form that takes + * a devlink_port_attrs structure. Note that this structure did exist as part + * of but was never used directly by driver code prior to the + * function parameter change. For this reason, the implementation always + * relies on _kc_devlink_port_attrs instead of what was defined in the kernel. + */ +#ifdef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT + +#ifndef HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_CPU, + DEVLINK_PORT_FLAVOUR_DSA, + DEVLINK_PORT_FLAVOUR_PCI_PF, + DEVLINK_PORT_FLAVOUR_PCI_VF, +}; +#endif + +struct _kc_devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct _kc_devlink_port_pci_pf_attrs { + u16 pf; +}; + +struct _kc_devlink_port_pci_vf_attrs { + u16 pf; + u16 vf; +}; + +struct _kc_devlink_port_attrs { + u8 split:1, + splittable:1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct _kc_devlink_port_phys_attrs phys; + struct _kc_devlink_port_pci_pf_attrs pci_pf; + struct _kc_devlink_port_pci_vf_attrs pci_vf; + }; +}; + +#define devlink_port_attrs _kc_devlink_port_attrs + +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ +#if defined(HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID) + devlink_port_attrs_set(devlink_port, attrs->flavour, attrs->phys.port_number, + attrs->split, attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +#elif defined(HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR) + devlink_port_attrs_set(devlink_port, attrs->flavour, attrs->phys.port_number, + attrs->split, attrs->phys.split_subport_number); +#else + if (attrs->split) + devlink_port_split_set(devlink_port, attrs->phys.port_number); +#endif +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set + +#endif /* NEED_DEVLINK_PORT_ATTRS_SET_STRUCT */ + +/* + * NEED_DEVLINK_ALLOC_SETS_DEV + * + * Since commit 919d13a7e455 ("devlink: Set device as early as possible"), the + * devlink device pointer is set by devlink_alloc instead of by + * devlink_register. + * + * devlink_alloc now includes the device pointer in its signature, while + * devlink_register no longer includes it. + * + * This implementation provides a replacement for devlink_alloc which will + * take and then silently discard the extra dev pointer. + * + * To use devlink_register, drivers must check + * HAVE_DEVLINK_REGISTER_SETS_DEV. Note that we can't easily provide + * a backport of the change to devlink_register directly. Although the dev + * pointer is accessible from the devlink pointer through the driver private + * section, it is device driver specific and is not easily accessible in + * compat code. 
+ */ +#ifdef NEED_DEVLINK_ALLOC_SETS_DEV +static inline struct devlink * +_kc_devlink_alloc(const struct devlink_ops *ops, size_t priv_size, + struct device * __always_unused dev) +{ + return devlink_alloc(ops, priv_size); +} +#define devlink_alloc _kc_devlink_alloc +#endif /* NEED_DEVLINK_ALLOC_SETS_DEV */ + +#ifdef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#ifdef NEED_DEVLINK_UNLOCKED_RESOURCE +/* + * NEED_DEVLINK_UNLOCKED_RESOURCE + * + * Handle devlink API change introduced in: + * c223d6a4bf ("net: devlink: add unlocked variants of devlink_resource*() + * functions") + * 644a66c60f ("net: devlink: convert reload command to take implicit + * devlink->lock") + * + * devl_resource_size_get() does not take devlink->lock where + * devlink_resource_size_get() takes devlink->lock, but we do not introduce + * locking in the driver as taking the lock in devlink_reload() was added + * upstream in the same version as API change. + * + * We have to rely on distro maintainers properly backporting of both mentioned + * commits for OOT driver to work properly. + * In case of backporting only c223d6a4bf assert inside + * devl_resource_size_get() will trigger kernel WARNING, + * In case of backporting only 644a66c60f devlink_resource_size_get() will + * attempt to take the lock second time. + */ +static inline int devl_resource_size_get(struct devlink *devlink, + u64 resource_id, + u64 *p_resource_size) +{ + return devlink_resource_size_get(devlink, resource_id, p_resource_size); +} +#endif /* NEED_DEVLINK_UNLOCKED_RESOURCE */ + +#ifdef NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE +/* + * NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE + * + * Commit 4c897cfc46 ("devlink: Simplify devlink resources unregister call") + * removed struct devlink_resource *resource parameter from + * devlink_resources_unregister() function, if NULL is passed as a resource + * parameter old version of devlink_resources_unregister() behaves the same + * way as new implementation removing all resources from: + * &devlink->resource_list. + */ +static inline void +_kc_devlink_resources_unregister(struct devlink *devlink) +{ + return devlink_resources_unregister(devlink, NULL); +} + +#define devlink_resources_unregister _kc_devlink_resources_unregister +#endif /* NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE */ +#endif /* HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT */ + +#ifdef NEED_DEVLINK_TO_DEV +/* + * Commit 2131463 ("devlink: Reduce struct devlink exposure") + * removed devlink struct fields from header to avoid exposure + * and added devlink_to_dev and related functions to access + * them instead. + */ +static inline struct device * +devlink_to_dev(const struct devlink *devlink) +{ + return devlink->dev; +} +#endif /* NEED_DEVLINK_TO_DEV */ + +#endif /* CONFIG_NET_DEVLINK */ + +#ifdef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +/* ida_alloc(), ida_alloc_min(), ida_alloc_max(), ida_alloc_range(), and + * ida_free() were added in commit 5ade60dda43c ("ida: add new API"). + * + * Also, using "0" as the "end" argument (3rd argument) to ida_simple_get() is + * considered the max value, which is why it's used in ida_alloc() and + * ida_alloc_min(). 
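+ *
+ * For example (illustrative only, not taken from this driver):
+ * ida_alloc(&ida, GFP_KERNEL) maps to ida_simple_get(&ida, 0, 0, GFP_KERNEL),
+ * i.e. any free ID may be returned, and the ID is later released with
+ * ida_free(&ida, id).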
+ */
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+	return ida_simple_get(ida, 0, 0, gfp);
+}
+
+static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
+{
+	return ida_simple_get(ida, min, 0, gfp);
+}
+
+static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
+{
+	return ida_simple_get(ida, 0, max, gfp);
+}
+
+static inline int
+ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, gfp_t gfp)
+{
+	return ida_simple_get(ida, min, max, gfp);
+}
+
+static inline void ida_free(struct ida *ida, unsigned int id)
+{
+	ida_simple_remove(ida, id);
+}
+#endif /* NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE */
+
+/* dev_printk implementations */
+
+/* NEED_DEV_LEVEL_ONCE
+ *
+ * The dev_*_once family of printk functions was introduced by commit
+ * e135303bd5be ("device: Add dev_<level>_once variants")
+ *
+ * The implementation is very straightforward so we will just implement them
+ * as-is here.
+ *
+ * Note that this assumes all dev_*_once macros exist if dev_level_once was
+ * found.
+ */
+#ifdef NEED_DEV_LEVEL_ONCE
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...)		\
+do {								\
+	static bool __print_once __read_mostly;			\
+								\
+	if (!__print_once) {					\
+		__print_once = true;				\
+		dev_level(dev, fmt, ##__VA_ARGS__);		\
+	}							\
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...)		\
+do {								\
+	if (0)							\
+		dev_level(dev, fmt, ##__VA_ARGS__);		\
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...)				\
+	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...)				\
+	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...)				\
+	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...)				\
+	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...)				\
+	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...)				\
+	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...)				\
+	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...)				\
+	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+#endif /* NEED_DEV_LEVEL_ONCE */
+
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+
+/* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0
+ *
+ * tc_cls_can_offload_and_chain0 was added by upstream commit
+ * 878db9f0f26d ("pkt_cls: add new tc cls helper to check offload flag and
+ * chain index").
+ *
+ * This patch backports this function for older kernels by calling
+ * tc_can_offload() directly.
+ */
+#ifdef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0
+#include <net/pkt_cls.h>
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+			      struct tc_cls_common_offload *common)
+{
+	if (!tc_can_offload(dev))
+		return false;
+	if (common->chain_index)
+		return false;
+
+	return true;
+}
+#endif /* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 */
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+
+/* NEED_TC_SETUP_QDISC_MQPRIO
+ *
+ * TC_SETUP_QDISC_MQPRIO was added by upstream commit
+ * 575ed7d39e2f ("net_sch: mqprio: Change TC_SETUP_MQPRIO to
+ * TC_SETUP_QDISC_MQPRIO").
+ *
+ * For older kernels which are still using TC_SETUP_MQPRIO, map the new name
+ * to the old one.
+ */
+#ifdef NEED_TC_SETUP_QDISC_MQPRIO
+#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO
+#endif /* NEED_TC_SETUP_QDISC_MQPRIO */
+
+/* ART/TSC functions */
+#ifdef HAVE_PTP_CROSSTIMESTAMP
+/* NEED_CONVERT_ART_NS_TO_TSC
+ *
+ * convert_art_ns_to_tsc was added by upstream commit fc804f65d462 ("x86/tsc:
+ * Convert ART in nanoseconds to TSC").
+ *
+ * This function is similar to convert_art_to_tsc, but expects the input in
+ * terms of nanoseconds, rather than ART cycles. We implement this by
+ * accessing the tsc_khz value and performing the proper calculation. In order
+ * to access the correct clock object on returning, we use the function
+ * convert_art_to_tsc, because the art_related_clocksource is inaccessible.
+ */
+#if defined(CONFIG_X86) && defined(NEED_CONVERT_ART_NS_TO_TSC)
+#include <asm/tsc.h>
+
+static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
+{
+	struct system_counterval_t system;
+	u64 tmp, res, rem;
+
+	rem = do_div(art_ns, USEC_PER_SEC);
+
+	res = art_ns * tsc_khz;
+	tmp = rem * tsc_khz;
+
+	do_div(tmp, USEC_PER_SEC);
+	res += tmp;
+
+	system = convert_art_to_tsc(art_ns);
+	system.cycles = res;
+
+	return system;
+}
+#endif /* CONFIG_X86 && NEED_CONVERT_ART_NS_TO_TSC */
+#endif /* HAVE_PTP_CROSSTIMESTAMP */
+
+/* PTP functions and definitions */
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+
+/* PTP_* ioctl flags
+ *
+ * PTP_PEROUT_ONE_SHOT and PTP_PEROUT_DUTY_CYCLE were added by commit
+ * f65b71aa25a6 ("ptp: add ability to configure duty cycle for periodic
+ * output")
+ *
+ * PTP_PEROUT_PHASE was added in commit b6bd41363a1c ("ptp: introduce
+ * a phase offset in the periodic output request")
+ *
+ * PTP_STRICT_FLAGS was added in commit 6138e687c7b6 ("ptp: Introduce strict
+ * checking of external time stamp options.")
+ *
+ * These flags control behavior for the periodic output PTP ioctl. For older
+ * kernels, we define the flags as 0. This allows bitmask checks on flags to
+ * work as expected, since these feature flags will become no-ops on kernels
+ * that lack support.
+ *
+ * Drivers can check if the relevant feature is actually supported by using an
+ * '#if' on the flag instead of an '#ifdef'
+ */
+#ifndef PTP_PEROUT_PHASE
+#define PTP_PEROUT_PHASE 0
+#endif
+
+#ifndef PTP_PEROUT_DUTY_CYCLE
+#define PTP_PEROUT_DUTY_CYCLE 0
+#endif
+
+#ifndef PTP_STRICT_FLAGS
+#define PTP_STRICT_FLAGS 0
+#endif
+
+#ifndef PTP_PEROUT_PHASE
+/* PTP_PEROUT_PHASE
+ *
+ * The PTP_PEROUT_PHASE flag was added in commit b6bd41363a1c ("ptp: introduce
+ * a phase offset in the periodic output request") as a way for userspace to
+ * request a phase-offset periodic output that starts on some arbitrary
+ * multiple of the clock period.
+ *
+ * For older kernels, define this flag to 0 so that checks for if it is
+ * enabled will always fail. Drivers should use '#if PTP_PEROUT_PHASE' to
+ * determine if the kernel has phase support, and use the flag as normal for
+ * checking supported flags or if the flag is enabled for a given request.
+ */
+#define PTP_PEROUT_PHASE 0
+#endif
+
+#endif /* CONFIG_PTP_1588_CLOCK */
+
+/*
+ * NEED_PTP_SYSTEM_TIMESTAMP
+ *
+ * Upstream commit 361800876f80 ("ptp: add PTP_SYS_OFFSET_EXTENDED
+ * ioctl") introduces new ioctl, driver and helper functions.
+ *
+ * Required for PhotonOS 3.0 to correctly support the backport of
+ * PTP patches introduced in Linux kernel version 5.0 onto 4.x kernels
+ */
+#ifdef NEED_PTP_SYSTEM_TIMESTAMP
+struct ptp_system_timestamp {
+	struct timespec64 pre_ts;
+	struct timespec64 post_ts;
+};
+
+static inline void
+ptp_read_system_prets(struct ptp_system_timestamp *sts) { }
+
+static inline void
+ptp_read_system_postts(struct ptp_system_timestamp *sts) { }
+#endif /* NEED_PTP_SYSTEM_TIMESTAMP */
+
+#ifdef NEED_PTP_CLASSIFY_RAW
+/* NEED_PTP_CLASSIFY_RAW
+ *
+ * The ptp_classify_raw() function was introduced into <linux/ptp_classify.h>
+ * as part of commit 164d8c666521 ("net: ptp: do not reimplement PTP/BPF
+ * classifier").
+ *
+ * The kernel does provide the classifier BPF program since commit
+ * 15f0127d1d18 ("net: added a BPF to help drivers detect PTP packets.").
+ * However, it requires initializing the BPF filter properly and that varies
+ * depending on the kernel version.
+ *
+ * The only current use for this function in our drivers is to enhance
+ * debugging messages. Rather than re-implementing the function just return
+ * PTP_CLASS_NONE indicating that it could not identify any PTP frame.
+ */
+#include <linux/ptp_classify.h>
+
+static inline unsigned int ptp_classify_raw(struct sk_buff *skb)
+{
+	return PTP_CLASS_NONE;
+}
+#endif /* NEED_PTP_CLASSIFY_RAW */
+
+#ifdef NEED_PTP_PARSE_HEADER
+/* NEED_PTP_PARSE_HEADER
+ *
+ * The ptp_parse_header() function was introduced upstream in commit
+ * bdfbb63c314a ("ptp: Add generic ptp v2 header parsing function").
+ *
+ * Since it is straightforward to implement, do so.
+ */
+#include <linux/ptp_classify.h>
+
+struct clock_identity {
+	u8 id[8];
+};
+
+struct port_identity {
+	struct clock_identity clock_identity;
+	__be16 port_number;
+};
+
+struct ptp_header {
+	u8 tsmt;  /* transportSpecific | messageType */
+	u8 ver;   /* reserved          | versionPTP  */
+	__be16 message_length;
+	u8 domain_number;
+	u8 reserved1;
+	u8 flag_field[2];
+	__be64 correction;
+	__be32 reserved2;
+	struct port_identity source_port_identity;
+	__be16 sequence_id;
+	u8 control;
+	u8 log_message_interval;
+} __packed;
+
+static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb,
+						  unsigned int type)
+{
+#if defined(CONFIG_NET_PTP_CLASSIFY)
+	u8 *ptr = skb_mac_header(skb);
+
+	if (type & PTP_CLASS_VLAN)
+		ptr += VLAN_HLEN;
+
+	switch (type & PTP_CLASS_PMASK) {
+	case PTP_CLASS_IPV4:
+		ptr += IPV4_HLEN(ptr) + UDP_HLEN;
+		break;
+	case PTP_CLASS_IPV6:
+		ptr += IP6_HLEN + UDP_HLEN;
+		break;
+	case PTP_CLASS_L2:
+		break;
+	default:
+		return NULL;
+	}
+
+	ptr += ETH_HLEN;
+
+	/* Ensure that the entire header is present in this packet. */
+	if (ptr + sizeof(struct ptp_header) > skb->data + skb->len)
+		return NULL;
+
+	return (struct ptp_header *)ptr;
+#else
+	return NULL;
+#endif
+}
+#endif /* NEED_PTP_PARSE_HEADER */
+
+#ifdef NEED_CPU_LATENCY_QOS_RENAME
+/* NEED_CPU_LATENCY_QOS_RENAME
+ *
+ * The PM_QOS_CPU_DMA_LATENCY definition was removed in 67b06ba01857 ("PM:
+ * QoS: Drop PM_QOS_CPU_DMA_LATENCY and rename related functions"). The
+ * related functions were renamed to use "cpu_latency_qos_" prefix.
+ *
+ * Use wrapper functions to map the new API onto the API available in older
+ * kernels.
+ */
+#include <linux/pm_qos.h>
+static inline void
+cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
+{
+	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, value);
+}
+
+static inline void
+cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
+{
+	pm_qos_update_request(req, new_value);
+}
+
+static inline void
+cpu_latency_qos_remove_request(struct pm_qos_request *req)
+{
+	pm_qos_remove_request(req);
+}
+#endif /* NEED_CPU_LATENCY_QOS_RENAME */
+
+#ifdef NEED_DECLARE_STATIC_KEY_FALSE
+/* NEED_DECLARE_STATIC_KEY_FALSE
+ *
+ * DECLARE_STATIC_KEY_FALSE was added by upstream commit b8fb03785d4d
+ * ("locking/static_keys: Provide DECLARE and well as DEFINE macros")
+ *
+ * The definition is now necessary to handle the xdpdrv work with more than 64
+ * cpus
+ */
+#ifdef HAVE_STRUCT_STATIC_KEY_FALSE
+#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key_false name
+#else
+#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key name
+#endif /* HAVE_STRUCT_STATIC_KEY_FALSE */
+#endif /* NEED_DECLARE_STATIC_KEY_FALSE */
+
+#ifdef NEED_DEFINE_STATIC_KEY_FALSE
+/* NEED_DEFINE_STATIC_KEY_FALSE
+ *
+ * DEFINE_STATIC_KEY_FALSE was added by upstream commit 11276d5306b8
+ * ("locking/static_keys: Add a new static_key interface")
+ *
+ * The definition is now necessary to handle the xdpdrv work with more than 64
+ * cpus
+ */
+#define DEFINE_STATIC_KEY_FALSE(name) \
+	struct static_key name = STATIC_KEY_INIT_FALSE
+#endif /* NEED_DEFINE_STATIC_KEY_FALSE */
+
+#ifdef NEED_STATIC_BRANCH_LIKELY
+/* NEED_STATIC_BRANCH_LIKELY
+ *
+ * static_branch_likely, static_branch_unlikely,
+ * static_branch_inc, static_branch_dec were added by upstream commit
+ * 11276d5306b8 ("locking/static_keys: Add a new
+ * static_key interface")
+ *
+ * The definition is now necessary to handle the xdpdrv work with more than 64
+ * cpus
+ *
+ * Note that we include all four definitions if static_branch_likely cannot be
+ * found in <linux/jump_label.h>.
+ */
+#define static_branch_likely(x)		likely(static_key_enabled(x))
+#define static_branch_unlikely(x)	unlikely(static_key_enabled(x))
+
+#define static_branch_inc(x)		static_key_slow_inc(x)
+#define static_branch_dec(x)		static_key_slow_dec(x)
+
+#endif /* NEED_STATIC_BRANCH_LIKELY */
+
+/* PCI related stuff */
+
+/* NEED_PCI_AER_CLEAR_NONFATAL_STATUS
+ *
+ * 894020fdd88c ("PCI/AER: Rationalize error status register clearing") has
+ * renamed pci_cleanup_aer_uncorrect_error_status to a more sane name.
+ */
+#ifdef NEED_PCI_AER_CLEAR_NONFATAL_STATUS
+#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status
+#endif /* NEED_PCI_AER_CLEAR_NONFATAL_STATUS */
+
+#ifdef NEED_NETDEV_XDP_STRUCT
+#define netdev_bpf netdev_xdp
+#endif /* NEED_NETDEV_XDP_STRUCT */
+
+#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION
+#ifdef HAVE_XDP_SUPPORT
+#include <linux/filter.h>
+static inline void
+_kc_bpf_warn_invalid_xdp_action(__maybe_unused struct net_device *dev,
+				__maybe_unused struct bpf_prog *prog, u32 act)
+{
+	bpf_warn_invalid_xdp_action(act);
+}
+
+#define bpf_warn_invalid_xdp_action(dev, prog, act) \
+	_kc_bpf_warn_invalid_xdp_action(dev, prog, act)
+#endif /* HAVE_XDP_SUPPORT */
+#endif /* NEED_NO_NETDEV_PROG_XDP_WARN_ACTION */
+
+/* NEED_ETH_HW_ADDR_SET
+ *
+ * eth_hw_addr_set was added by upstream commit
+ * 48eab831ae8b ("net: create netdev->dev_addr assignment helpers")
+ *
+ * Using eth_hw_addr_set became required in 5.17, when the dev_addr field in
+ * the netdev struct was constified.
See 48eab831ae8b ("net: create + * netdev->dev_addr assignment helpers") + */ +#ifdef NEED_ETH_HW_ADDR_SET +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif /* NEED_ETH_HW_ADDR_SET */ + +#ifdef NEED_JIFFIES_64_TIME_IS_MACROS +/* NEED_JIFFIES_64_TIME_IS_MACROS + * + * The jiffies64 time_is_* macros were introduced upstream by 3740dcdf8a77 + * ("jiffies: add time comparison functions for 64 bit jiffies") in Linux 4.9. + * + * Support for 64-bit jiffies has been available since the initial import of + * Linux into git in 2005, so its safe to just implement the macros as-is + * here. + */ +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a) +#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a) +#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a) +#endif /* NEED_JIFFIES_64_TIME_IS_MACROS */ + +#ifdef NEED_INDIRECT_CALL_WRAPPER_MACROS +/* NEED_INDIRECT_CALL_WRAPPER_MACROS + * + * The INDIRECT_CALL_* macros were introduced upstream as upstream commit + * 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls + * of builtin") which landed in Linux 5.0 + * + * These are easy to implement directly. + */ +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_1(f, f1, ...) \ + ({ \ + likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \ + }) +#define INDIRECT_CALL_2(f, f2, f1, ...) \ + ({ \ + likely(f == f2) ? f2(__VA_ARGS__) : \ + INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ + }) + +#define INDIRECT_CALLABLE_DECLARE(f) f +#define INDIRECT_CALLABLE_SCOPE +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALLABLE_DECLARE(f) +#define INDIRECT_CALLABLE_SCOPE static +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_WRAPPER_MACROS */ + +#ifdef NEED_INDIRECT_CALL_3_AND_4 +/* NEED_INDIRECT_CALL_3_AND_4 + * Support for the 3 and 4 call variants was added in upstream commit + * e678e9ddea96 ("indirect_call_wrapper: extend indirect wrapper to support up + * to 4 calls") + * + * These are easy to implement directly. + */ + +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_3_AND_4 */ + +#ifdef NEED_EXPORT_INDIRECT_CALLABLE +/* NEED_EXPORT_INDIRECT_CALLABLE + * + * Support for EXPORT_INDIRECT_CALLABLE was added in upstream commit + * 0053859496ba ("net: add EXPORT_INDIRECT_CALLABLE wrapper") + * + * These are easy to implement directly. + */ +#ifdef CONFIG_RETPOLINE +#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f) +#else +#define EXPORT_INDIRECT_CALLABLE(f) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_EXPORT_INDIRECT_CALLABLE */ + +/* NEED_DEVM_KASPRINTF and NEED_DEVM_KVASPRINTF + * + * devm_kvasprintf and devm_kasprintf were added by commit + * 75f2a4ead5d5 ("devres: Add devm_kasprintf and devm_kvasprintf API") + * in Linux 3.17. 
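+ *
+ * A typical call (illustrative only, not taken from this driver) looks like:
+ *   name = devm_kasprintf(dev, GFP_KERNEL, "ring-%u", idx);
+ * and the returned buffer is freed automatically when the device is unbound.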
+ */ +#ifdef NEED_DEVM_KVASPRINTF +__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, + const char *fmt, va_list ap); +#endif /* NEED_DEVM_KVASPRINTF */ + +#ifdef NEED_DEVM_KASPRINTF +__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, + const char *fmt, ...); +#endif /* NEED_DEVM_KASPRINTF */ + +#ifdef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#endif + +#ifdef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void +_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +#endif /* NEED_XSK_BUFF_DMA_SYNC_FOR_CPU */ + +#ifdef NEED_XSK_BUFF_POOL_RENAME +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#endif /* NEED_XSK_BUFF_POOL_RENAME */ + +#ifdef NEED_PCI_IOV_VF_ID +/* NEED_PCI_IOV_VF_ID + * + * pci_iov_vf_id were added by commit 21ca9fb62d468 ("PCI/IOV: + * Add pci_iov_vf_id() to get VF index") in Linux 5.18 + */ +int _kc_pci_iov_vf_id(struct pci_dev *dev); +#define pci_iov_vf_id _kc_pci_iov_vf_id +#endif /* NEED_PCI_IOV_VF_ID */ + +/* NEED_MUL_U64_U64_DIV_U64 + * + * mul_u64_u64_div_u64 was introduced in Linux 5.9 as part of commit + * 3dc167ba5729 ("sched/cputime: Improve cputime_adjust()") + */ +#ifdef NEED_MUL_U64_U64_DIV_U64 +u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); +#endif /* NEED_MUL_U64_U64_DIV_U64 */ + +#ifndef HAVE_LINKMODE +static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} +#endif /* !HAVE_LINKMODE */ + +#ifndef ETHTOOL_GLINKSETTINGS +/* Link mode bit indices */ +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + 
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + + /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit + * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* + * macro for bits > 31. The only way to use indices > 31 is to + * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. + */ + + __ETHTOOL_LINK_MODE_LAST + = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, +}; +#endif /* !ETHTOOL_GLINKSETTINGS */ + +#if defined(NEED_FLOW_MATCH) && defined(HAVE_TC_SETUP_CLSFLOWER) +/* NEED_FLOW_MATCH + * + * flow_match*, FLOW_DISSECTOR_MATCH, flow_rule*, flow_rule_match_key, and + * tc_cls_flower_offload_flow_rule were added by commit + * 8f2566225ae2 ("flow_offload: add flow_rule and flow_match structures and use + * them") in Linux 5.1. + */ + +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif /* HAVE_TC_FLOWER_VLAN_IN_TAGS */ + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +struct flow_match_ip { + struct flow_dissector_key_ip *key, *mask; +}; +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif /* HAVE_TC_FLOWER_ENC */ + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. 
+ */ + struct flow_action action; +#endif +}; + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} + +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +static inline void +flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +static inline void +flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +static inline void +flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +static inline void +flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif /* HAVE_TC_FLOWER_VLAN_IN_TAGS */ + +static inline void +flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +static inline void +flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +static inline void +flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +static inline void +flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +static inline void +flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP +static inline void +flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_ENC_IP */ +#endif /* HAVE_FLOW_DISSECTOR_KEY_IP */ + +static inline void +flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +static inline void +flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, 
FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} +#endif /* HAVE_TC_FLOWER_ENC */ +#endif /* NEED_FLOW_MATCH && HAVE_TC_SETUP_CLSFLOWER */ + +/* bitfield / bitmap */ + +/* NEED_BITMAP_COPY_CLEAR_TAIL + * + * backport + * c724f193619c ("bitmap: new bitmap_copy_safe and bitmap_{from,to}_arr32") + */ +#ifdef NEED_BITMAP_COPY_CLEAR_TAIL +/* Copy bitmap and clear tail bits in last word */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* NEED_BITMAP_COPY_CLEAR_TAIL */ + +/* NEED_BITMAP_FROM_ARR32 + * + * backport + * c724f193619c ("bitmap: new bitmap_copy_safe and bitmap_{from,to}_arr32") + */ +#ifdef NEED_BITMAP_FROM_ARR32 +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +static inline void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i/2] = (unsigned long) buf[i]; + if (++i < halfwords) + bitmap[i/2] |= ((unsigned long) buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. */ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#else /* BITS_PER_LONG == 64 */ +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. + */ +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_BITMAP_FROM_ARR32 */ + +/* NEED_BITMAP_TO_ARR32 + * + * backport + * c724f193619c ("bitmap: new bitmap_copy_safe and bitmap_{from,to}_arr32") + */ +#ifdef NEED_BITMAP_TO_ARR32 +#if BITS_PER_LONG == 64 +/** + * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits + * @buf: array of u32 (in host byte order), the dest bitmap + * @bitmap: array of unsigned longs, the source bitmap + * @nbits: number of bits in @bitmap + */ +static inline void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, + unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + buf[i] = (u32) (bitmap[i/2] & UINT_MAX); + if (++i < halfwords) + buf[i] = (u32) (bitmap[i/2] >> 32); + } + + /* Clear tail bits in last element of array beyond nbits. */ + if (nbits % BITS_PER_LONG) + buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); +} +#else +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. 
+ */ +#define bitmap_to_arr32(buf, bitmap, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (buf), \ + (const unsigned long *) (bitmap), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_BITMAP_TO_ARR32 */ + +#ifndef HAVE_INCLUDE_BITFIELD +/* linux/bitfield.h has been added in Linux 4.9 in upstream commit + * 3e9b3112ec74 ("add basic register-field manipulation macros") + */ +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ + ({ \ + BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ + _pfx "mask is not constant"); \ + BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \ + BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \ + ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ + _pfx "value too large for the field"); \ + BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ + _pfx "type of reg too small for mask"); \ + __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ + (1ULL << __bf_shf(_mask))); \ + }) + +/** + * FIELD_PREP() - prepare a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_val: value to put in the field + * + * FIELD_PREP() masks and shifts up the value. The result should + * be combined with other fields of the bitfield using logical OR. + */ +#define FIELD_PREP(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ + ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: value of entire bitfield + * + * FIELD_GET() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. + */ +#define FIELD_GET(_mask, _reg) \ + ({ \ + __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ + }) +#endif /* HAVE_INCLUDE_BITFIELD */ + +#ifdef NEED_BITFIELD_FIELD_MAX +/* linux/bitfield.h has FIELD_MAX added to it in Linux 5.7 in upstream + * commit e31a50162feb ("bitfield.h: add FIELD_MAX() and field_max()") + */ +/** + * FIELD_MAX() - produce the maximum value representable by a field + * @_mask: shifted mask defining the field's length and position + * + * FIELD_MAX() returns the maximum value that can be held in the field + * specified by @_mask. + */ +#define FIELD_MAX(_mask) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \ + (typeof(_mask))((_mask) >> __bf_shf(_mask)); \ + }) +#endif /* HAVE_BITFIELD_FIELD_MAX */ + +#ifdef NEED_BITFIELD_FIELD_FIT +/** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. 
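+ *
+ * For instance (illustrative only): FIELD_FIT(GENMASK(7, 4), 0xf) is true,
+ * while FIELD_FIT(GENMASK(7, 4), 0x1f) is false because 0x1f needs five bits.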
+ */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) +#endif /* NEED_BITFIELD_FIELD_FIT */ + +#ifdef NEED_BITFIELD_FIELD_MASK +/** + * linux/bitfield.h has field_mask() along with *_encode_bits() in 4.16: + * 00b0c9b82663 ("Add primitives for manipulating bitfields both in host and fixed-endian.") + * + */ +extern void __compiletime_error("value doesn't fit into mask") +__field_overflow(void); +extern void __compiletime_error("bad bitfield mask") +__bad_mask(void); +static __always_inline u64 field_multiplier(u64 field) +{ + if ((field | (field - 1)) & ((field | (field - 1)) + 1)) + __bad_mask(); + return field & -field; +} +static __always_inline u64 field_mask(u64 field) +{ + return field / field_multiplier(field); +} +#define ____MAKE_OP(type,base,to,from) \ +static __always_inline __##type type##_encode_bits(base v, base field) \ +{ \ + if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ + __field_overflow(); \ + return to((v & field_mask(field)) * field_multiplier(field)); \ +} \ +static __always_inline __##type type##_replace_bits(__##type old, \ + base val, base field) \ +{ \ + return (old & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline void type##p_replace_bits(__##type *p, \ + base val, base field) \ +{ \ + *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline base type##_get_bits(__##type v, base field) \ +{ \ + return (from(v) & field)/field_multiplier(field); \ +} +#define __MAKE_OP(size) \ + ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ + ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ + ____MAKE_OP(u##size,u##size,,) +__MAKE_OP(16) +__MAKE_OP(32) +__MAKE_OP(64) +#undef __MAKE_OP +#undef ____MAKE_OP +#endif + +#ifdef NEED_BUILD_BUG_ON +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) + +/** + * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied + * error message. + * @condition: the condition which the compiler should know is false. + * + * See BUILD_BUG_ON for description. + */ +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. + * + * If you have some code which relies on certain constants being equal, or + * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* NEED_BUILD_BUG_ON */ + +#ifdef NEED_IN_TASK +#define in_hardirq() (hardirq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#define in_task() (!(in_nmi() | in_hardirq() | \ + in_serving_softirq())) +#endif /* NEED_IN_TASK */ + +/* + * NEED_NETIF_NAPI_ADD_NO_WEIGHT + * + * Upstream commit b48b89f9c189 ("net: drop the weight argument from + * netif_napi_add") removes weight argument from function call. + * + * Our drivers always used default weight, which is 64. + * + * Define NEED_NETIF_NAPI_ADD_NO_WEIGHT on kernels 3.10+ to use old + * implementation. Undef for 6.1+ where new function was introduced. + * RedHat 9.2 required using no weight parameter option. 
+ */ +#ifdef NEED_NETIF_NAPI_ADD_NO_WEIGHT +static inline void +_kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int)) +{ + return netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT); +} + +/* RHEL7 complains about redefines. Undef first, then define compat wrapper */ +#ifdef netif_napi_add +#undef netif_napi_add +#endif +#define netif_napi_add _kc_netif_napi_add +#endif /* NEED_NETIF_NAPI_ADD_NO_WEIGHT */ + +/* + * NEED_ETHTOOL_SPRINTF + * + * Upstream commit 7888fe53b706 ("ethtool: Add common function for filling out + * strings") introduced ethtool_sprintf, which landed in Linux v5.13 + * + * The function implementation is moved to kcompat.c since the compiler + * complains it can never be inlined for the function with variable argument + * lists. + */ +#ifdef NEED_ETHTOOL_SPRINTF +__printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...); +#endif /* NEED_ETHTOOL_SPRINTF */ + +/* + * NEED_SYSFS_MATCH_STRING + * + * Upstream commit e1fe7b6a7b37 ("lib/string: add sysfs_match_string helper") + * introduced a helper for looking up strings in an array - it's pure algo stuff + * that is easy to backport if needed. + * Instead of covering sysfs_streq() by yet another flag just copy it. + */ +#ifdef NEED_SYSFS_MATCH_STRING +/* + * sysfs_streq - return true if strings are equal, modulo trailing newline + * @s1: one string + * @s2: another string + * + * This routine returns true iff two strings are equal, treating both + * NUL and newline-then-NUL as equivalent string terminations. It's + * geared for use with sysfs input strings, which generally terminate + * with newlines but are compared against values without newlines. + */ +static inline bool _kc_sysfs_streq(const char *s1, const char *s2) +{ + while (*s1 && *s1 == *s2) { + s1++; + s2++; + } + + if (*s1 == *s2) + return true; + if (!*s1 && *s2 == '\n' && !s2[1]) + return true; + if (*s1 == '\n' && !s1[1] && !*s2) + return true; + return false; +} + +/* + * __sysfs_match_string - matches given string in an array + * @array: array of strings + * @n: number of strings in the array or -1 for NULL terminated arrays + * @str: string to match with + * + * Returns index of @str in the @array or -EINVAL, just like match_string(). + * Uses sysfs_streq instead of strcmp for matching. + * + * This routine will look for a string in an array of strings up to the + * n-th element in the array or until the first NULL element. + * + * Historically the value of -1 for @n, was used to search in arrays that + * are NULL terminated. However, the function does not make a distinction + * when finishing the search: either @n elements have been compared OR + * the first NULL element was found. + */ +static inline int _kc___sysfs_match_string(const char * const *array, size_t n, + const char *str) +{ + const char *item; + int index; + + for (index = 0; index < n; index++) { + item = array[index]; + if (!item) + break; + if (sysfs_streq(item, str)) + return index; + } + + return -EINVAL; +} + +#define sysfs_match_string(_a, _s) \ + _kc___sysfs_match_string(_a, ARRAY_SIZE(_a), _s) + +#endif /* NEED_SYSFS_MATCH_STRING */ + +/* + * NEED_SYSFS_EMIT + * + * Upstream introduced following function in + * commit 2efc459d06f1 ("sysfs: Add sysfs_emit and sysfs_emit_at to format sysfs output") + * + * The function implementation is moved to kcompat.c since the compiler + * complains it can never be inlined for the function with variable argument + * lists. 
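+ *
+ * Typical use (illustrative only): a sysfs show() handler returns
+ *   sysfs_emit(buf, "%u\n", value);
+ * instead of open-coding scnprintf(buf, PAGE_SIZE, ...).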
+ */ +#ifdef NEED_SYSFS_EMIT +__printf(2, 3) int sysfs_emit(char *buf, const char *fmt, ...); +#endif /* NEED_SYSFS_EMIT */ + +/* + * HAVE_U64_STATS_FETCH_BEGIN_IRQ + * HAVE_U64_STATS_FETCH_RETRY_IRQ + * + * Upstream commit 44b0c2957adc ("u64_stats: Streamline the implementation") + * marks u64_stats_fetch_begin_irq() and u64_stats_fetch_retry_irq() + * as obsolete. Their functionality is combined with: u64_stats_fetch_begin() + * and u64_stats_fetch_retry(). + * + * Upstream commit dec5efcffad4 ("u64_stat: Remove the obsolete fetch_irq() + * variants.") removes u64_stats_fetch_begin_irq() and + * u64_stats_fetch_retry_irq(). + * + * Map u64_stats_fetch_begin() and u64_stats_fetch_retry() to the _irq() + * variants on the older kernels to allow the same driver code working on + * both old and new kernels. + */ +#ifdef HAVE_U64_STATS_FETCH_BEGIN_IRQ +#define u64_stats_fetch_begin _kc_u64_stats_fetch_begin + +static inline unsigned int +_kc_u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ + return u64_stats_fetch_begin_irq(syncp); +} +#endif /* HAVE_U64_STATS_FETCH_BEGIN_IRQ */ + +#ifdef HAVE_U64_STATS_FETCH_RETRY_IRQ +#define u64_stats_fetch_retry _kc_u64_stats_fetch_retry + +static inline bool +_kc_u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ + return u64_stats_fetch_retry_irq(syncp, start); +} +#endif /* HAVE_U64_STATS_FETCH_RETRY_IRQ */ + +/* + * NEED_U64_STATS_READ + * NEED_U64_STATS_SET + * + * Upstream commit 316580b69d0 ("u64_stats: provide u64_stats_t type") + * introduces the u64_stats_t data type and other helper APIs to read, + * add and increment the stats, in Linux v5.5. Support them on older kernels + * as well. + * + * Upstream commit f2efdb179289 ("u64_stats: Introduce u64_stats_set()") + * introduces u64_stats_set API to set the u64_stats_t variable with the + * value provided, in Linux v5.16. Add support for older kernels. + */ +#ifdef NEED_U64_STATS_READ +#if BITS_PER_LONG == 64 +#include + +typedef struct { + local64_t v; +} u64_stats_t; + +static inline u64 u64_stats_read(u64_stats_t *p) +{ + return local64_read(&p->v); +} + +static inline void u64_stats_add(u64_stats_t *p, unsigned long val) +{ + local64_add(val, &p->v); +} + +static inline void u64_stats_inc(u64_stats_t *p) +{ + local64_inc(&p->v); +} +#else +typedef struct { + u64 v; +} u64_stats_t; + +static inline u64 u64_stats_read(u64_stats_t *p) +{ + return p->v; +} + +static inline void u64_stats_add(u64_stats_t *p, unsigned long val) +{ + p->v += val; +} + +static inline void u64_stats_inc(u64_stats_t *p) +{ + p->v++; +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_U64_STATS_READ */ + +#ifdef NEED_U64_STATS_SET +#if BITS_PER_LONG == 64 +static inline void u64_stats_set(u64_stats_t *p, u64 val) +{ + local64_set(&p->v, val); +} +#else +static inline void u64_stats_set(u64_stats_t *p, u64 val) +{ + p->v = val; +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* NEED_U64_STATS_SET */ + +/* + * NEED_DEVM_KFREE + * NEED_DEVM_KZALLOC + * + * Upstream commit 9ac7849e35f7 ("devres: device resource management") + * Implement device resource management to allocate and free the resource + * for driver + */ +#ifdef NEED_DEVM_KFREE +#define devm_kfree(dev, p) kfree(p) +#else +/* Since commit 0571967dfb5d ("devres: constify p in devm_kfree()") the + * devm_kfree function has accepted a const void * parameter. Since commit + * cad064f1bd52 ("devres: handle zero size in devm_kmalloc()"), it has also + * accepted a NULL pointer safely. 
However, the null pointer acceptance is in + * devres.c and thus cannot be checked by kcompat-generator.sh. To handle + * this, unconditionally replace devm_kfree with a variant that both accepts + * a const void * pointer and handles a NULL value correctly. + */ +static inline void _kc_devm_kfree(struct device *dev, const void *p) +{ + if (p) + devm_kfree(dev, (void *)p); +} +#define devm_kfree _kc_devm_kfree +#endif /* NEED_DEVM_KFREE */ + +#ifdef NEED_DEVM_KZALLOC +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#endif /* NEED_DEVM_KZALLOC */ + +/* NEED_DIFF_BY_SCALED_PPM + * + * diff_by_scaled_ppm and adjust_by_scaled_ppm were introduced in + * kernel 6.1 by upstream commit 1060707e3809 ("ptp: introduce helpers + * to adjust by scaled parts per million"). + */ +#ifdef NEED_DIFF_BY_SCALED_PPM +static inline bool +diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff) +{ + bool negative = false; + + if (scaled_ppm < 0) { + negative = true; + scaled_ppm = -scaled_ppm; + } + + *diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, + 1000000ULL << 16); + + return negative; +} + +static inline u64 +adjust_by_scaled_ppm(u64 base, long scaled_ppm) +{ + u64 diff; + + if (diff_by_scaled_ppm(base, scaled_ppm, &diff)) + return base - diff; + + return base + diff; +} +#endif /* NEED_DIFF_BY_SCALED_PPM */ + +#ifndef HAVE_PCI_MSIX_CAN_ALLOC_DYN +static inline bool pci_msix_can_alloc_dyn(struct pci_dev __always_unused *dev) +{ + return false; +} +#endif /* !HAVE_PCI_MSIX_CAN_ALLOC_DYN */ + +#if !defined(HAVE_PCI_MSIX_ALLOC_IRQ_AT) && !defined(HAVE_PCI_MSIX_FREE_IRQ) +struct msi_map { + int index; + int virq; +}; +#endif /* !HAVE_PCI_MSIX_ALLOC_IRQ_AT && !HAVE_PCI_MSIX_FREE_IRQ */ + +#ifndef HAVE_PCI_MSIX_ALLOC_IRQ_AT +#define MSI_ANY_INDEX UINT_MAX +struct irq_affinity_desc; + +static inline struct msi_map +pci_msix_alloc_irq_at(struct pci_dev __always_unused *dev, + unsigned int __always_unused index, + const struct irq_affinity_desc __always_unused *affdesc) +{ + struct msi_map map = { .index = -ENOTSUPP }; + return map; +} +#endif /* !HAVE_PCI_MSIX_ALLOC_IRQ_AT */ + +#ifndef HAVE_PCI_MSIX_FREE_IRQ +static inline void +pci_msix_free_irq(struct pci_dev __always_unused *dev, + struct msi_map __always_unused map) +{ +} +#endif /* !HAVE_PCI_MSIX_FREE_IRQ */ + +#ifdef NEED_PCIE_PTM_ENABLED +/* NEED_PCIE_PTM_ENABLED + * + * pcie_ptm_enabled was added by upstream commit 014408cd624e + * ("PCI: Add pcie_ptm_enabled()"). + * + * It is easy to implement directly. 
+ */ +static inline bool pcie_ptm_enabled(struct pci_dev *dev) +{ +#if defined(HAVE_STRUCT_PCI_DEV_PTM_ENABLED) && defined(CONFIG_PCIE_PTM) + if (!dev) + return false; + + return dev->ptm_enabled; +#else /* !HAVE_STRUCT_PCI_DEV_PTM_ENABLED || !CONFIG_PCIE_PTM */ + return false; +#endif /* HAVE_STRUCT_PCI_DEV_PTM_ENABLED && CONFIG_PCIE_PTM */ +} +#endif /* NEED_PCIE_PTM_ENABLED */ + +/* NEED_PCI_ENABLE_PTM + * + * commit ac6c26da29c1 made this function private + * commit 1d71eb53e451 made this function public again + * This declares/defines the function for kernels missing it in linux/pci.h + */ +#ifdef NEED_PCI_ENABLE_PTM +#ifdef CONFIG_PCIE_PTM +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +#else +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } +#endif /* CONFIG_PCIE_PTM */ +#endif /* NEED_PCI_ENABLE_PTM */ + +/* NEED_PCIE_FLR + * NEED_PCIE_FLR_RETVAL + * + * pcie_flr() was added in the past, but wasn't generally available until 4.12 + * commit a60a2b73ba69 (4.12) made this function available as an extern + * commit 91295d79d658 (4.17) made this function return int instead of void + * + * This declares/defines the function for kernels missing it or needing a + * retval in linux/pci.h + */ +#ifdef NEED_PCIE_FLR +static inline int pcie_flr(struct pci_dev *dev) +{ + u32 cap; + + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); + if (!(cap & PCI_EXP_DEVCAP_FLR)) + return -ENOTTY; + + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); + + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); + return 0; +} +#endif /* NEED_PCIE_FLR */ +#ifdef NEED_PCIE_FLR_RETVAL +static inline int _kc_pcie_flr(struct pci_dev *dev) +{ + pcie_flr(dev); + return 0; +} +#define pcie_flr(dev) _kc_pcie_flr((dev)) +#endif /* NEED_PCIE_FLR_RETVAL */ + +/* NEED_DEV_PAGE_IS_REUSABLE + * + * dev_page_is_reusable was introduced by + * commit bc38f30f8dbc ("net: introduce common dev_page_is_reusable()") + * + * This function is trivial to re-implement in full. + */ +#ifdef NEED_DEV_PAGE_IS_REUSABLE +static inline bool dev_page_is_reusable(struct page *page) +{ + return likely(page_to_nid(page) == numa_mem_id() && + !page_is_pfmemalloc(page)); +} +#endif /* NEED_DEV_PAGE_IS_REUSABLE */ + +/* NEED_NAPI_BUILD_SKB + * + * napi_build_skb was introduced by + * commit f450d539c05a ("skbuff: introduce {,__}napi_build_skb() which reuses NAPI cache heads") + * + * This function is a more efficient version of build_skb(). + */ +#ifdef NEED_NAPI_BUILD_SKB +static inline +struct sk_buff *napi_build_skb(void *data, unsigned int frag_size) +{ + return build_skb(data, frag_size); +} +#endif /* NEED_NAPI_BUILD_SKB */ + +/* NEED_DEBUGFS_LOOKUP + * + * Old RHELs (7.2-7.4) do not have this backported. Create a stub and always + * return NULL. Should not affect any important feature workflow and allows the + * driver to compile on older kernels. + */ +#ifdef NEED_DEBUGFS_LOOKUP + +#include <linux/debugfs.h> + +static inline struct dentry * +debugfs_lookup(const char *name, struct dentry *parent) +{ + return NULL; +} +#endif /* NEED_DEBUGFS_LOOKUP */ + +/* NEED_DEBUGFS_LOOKUP_AND_REMOVE + * + * Upstream commit dec9b2f1e0455 ("debugfs: add debugfs_lookup_and_remove()") + * + * Should work the same as the upstream equivalent. 
+ */ +#ifdef NEED_DEBUGFS_LOOKUP_AND_REMOVE + +#include <linux/debugfs.h> + +static inline void +debugfs_lookup_and_remove(const char *name, struct dentry *parent) +{ + struct dentry *dentry; + + dentry = debugfs_lookup(name, parent); + if (!dentry) + return; + + debugfs_remove(dentry); + dput(dentry); +} +#endif /* NEED_DEBUGFS_LOOKUP_AND_REMOVE */ + +/* NEED_FS_FILE_DENTRY + * + * this is a simple implementation of file_dentry() (introduced in v4.6, + * backported to stable mainline 4.5 and 4.6 kernels) + * + * prior to the existence of file_dentry() this logic was open-coded, + * and if a given kernel has not backported it, then "oversimplification bugs" + * are present there anyway. + */ +#ifdef NEED_FS_FILE_DENTRY +static inline struct dentry *file_dentry(const struct file *file) +{ + return file->f_path.dentry; +} +#endif /* NEED_FS_FILE_DENTRY */ + +/* NEED_CLASS_CREATE_WITH_MODULE_PARAM + * + * Upstream removed the owner argument from the helper macro class_create in + * 1aaba11da9aa ("remove module * from class_create()") + * + * In dcfbb67e48a2 ("use lock_class_key already present in struct subsys_private") + * the macro was removed completely. + * + * class_create no longer has an owner/module param as it was not used. + */ +#ifdef NEED_CLASS_CREATE_WITH_MODULE_PARAM +static inline struct class *_kc_class_create(const char *name) +{ + return class_create(THIS_MODULE, name); +} +#ifdef class_create +#undef class_create +#endif +#define class_create _kc_class_create +#endif /* NEED_CLASS_CREATE_WITH_MODULE_PARAM */ + +/* NEED_LOWER_16_BITS and NEED_UPPER_16_BITS + * + * Upstream commit 03cb4473be92 ("ice: add low level PTP clock access + * functions") introduced the lower_16_bits() and upper_16_bits() macros. They + * are straightforward to implement if missing. + */ +#ifdef NEED_LOWER_16_BITS +#define lower_16_bits(n) ((u16)((n) & 0xffff)) +#endif /* NEED_LOWER_16_BITS */ + +#ifdef NEED_UPPER_16_BITS +#define upper_16_bits(n) ((u16)((n) >> 16)) +#endif /* NEED_UPPER_16_BITS */ + +#ifdef NEED_HWMON_CHANNEL_INFO +#define HWMON_CHANNEL_INFO(stype, ...) \ + (&(struct hwmon_channel_info) { \ + .type = hwmon_##stype, \ + .config = (u32 []) { \ + __VA_ARGS__, 0 \ + } \ + }) +#endif /* NEED_HWMON_CHANNEL_INFO */ + +/* NEED_ASSIGN_BIT + * + * Upstream commit 5307e2ad69ab ("bitops: Introduce assign_bit()") added the + * assign_bit() helper to replace if checks for setting/clearing bits. + */ +#ifdef NEED_ASSIGN_BIT +static inline void assign_bit(long nr, unsigned long *addr, bool value) +{ + if (value) + set_bit(nr, addr); + else + clear_bit(nr, addr); +} +#endif /* NEED_ASSIGN_BIT */ + +/* + * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21. + * In the meantime, to support gcc < 10, we implement __has_builtin + * by hand. + */ +#ifndef __has_builtin +#define __has_builtin(x) (0) +#endif + +/* NEED___STRUCT_SIZE + * + * 9f7d69c5cd23 ("fortify: Convert to struct vs member helpers") of kernel v6.2 + * added the following two macros, one of which is used by DEFINE_FLEX() + */ +#ifdef NEED___STRUCT_SIZE +/* + * When the size of an allocated object is needed, use the best available + * mechanism to find it. (For cases where sizeof() cannot be used.) 
+ */ +#if __has_builtin(__builtin_dynamic_object_size) +#define __struct_size(p) __builtin_dynamic_object_size(p, 0) +#define __member_size(p) __builtin_dynamic_object_size(p, 1) +#else +#define __struct_size(p) __builtin_object_size(p, 0) +#define __member_size(p) __builtin_object_size(p, 1) +#endif +#endif /* NEED___STRUCT_SIZE */ + +/* NEED_KREALLOC_ARRAY + * + * krealloc_array was added by upstream commit + * f0dbd2bd1c22 ("mm: slab: provide krealloc_array()"). + * + * For older kernels, add a new API wrapper around krealloc(). + */ +#ifdef NEED_KREALLOC_ARRAY +static inline void *__must_check krealloc_array(void *p, + size_t new_n, + size_t new_size, + gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) + return NULL; + + return krealloc(p, bytes, flags); +} +#endif /* NEED_KREALLOC_ARRAY */ + +/* NEED_XDP_DO_FLUSH + * + * Upstream commit 1d233886dd90 ("xdp: Use bulking for non-map XDP_REDIRECT + * and consolidate code paths") replaced xdp_do_flush_map with xdp_do_flush + * and 7f04bd109d4c ("net: Tree wide: Replace xdp_do_flush_map() with + * xdp_do_flush()") cleaned up related code. + */ +#ifdef NEED_XDP_DO_FLUSH +static inline void xdp_do_flush(void) +{ + xdp_do_flush_map(); +} +#endif /* NEED_XDP_DO_FLUSH */ + +#ifdef NEED_XDP_FEATURES +enum netdev_xdp_act { + NETDEV_XDP_ACT_BASIC = 1, + NETDEV_XDP_ACT_REDIRECT = 2, + NETDEV_XDP_ACT_NDO_XMIT = 4, + NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, + NETDEV_XDP_ACT_HW_OFFLOAD = 16, + NETDEV_XDP_ACT_RX_SG = 32, + NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + + NETDEV_XDP_ACT_MASK = 127, +}; + +typedef u32 xdp_features_t; + +static inline void +xdp_set_features_flag(struct net_device *dev, xdp_features_t val) +{ +} + +static inline void xdp_clear_features_flag(struct net_device *dev) +{ +} + +static inline void +xdp_features_set_redirect_target(struct net_device *dev, bool support_sg) +{ +} + +static inline void xdp_features_clear_redirect_target(struct net_device *dev) +{ +} +#endif /* NEED_XDP_FEATURES */ + +#ifdef NEED_FIND_NEXT_BIT_WRAP +/* NEED_FIND_NEXT_BIT_WRAP + * + * The find_next_bit_wrap function was added by commit 6cc18331a987 + * ("lib/find_bit: add find_next{,_and}_bit_wrap") + * + * For older kernels, define find_next_bit_wrap function that calls + * find_next_bit function and find_first_bit macro. + */ +static inline +unsigned long find_next_bit_wrap(const unsigned long *addr, + unsigned long size, unsigned long offset) +{ + unsigned long bit = find_next_bit(addr, size, offset); + + if (bit < size) + return bit; + + bit = find_first_bit(addr, offset); + return bit < offset ? bit : size; +} +#endif /* NEED_FIND_NEXT_BIT_WRAP */ + +#ifdef NEED_IS_CONSTEXPR +/* __is_constexpr() macro has moved acros 3 upstream kernel headers: + * commit 3c8ba0d61d04 ("kernel.h: Retain constant expression output for max()/min()") + * introduced it in kernel.h, for kernel v4.17 + * commit b296a6d53339 ("kernel.h: split out min()/max() et al. helpers") moved + * it to minmax.h; + * commit f747e6667ebb ("linux/bits.h: fix compilation error with GENMASK") + * moved it to its current location of const.h + */ +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? 
((void *)((long)(x) * 0l)) : (int *)8))) +#endif /* NEED_IS_CONSTEXPR */ + +/* NEED_DECLARE_FLEX_ARRAY + * + * Upstream commit 3080ea5553 ("stddef: Introduce DECLARE_FLEX_ARRAY() helper") + * introduces DECLARE_FLEX_ARRAY to support flexible arrays in unions or + * alone in a structure. + */ +#ifdef NEED_DECLARE_FLEX_ARRAY +#define DECLARE_FLEX_ARRAY(TYPE, NAME) \ + struct { \ + struct { } __empty_ ## NAME; \ + TYPE NAME[]; \ + } +#endif /* NEED_DECLARE_FLEX_ARRAY */ + +#ifdef NEED_LIST_COUNT_NODES +/* list_count_nodes was added as part of the list.h API by commit 4d70c74659d9 + * ("i915: Move list_count() to list.h as list_count_nodes() for broader use") + * This landed in Linux v6.3 + * + * It's straightforward to directly implement the basic loop. + */ +static inline size_t list_count_nodes(struct list_head *head) +{ + struct list_head *pos; + size_t count = 0; + + list_for_each(pos, head) + count++; + + return count; +} +#endif /* NEED_LIST_COUNT_NODES */ +#ifdef NEED_STATIC_ASSERT +/* + * NEED_STATIC_ASSERT was introduced with upstream commit 6bab69c6501 + * ("build_bug.h: add wrapper for _Static_assert") + * Available for kernels >= 5.1 + * + * Macro for the _Static_assert GCC keyword (C11) + */ +#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) +#define __static_assert(expr, msg, ...) _Static_assert(expr, msg) +#endif /* NEED_STATIC_ASSERT */ + +#ifdef NEED_ETH_TYPE_VLAN +#include <linux/if_vlan.h> +/** + * eth_type_vlan was added in commit fe19c4f971a5 ("lan: Check for vlan ethernet + * types for 8021.q or 802.1ad"). + * + * eth_type_vlan - check for valid vlan ether type. + * @ethertype: ether type to check + * + * Returns true if the ether type is a vlan ether type. + */ +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): + case htons(ETH_P_8021AD): + return true; + default: + return false; + } +} +#endif /* NEED_ETH_TYPE_VLAN */ + +#ifdef NEED___STRUCT_GROUP +/** + * __struct_group() - Create a mirrored named and anonymous struct + * + * @TAG: The tag name for the named sub-struct (usually empty) + * @NAME: The identifier name of the mirrored sub-struct + * @ATTRS: Any struct attributes (usually empty) + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical layout + * and size: one anonymous and one named. The former's members can be used + * normally without sub-struct naming, and the latter can be used to + * reason about the start, end, and size of the group of struct members. + * The named struct can also be explicitly tagged for layer reuse, as well + * as both having struct attributes appended. + */ +#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ + union { \ + struct { MEMBERS } ATTRS; \ + struct TAG { MEMBERS } ATTRS NAME; \ + } +#endif /* NEED___STRUCT_GROUP */ + +#ifdef NEED_STRUCT_GROUP +/** + * struct_group() - Wrap a set of declarations in a mirrored struct + * + * @NAME: The identifier name of the mirrored sub-struct + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. + */ +#define struct_group(NAME, MEMBERS...) 
\ + __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS) + +/** + * struct_group_tagged() - Create a struct_group with a reusable tag + * + * @TAG: The tag name for the named sub-struct + * @NAME: The identifier name of the mirrored sub-struct + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. Includes struct tag argument for the named copy, + * so the specified layout can be reused later. + */ +#define struct_group_tagged(TAG, NAME, MEMBERS...) \ + __struct_group(TAG, NAME, /* no attrs */, MEMBERS) +#endif /* NEED_STRUCT_GROUP */ + +#ifdef NEED_READ_POLL_TIMEOUT +/* + * 5f5323a14cad ("iopoll: introduce read_poll_timeout macro") + * Added in kernel 5.8 + */ +#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ + sleep_before_read, args...) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + might_sleep_if((__sleep_us) != 0); \ + if (sleep_before_read && __sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + for (;;) { \ + (val) = op(args); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(args); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) +#else +#include <linux/iopoll.h> +#endif /* NEED_READ_POLL_TIMEOUT */ + +#ifndef HAVE_DPLL_LOCK_STATUS_ERROR +/* Copied from include/uapi/linux/dpll.h to have common dpll status enums + * between sysfs and dpll subsystem based solutions. 
+ * cf4f0f1e1c465 ("dpll: extend uapi by lock status error attribute") + * Added in kernel 6.9 + */ +enum dpll_lock_status_error { + DPLL_LOCK_STATUS_ERROR_NONE = 1, + DPLL_LOCK_STATUS_ERROR_UNDEFINED, + DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN, + DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH, + + /* private: */ + __DPLL_LOCK_STATUS_ERROR_MAX, + DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1) +}; + +#endif /* HAVE_DPLL_LOCK_STATUS_ERROR */ + +#ifndef NEED_DPLL_NETDEV_PIN_SET +#define netdev_dpll_pin_set dpll_netdev_pin_set +#define netdev_dpll_pin_clear dpll_netdev_pin_clear +#endif /* HAVE_DPLL_NETDEV_PIN_SET */ + +#ifdef NEED_RADIX_TREE_EMPTY +static inline bool radix_tree_empty(struct radix_tree_root *root) +{ + return !root->rnode; +} +#endif /* NEED_RADIX_TREE_EMPTY */ + +#ifdef NEED_SET_SCHED_FIFO +/* + * 7318d4cc14c8 ("sched: Provide sched_set_fifo()") + * Added in kernel 5.9, + * converted to a macro for kcompat + */ +#include + +#ifdef NEED_SCHED_PARAM +#include +#endif /* NEED_SCHED_PARAM */ +#ifdef NEED_RT_H +#include +#else/* NEED_RT_H */ +#include +#endif /* NEED_RT_H */ +#define sched_set_fifo(p) \ +({ \ + struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; \ + \ + WARN_ON_ONCE(sched_setscheduler_nocheck((p), SCHED_FIFO,&sp) != 0);\ +}) +#endif /* NEED_SET_SCHED_FIFO */ + +#endif /* _KCOMPAT_IMPL_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_overflow.h b/drivers/net/ethernet/guangruntong/kcompat_overflow.h new file mode 100755 index 0000000000000..dc89d338af97a --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_overflow.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +/* The is_signed_type macro is redefined in a few places in various kernel + * headers. If this header is included at the same time as one of those, we + * will generate compilation warnings. Since we can't fix every old kernel, + * rename is_signed_type for this file to _kc_is_signed_type. This prevents + * the macro name collision, and should be safe since our drivers do not + * directly call the macro. 
+ */ +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. 
+ */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. 
+ * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_rhel_defs.h b/drivers/net/ethernet/guangruntong/kcompat_rhel_defs.h new file mode 100755 index 0000000000000..0d981f0ef77ac --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_rhel_defs.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_RHEL_DEFS_H_ +#define _KCOMPAT_RHEL_DEFS_H_ + +/* This is the RedHat Enterprise Linux distribution specific definitions file. + * It defines what features need backports for a given version of the RHEL + * kernel. + * + * It checks the RHEL_RELEASE_CODE and RHEL_RELEASE_VERSION macros to decide + * what support the target kernel has. + * + * It assumes that kcompat_std_defs.h has already been processed, and will + * #define or #undef any flags that have changed based on backports done by + * RHEL. 
+ */ + +#if !RHEL_RELEASE_CODE +#error "RHEL_RELEASE_CODE is 0 or undefined" +#endif + +#ifndef RHEL_RELEASE_VERSION +#error "RHEL_RELEASE_VERSION is undefined" +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3)) +#define NEED_NETDEV_TXQ_BQL_PREFETCH +#else /* >= 7.3 */ +#undef NEED_DEV_PRINTK_ONCE +#undef NEED_DEVM_KASPRINTF +#define HAVE_DEVLINK_PORT_SPLIT +#endif /* 7.3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +#define NEED_BUILD_BUG_ON +#else /* >= 7.4 */ +#define HAVE_RHEL7_EXTENDED_OFFLOAD_STATS +#define HAVE_INCLUDE_BITFIELD +#endif /* 7.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#else /* >= 7.5 */ +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_FLOW_DISSECTOR_KEY_IP +#endif /* 7.5 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)) +#undef HAVE_XDP_BUFF_RXQ +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#else /* >= 7.6 */ +#undef NEED_JIFFIES_64_TIME_IS_MACROS +#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#undef NEED_TC_SETUP_QDISC_MQPRIO +#endif /* 7.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)) +#else /* >= 7.7 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#define HAVE_ETHTOOL_NEW_100G_BITS +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_IN_TASK +#define HAVE_FLOW_DISSECTOR_KEY_ENC_IP +#endif /* 7.7 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,8)) +#else /* >= 7.8 */ +#endif /* 7.8 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,9)) +#else /* >= 7.9 */ +/* mul_u64_u64_div_u64 was backported into RHEL 7.9 but not into the early + * 8.x releases + */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#undef NEED_MUL_U64_U64_DIV_U64 +#endif /* < 8.0 */ +#endif /* 7.9 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +#else /* >= 8.0 */ +#undef HAVE_TCF_EXTS_TO_LIST +#undef HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_NDO_OFFLOAD_STATS +#undef HAVE_RHEL7_EXTENDED_OFFLOAD_STATS +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +/* 7.7 undefs it due to a backport in 7.7+, but 8.0 needs it still */ +#define NEED_NETDEV_TX_SENT_QUEUE +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#endif /* 8.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,1)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#define NEED_FLOW_MATCH +#else /* >= 8.1 */ +#define HAVE_ETHTOOL_NEW_100G_BITS +#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#undef NEED_FLOW_MATCH +#define HAVE_DEVLINK_PARAMS_PUBLISH +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_INDIRECT_CALL_WRAPPER_MACROS +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#define HAVE_GRETAP_TYPE +#define HAVE_GENEVE_TYPE +#define HAVE_VXLAN_TYPE +#define HAVE_LINKMODE +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#endif /* 8.1 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < 
RHEL_RELEASE_VERSION(8,2)) +#else /* >= 8.2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA +#undef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#undef NEED_SKB_FRAG_OFF +#undef NEED_SKB_FRAG_OFF_ADD +#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#define HAVE_DEVLINK_HEALTH +#define HAVE_NETDEV_SB_DEV +#endif /* 8.2 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,3)) +#else /* >= 8.3 */ +#undef NEED_CPU_LATENCY_QOS_RENAME +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef NEED_DEVLINK_REGION_CREATE_OPS +#undef NEED_MUL_U64_U64_DIV_U64 +#endif /* 8.3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,4)) +#else /* >= 8.4 */ +#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#undef NEED_NET_PREFETCH +#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#undef HAVE_XDP_QUERY_PROG +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NDO_XSK_WAKEUP +#define XSK_UMEM_RETURNS_XDP_DESC +#undef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#define HAVE_PTP_FIND_PIN_UNLOCKED +#endif /* 8.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,5)) +#else /* >= 8.5 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#undef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#undef HAVE_NAPI_BUSY_LOOP +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#undef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define NO_XDP_QUERY_XSK_UMEM +#undef NEED_XSK_BUFF_POOL_RENAME +#define HAVE_NETDEV_BPF_XSK_POOL +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_DEVLINK_OPS_CREATE_DEL +#undef NEED_ETHTOOL_SPRINTF +#endif /* 8.5 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,6)) +#else /* >= 8.6 */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_DEVL_PORT_REGISTER +#endif /* < 9.0 */ +#undef NEED_ETH_HW_ADDR_SET +#endif /* 8.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,7)) +#else /* >= 8.7 */ +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef HAVE_DEVLINK_REGISTER_SETS_DEV +#define HAVE_DEVLINK_NOTIFY_REGISTER +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#define HAVE_DEVLINK_SET_STATE_3_PARAM +#endif /* 8.7 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#else /* >= 9.0 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_NDO_ETH_IOCTL +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#undef HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#endif /* 9.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,1)) +#else /* 
>= 9.1 */ +#undef HAVE_PASID_SUPPORT +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_XDP_DO_FLUSH +#endif /* 9.1 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,2)) +#else /* >= 9.2 */ +#undef NEED_NETIF_NAPI_ADD_NO_WEIGHT +#endif /* 9.2 */ + +#endif /* _KCOMPAT_RHEL_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_sles_defs.h b/drivers/net/ethernet/guangruntong/kcompat_sles_defs.h new file mode 100755 index 0000000000000..f7de700d4a6a3 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_sles_defs.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_SLES_DEFS_H_ +#define _KCOMPAT_SLES_DEFS_H_ + +/* This is the SUSE Linux Enterprise distribution specific definitions file. + * It defines what features need backports for a given version of the SUSE + * Linux Enterprise kernel. + * + * It checks a combination of the LINUX_VERSION code and the + * SLE_LOCALVERSION_CODE to determine what support the kernel has. + * + * It assumes that kcompat_std_defs.h has already been processed, and will + * #define or #undef any flags that have changed based on backports done by + * SUSE. + */ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +#if !SLE_KERNEL_REVISION +#error "SLE_KERNEL_REVISION is 0 or undefined" +#endif + +#if SLE_KERNEL_REVISION > 65535 +#error "SLE_KERNEL_REVISION is unexpectedly large" +#endif + +/* SLE kernel versions are a combination of the LINUX_VERSION_CODE along with + * an extra digit that indicates the SUSE specific revision of that kernel. + * This value is found in the CONFIG_LOCALVERSION of the SUSE kernel, which is + * extracted by common.mk and placed into SLE_KERNEL_REVISION_CODE. + * + * We combine the value of SLE_KERNEL_REVISION along with the LINUX_VERSION code + * to generate the useful value that determines what specific kernel we're + * dealing with. + * + * Just in case the SLE_KERNEL_REVISION ever goes above 255, we reserve 16 bits + * instead of 8 for this value. + */ +#define SLE_KERNEL_CODE ((LINUX_VERSION_CODE << 16) + SLE_KERNEL_REVISION) +#define SLE_KERNEL_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,c) << 16) + (d)) + +/* Unlike RHEL, SUSE kernels are not always tied to a single service pack. For + * example, 4.12.14 was used as the base for SLE 15 SP1, SLE 12 SP4, and SLE 12 + * SP5. + * + * You can find the patches that SUSE applied to the kernel tree at + * https://github.com/SUSE/kernel-source. + * + * You can find the correct kernel version for a check by using steps similar + * to the following + * + * 1) download the kernel-source repo + * 2) checkout the relevant branch, i.e SLE15-SP3 + * 3) find the relevant backport you're interested in the patches.suse + * directory + * 4) git log to locate the commit that introduced the backport + * 5) git describe --contains to find the relevant tag that includes that + * commit, i.e. rpm-5.3.18-37 + * 6) those digits represent the SLE kernel that introduced that backport. + * + * Try to keep the checks in SLE_KERNEL_CODE order and condense where + * possible. 
+ */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,4,0,73)) +#else /* >= 4.4.0-73 */ +#define HAVE_DEVLINK_PORT_SPLIT +#endif /* 4.4.0-73 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE > SLE_KERNEL_VERSION(4,12,14,23) && \ + SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,94)) +/* + * 4.12.14 is used as the base for SLE 12 SP4, SLE 12 SP5, SLE 15, and SLE 15 + * SP1. Unfortunately the revision codes do not line up cleanly. SLE 15 + * launched with 4.12.14-23. It appears that SLE 12 SP4 and SLE 15 SP1 both + * diverged from this point, with SLE 12 SP4 kernels starting around + * 4.12.14-94. A few backports for SLE 15 SP1 landed in some alpha and beta + * kernels tagged between 4.12.14-25 up to 4.12.14-32. These changes did not + * make it into SLE 12 SP4. This was cleaned up with SLE 12 SP5 by an apparent + * merge in 4.12.14-111. The official launch of SLE 15 SP1 ended up with + * version 4.12.14-195. + * + * Because of this inconsistency and because all of these kernels appear to be + * alpha or beta kernel releases for SLE 15 SP1, we do not rely on version + * checks between this range. Issue a warning to indicate that we do not + * support these. + */ +#warning "SLE kernel versions between 4.12.14-23 and 4.12.14-94 are not supported" +#endif + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,10)) +#else /* >= 4.12.14-10 */ +#undef NEED_INDIRECT_CALL_WRAPPER_MACROS +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 4.12.14-10 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,100)) +#else /* >= 4.12.14-100 */ +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#endif /* 4.12.14-100 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,111)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#else /* >= 4.12.14-111 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#undef NEED_MACVLAN_ACCEL_PRIV +#undef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +#undef NEED_MACVLAN_SUPPORTS_DEST_FILTER +#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#endif /* 4.12.14-111 */ + +/*****************************************************************************/ +/* SLES 12-SP5 base kernel version */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,115)) +#else /* >= 4.12.14-115 */ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_MIRRED_DEV +#define HAVE_TCF_BLOCK +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#undef NEED_TC_SETUP_QDISC_MQPRIO +#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#undef NEED_NETDEV_TX_SENT_QUEUE +#define HAVE_LINKMODE +#endif /* 4.12.14-115 */ + +/*****************************************************************************/ +/* SLES 15-SP1 base */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4,12,14,195)) +#else /* >= 4.12.14-195 */ +#define HAVE_DEVLINK_PARAMS +#undef NEED_NETDEV_TX_SENT_QUEUE +#endif /* 4.12.14-195 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,8,2)) +#else /* >= 5.3.8-2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA +#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#undef NEED_SKB_FRAG_OFF +#undef NEED_SKB_FRAG_OFF_ADD 
+#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_DEVLINK_PARAMS_PUBLISH +#endif /* 5.3.8-2 */ + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,16,2)) +#else /* >= 5.3.16-2 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.3.16-2 */ + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,24)) +#else /* >= 5.3.18-24 */ +#undef NEED_MUL_U64_U64_DIV_U64 +#endif + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,26)) +#else /* >= 5.3.18-26 */ +#undef NEED_CPU_LATENCY_QOS_RENAME +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#endif + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,34)) +#else /* >= 5.3.18-34 */ +#undef NEED_DEVLINK_REGION_CREATE_OPS +#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#endif /* 5.3.18-34 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,37)) +#else /* >= 5.3.18-37 */ +#undef NEED_NET_PREFETCH +#endif /* 5.3.18-37 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,38)) +#else /* >= 5.3.18-38 */ +#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#endif /* 5.3.18-38 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,41)) +#define NEED_XSK_BUFF_POOL_RENAME +#else /* >= 5.3.18-41 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_NETDEV_BPF_XSK_POOL +#undef NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#undef NEED_XSK_BUFF_POOL_RENAME +#undef NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.3.18-41 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,3,18,59)) +#else /* >= 5.3.18-59 */ +#undef NEED_ETH_HW_ADDR_SET +#endif /* 5.3.18-59 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 14, 17, 1)) +#else /* >= 5.14.17-150400.1 */ + #undef HAVE_DEVLINK_PARAMS_PUBLISH + #undef HAVE_DEVLINK_REGISTER_SETS_DEV + #define HAVE_DEVLINK_SET_FEATURES + #undef NEED_ETHTOOL_SPRINTF +#endif /* 5.14.17-150400.1 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5,14,21,9)) +#else /* >= 5.14.21-150400.9 */ +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_DEVLINK_OPS_CREATE_DEL +#define HAVE_DEVLINK_SET_STATE_3_PARAM +#endif /* 5.14.21-150400.9 */ + +#endif /* _KCOMPAT_SLES_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_std_defs.h b/drivers/net/ethernet/guangruntong/kcompat_std_defs.h new file mode 100644 index 0000000000000..46a92ff953246 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_std_defs.h @@ -0,0 +1,393 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_STD_DEFS_H_ +#define _KCOMPAT_STD_DEFS_H_ + +/* This file contains the definitions for what kernel features need backports + * for a given kernel. It targets only the standard stable kernel releases. + * It must check only LINUX_VERSION_CODE and assume the kernel is a standard + * release, and not a custom distribution. 
+ * + * It must define HAVE_ and NEED_ for features. It must not + * implement any backports, instead leaving the implementation to the + * kcompat_impl.h header. + * + * If a feature can be easily implemented as a replacement macro or fully + * backported, use a NEED_ to indicate that the feature needs + * a backport. (If NEED_ is undefined, then no backport for that feature + * is needed). + * + * If a feature cannot be easily implemented in kcompat directly, but + * requires drivers to make specific changes such as stripping out an entire + * feature or modifying a function pointer prototype, use a HAVE_. + */ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +/*****************************************************************************/ +//#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) //sam +#else /* >= 3,10,0 */ +#define NEED_NETIF_NAPI_ADD_NO_WEIGHT +#define NEED_ETHTOOL_SPRINTF +#endif /* 3,10,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) //sam +#define NEED_DEVM_KASPRINTF +#else /* >= 3,17,0 */ +#endif /* 3,17,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) //sam +#define NEED_DEV_PM_DOMAIN_ATTACH_DETACH +#else /* >= 3,18,0 */ +#endif /* 3,18,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) +#define NEED_DEV_PRINTK_ONCE +#else /* >= 3,19,0 */ +#endif /* 3,19,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#define NEED_DEFINE_STATIC_KEY_FALSE +#define NEED_STATIC_BRANCH +#else /* >= 4,3,0 */ +#define NEED_DECLARE_STATIC_KEY_FALSE +#endif /* 4,3,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#else /* >= 4,6,0 */ +#define HAVE_DEVLINK_PORT_SPLIT +#endif /* 4,6,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#else /* >= 4,8,0 */ +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_PCI_ALLOC_IRQ +#define HAVE_NDO_UDP_TUNNEL_CALLBACK +#endif /* 4,8,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#define NEED_JIFFIES_64_TIME_IS_MACROS +#else /* >= 4,9,0 */ +#define HAVE_KTHREAD_DELAYED_API +#define HAVE_NDO_OFFLOAD_STATS +#undef NEED_DECLARE_STATIC_KEY_FALSE +#define HAVE_INCLUDE_BITFIELD +#endif /* 4,9,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,62)) +#ifndef KYLIN_KERNEL44 +#define NEED_IN_TASK +#endif +#else /* >= 4,9,62 */ +#endif /* 4,9,62 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#else /* >= 4,12,0 */ +#define HAVE_NAPI_BUSY_LOOP +#endif /* 4,12,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#else /* >= 4,13,0 */ +#define 
HAVE_FLOW_DISSECTOR_KEY_IP +#endif /* 4,13,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#define NEED_TC_SETUP_QDISC_MQPRIO +#define NEED_NETDEV_XDP_STRUCT +#else /* >= 4,15,0 */ +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_NDO_BPF +#endif /* 4,15,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#define NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#else /* >= 4,16,0 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif /* 4,16,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#define NEED_CONVERT_ART_NS_TO_TSC +#else /* >= 4,17,0 */ +#endif /* 4,17,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NEED_MACVLAN_ACCEL_PRIV +#define NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +#define NEED_MACVLAN_SUPPORTS_DEST_FILTER +#else /* >= 4,18,0 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#endif /* 4,18,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#else /* >= 4,19,0 */ +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_DEVLINK_REGIONS +#define HAVE_TC_ETF_QOPT_OFFLOAD +#define HAVE_DEVLINK_PARAMS +#define HAVE_FLOW_DISSECTOR_KEY_ENC_IP +#endif /* 4,19,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#ifndef KYLIN_KERNEL +#define NEED_NETDEV_TX_SENT_QUEUE +#endif +#else /* >= 4.20.0 */ +#define HAVE_VXLAN_TYPE +#define HAVE_LINKMODE +#endif /* 4.20.0 */ + +#if defined (UOS_KERNEL) || defined (KYLIN_KERNEL44) +#define HAVE_LINKMODE +#endif /* UOS_KERNEL */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#define NEED_INDIRECT_CALL_WRAPPER_MACROS +#else /* >= 5.0.0 */ +#define HAVE_GRETAP_TYPE +#define HAVE_GENEVE_TYPE +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#define NEED_FLOW_MATCH +#else /* >= 5.1.0 */ +#define HAVE_ETHTOOL_200G_BITS +#define HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_DEVLINK_PARAMS_PUBLISH +#define HAVE_DEVLINK_HEALTH +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#else /* >= 5.2.0 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#define NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#define NEED_BUS_FIND_DEVICE_CONST_DATA +#else /* >= 5.3.0 */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5,3,10)) +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#endif /* 5.3.10 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#ifndef KYLIN_KERNEL +#define NEED_SKB_FRAG_OFF_ADD +#define 
NEED_SKB_FRAG_OFF +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,14,241) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#undef NEED_SKB_FRAG_OFF +#endif /* > 4.14.241 && < 4.15.0 */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,19,200) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#undef NEED_SKB_FRAG_OFF +#endif /* > 4.19.200 && < 4.20.0 */ + +#define NEED_FLOW_INDR_BLOCK_CB_REGISTER +#else /* >= 5.4.0 */ +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +#else /* >= 5.5.0 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +#define NEED_DEVLINK_REGION_CREATE_OPS +#define NEED_CPU_LATENCY_QOS_RENAME +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_PTP_FIND_PIN_UNLOCKED +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#define NEED_XSK_UMEM_GET_RX_FRAME_SIZE +#else /* >= 5.8.0 */ +#undef HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT +#endif /* 5.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#define NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#define HAVE_XDP_QUERY_PROG +#define NEED_INDIRECT_CALL_3_AND_4 +#define NEED_MUL_U64_U64_DIV_U64 +#else /* >= 5.9.0 */ +#define HAVE_TASKLET_SETUP +#endif /* 5.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#define NEED_NET_PREFETCH +#define NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#define NEED_XSK_BUFF_DMA_SYNC_FOR_CPU +#define NEED_XSK_BUFF_POOL_RENAME +#else /* >= 5.10.0 */ +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_UDP_TUNNEL_NIC_SHARED +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* 5.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_XSK_BATCHED_DESCRIPTOR_INTERFACES +#define HAVE_PASID_SUPPORT +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#define HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#endif /* 5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#define NEED_EXPORT_INDIRECT_CALLABLE +#else /* >= 5.12.0 */ +#undef HAVE_NDO_UDP_TUNNEL_CALLBACK +#define HAVE_DEVLINK_OPS_CREATE_DEL +#endif /* 5.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,13,0)) +/* HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE + * + * create api changed as part of the commit c2ef2f50ad0c( vfio/mdev: Remove + * kobj from mdev_parent_ops->create()) + * + * if flag is defined use the old API else new API + */ +#define HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +#define HAVE_DEV_IN_MDEV_API +#else /* >= 5.13.0 */ +#define HAVE_XPS_MAP_TYPE +#undef NEED_ETHTOOL_SPRINTF +#endif /* 5.13.0 */ + 
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)) +#else /* >= 5.14.0 */ +#define HAVE_TTY_WRITE_ROOM_UINT +#endif /* 5.14.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#define NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_DEVLINK_REGISTER_SETS_DEV +#define NEED_ETH_HW_ADDR_SET +#else /* >= 5.15.0 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_DEVICE_IN_MDEV_PARENT_OPS +#define HAVE_LMV1_SUPPORT +#define NEED_PCI_IOV_VF_ID +#define HAVE_DEVLINK_SET_STATE_3_PARAM +#endif /* 5.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)) +#else /* >= 5.16.0 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_NOTIFY_REGISTER +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#define HAVE_XSK_BATCHED_RX_ALLOC +#endif /* 5.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)) +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#else /* >=5.17.0*/ +#define HAVE_XDP_DO_FLUSH +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* 5.17.0 */ + +#if defined(EULER_KERNEL) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* EULER_KERNEL */ + + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)) +#else /* >=5.18.0*/ +#undef HAVE_LMV1_SUPPORT +#undef NEED_PCI_IOV_VF_ID +#define HAVE_GTP_SUPPORT +#undef HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#define HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT +#define HAVE_DEVL_PORT_REGISTER +#endif /* 5.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,19,0)) +#else /* >=5.19.0 */ +#define HAVE_NDO_FDB_DEL_EXTACK +#define HAVE_NETIF_SET_TSO_MAX +#endif /* 5.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,0,0)) +#else /* >=6.0.0 */ +#define HAVE_FLOW_DISSECTOR_KEY_PPPOE +#endif /* 6.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0)) +#else /* >=6.1.0 */ +#define HAVE_FLOW_DISSECTOR_KEY_L2TPV3 +#undef NEED_NETIF_NAPI_ADD_NO_WEIGHT +#define HAVE_TTY_TERMIOS_CONST_STRUCT +#endif /* 6.1.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,2,0)) +#else /* >=6.2.0 */ +#define HAVE_SET_NETDEV_DEVLINK_PORT +#undef HAVE_NDO_GET_DEVLINK_PORT +#endif /* 6.2.0 */ + +#endif /* _KCOMPAT_STD_DEFS_H_ */ diff --git a/drivers/net/ethernet/guangruntong/kcompat_ubuntu_defs.h b/drivers/net/ethernet/guangruntong/kcompat_ubuntu_defs.h new file mode 100755 index 0000000000000..9b84e9d0304b4 --- /dev/null +++ b/drivers/net/ethernet/guangruntong/kcompat_ubuntu_defs.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 1999 - 2023 Intel Corporation */ + +#ifndef _KCOMPAT_UBUNTU_DEFS_H_ +#define _KCOMPAT_UBUNTU_DEFS_H_ + +/* This file contains the definitions for the Ubuntu specific distribution of + * the Linux kernel. + * + * It checks the UBUNTU_VERSION_CODE to decide which features are available in + * the target kernel. 
It assumes that kcompat_std_defs.h has already been + * processed, and will #define or #undef the relevant flags based on what + * features were backported by Ubuntu. + */ + +#if !UTS_UBUNTU_RELEASE_ABI +#error "UTS_UBUNTU_RELEASE_ABI is 0 or undefined" +#endif + +#if !UBUNTU_VERSION_CODE +#error "UBUNTU_VERSION_CODE is 0 or undefined" +#endif + +#ifndef UBUNTU_VERSION +#error "UBUNTU_VERSION is undefined" +#endif + +/*****************************************************************************/ +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,15,0,159) && \ + UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,15,0,999)) +#undef NEED_SKB_FRAG_OFF +#endif + +/*****************************************************************************/ +#endif /* _KCOMPAT_UBUNTU_DEFS_H_ */
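The single Ubuntu override above mirrors the stable-branch handling earlier in kcompat_std_defs.h: per the version check, Ubuntu's 4.15 kernels from ABI 159 onward already carry the skb_frag_off() backport, so NEED_SKB_FRAG_OFF must be undefined there to avoid redefining the accessor. A minimal sketch of the fallback that the flag would otherwise enable (assuming the usual kcompat.h arrangement; the field name matches the pre-5.4 skb_frag_t layout):

#ifdef NEED_SKB_FRAG_OFF
/* Pre-5.4 kernels keep the fragment offset in skb_frag_t::page_offset;
 * 5.4 switched skb_frag_t to a bio_vec and added this accessor upstream. */
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
	return frag->page_offset;
}
#endif /* NEED_SKB_FRAG_OFF */

In the usual layout, kcompat.h pulls in kcompat_std_defs.h first and then, only when the Ubuntu kernel headers define UTS_UBUNTU_RELEASE_ABI, this file, so distribution backports can re-tune the flags after the standard version checks have run.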