-rw-r--r--Documentation/ABI/testing/sysfs-class-net-mesh8
-rw-r--r--Documentation/devicetree/bindings/net/can/atmel-can.txt14
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt16
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt91
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-mdio.txt3
-rw-r--r--Documentation/networking/ip-sysctl.txt53
-rw-r--r--Documentation/networking/packet_mmap.txt327
-rw-r--r--Documentation/networking/stmmac.txt33
-rw-r--r--MAINTAINERS4
-rw-r--r--arch/alpha/include/uapi/asm/socket.h2
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/net/bpf_jit_32.c5
-rw-r--r--arch/arm/plat-orion/common.c54
-rw-r--r--arch/avr32/include/uapi/asm/socket.h2
-rw-r--r--arch/cris/include/uapi/asm/socket.h2
-rw-r--r--arch/frv/include/uapi/asm/socket.h2
-rw-r--r--arch/h8300/include/uapi/asm/socket.h2
-rw-r--r--arch/ia64/include/uapi/asm/socket.h2
-rw-r--r--arch/m32r/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/mn10300/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c12
-rw-r--r--arch/powerpc/platforms/chrp/pegasos_eth.c20
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c16
-rw-r--r--arch/s390/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/net/bpf_jit_comp.c6
-rw-r--r--arch/x86/net/bpf_jit_comp.c9
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h2
-rw-r--r--drivers/bcma/core.c8
-rw-r--r--drivers/bcma/main.c5
-rw-r--r--drivers/connector/cn_proc.c25
-rw-r--r--drivers/connector/connector.c12
-rw-r--r--drivers/dma/ioat/dca.c11
-rw-r--r--drivers/firewire/Kconfig6
-rw-r--r--drivers/firewire/net.c462
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c68
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c34
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h14
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c155
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c113
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h11
-rw-r--r--drivers/isdn/capi/capidrv.c3
-rw-r--r--drivers/isdn/divert/isdn_divert.c8
-rw-r--r--drivers/isdn/hisax/fsm.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/media/dvb-core/dvb_net.c10
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/appletalk/Kconfig18
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/caif/Kconfig7
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/caif_shm_u5500.c128
-rw-r--r--drivers/net/caif/caif_shmcore.c747
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c76
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/mcp251x.c35
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c6
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c24
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/a2065.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c1
-rw-r--r--drivers/net/ethernet/amd/ariadne.c1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c4
-rw-r--r--drivers/net/ethernet/amd/ni65.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c1
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c7
-rw-r--r--drivers/net/ethernet/amd/sunlance.c9
-rw-r--r--drivers/net/ethernet/apple/macmace.c16
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c73
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c84
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h47
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c327
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c127
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h87
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h252
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c226
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c278
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c79
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c351
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c77
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c107
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h9
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c389
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h18
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c5
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c77
-rw-r--r--drivers/net/ethernet/cadence/macb.c80
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c778
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c89
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c256
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h53
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h98
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c14
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c74
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c13
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c9
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c7
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c60
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c22
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c8
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c (renamed from drivers/net/ethernet/freescale/fec.c)53
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c159
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h8
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c3
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c131
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h27
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c240
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c408
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c37
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c306
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c134
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c104
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c7
-rw-r--r--drivers/net/ethernet/marvell/Kconfig5
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c239
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c131
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c14
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c5
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c32
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c3
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c12
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/netx-eth.c2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c20
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c42
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c21
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c220
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h81
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c379
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h181
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c55
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c75
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c49
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c68
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c107
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h214
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1297
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c1175
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c255
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c6
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c397
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h220
-rw-r--r--drivers/net/ethernet/s6gmac.c16
-rw-r--r--drivers/net/ethernet/seeq/ether3.c22
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.c267
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/enum.h12
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon.c17
-rw-r--r--drivers/net/ethernet/sfc/filter.c249
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h1
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h97
-rw-r--r--drivers/net/ethernet/sfc/nic.c94
-rw-r--r--drivers/net/ethernet/sfc/ptp.c116
-rw-r--r--drivers/net/ethernet/sfc/rx.c793
-rw-r--r--drivers/net/ethernet/sfc/siena.c25
-rw-r--r--drivers/net/ethernet/sgi/meth.c5
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c90
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h122
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c104
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c151
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c85
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c156
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c148
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c998
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c215
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h74
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c13
-rw-r--r--drivers/net/ethernet/sun/sunqe.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c159
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c48
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c240
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c21
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c27
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c25
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c1
-rw-r--r--drivers/net/fddi/defxx.c9
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c51
-rw-r--r--drivers/net/ieee802154/mrf24j40.c15
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c18
-rw-r--r--drivers/net/irda/bfin_sir.c3
-rw-r--r--drivers/net/irda/nsc-ircc.c6
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c17
-rw-r--r--drivers/net/irda/via-ircc.c6
-rw-r--r--drivers/net/irda/w83977af_ir.c7
-rw-r--r--drivers/net/macvlan.c19
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell.c127
-rw-r--r--drivers/net/phy/mdio-gpio.c12
-rw-r--r--drivers/net/phy/mdio-octeon.c13
-rw-r--r--drivers/net/phy/micrel.c41
-rw-r--r--drivers/net/phy/phy.c66
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c53
-rw-r--r--drivers/net/team/Kconfig12
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c25
-rw-r--r--drivers/net/team/team_mode_broadcast.c14
-rw-r--r--drivers/net/team/team_mode_random.c71
-rw-r--r--drivers/net/team/team_mode_roundrobin.c36
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vxlan.c399
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c72
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h11
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c116
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c41
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h332
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c38
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h49
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c188
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c58
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c60
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h17
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c154
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h363
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/b43.h6
-rw-r--r--drivers/net/wireless/b43/dma.c9
-rw-r--r--drivers/net/wireless/b43/main.c7
-rw-r--r--drivers/net/wireless/b43/phy_ht.c610
-rw-r--r--drivers/net/wireless/b43/phy_ht.h77
-rw-r--r--drivers/net/wireless/b43legacy/dma.c8
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c382
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h25
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h87
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c37
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c25
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h27
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c33
-rw-r--r--drivers/net/wireless/iwlegacy/common.c21
-rw-r--r--drivers/net/wireless/iwlegacy/common.h9
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig11
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c26
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c (renamed from drivers/net/wireless/iwlwifi/pcie/1000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c (renamed from drivers/net/wireless/iwlwifi/pcie/2000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c (renamed from drivers/net/wireless/iwlwifi/pcie/5000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c (renamed from drivers/net/wireless/iwlwifi/pcie/6000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c (renamed from drivers/net/wireless/iwlwifi/pcie/7000.c)63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c347
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c260
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c138
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h53
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c57
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c91
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h115
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c218
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/ethtool.c70
-rw-r--r--drivers/net/wireless/mwifiex/fw.h72
-rw-r--r--drivers/net/wireless/mwifiex/init.c6
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h23
-rw-r--r--drivers/net/wireless/mwifiex/main.c15
-rw-r--r--drivers/net/wireless/mwifiex/main.h9
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c156
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c79
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c10
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c2
-rw-r--r--drivers/net/wireless/mwifiex/util.c5
-rw-r--r--drivers/net/wireless/mwl8k.c111
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/rndis_wlan.c5
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c857
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c116
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c63
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c20
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c3
-rw-r--r--drivers/net/xen-netback/netback.c9
-rw-r--r--drivers/net/xen-netfront.c26
-rw-r--r--drivers/nfc/microread/mei.c2
-rw-r--r--drivers/ptp/ptp_pch.c29
-rw-r--r--drivers/s390/kvm/virtio_ccw.c6
-rw-r--r--drivers/scsi/csiostor/Makefile3
-rw-r--r--drivers/scsi/csiostor/csio_hw.c559
-rw-r--r--drivers/scsi/csiostor/csio_hw.h47
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h175
-rw-r--r--drivers/scsi/csiostor/csio_hw_t4.c403
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c397
-rw-r--r--drivers/scsi/csiostor/csio_init.c48
-rw-r--r--drivers/scsi/csiostor/csio_init.h29
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c10
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_wr.c60
-rw-r--r--drivers/scsi/scsi_netlink.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c21
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c70
-rw-r--r--drivers/ssb/driver_chipcommon.c2
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c41
-rw-r--r--drivers/ssb/driver_mipscore.c25
-rw-r--r--drivers/ssb/driver_pcicore.c15
-rw-r--r--drivers/ssb/embedded.c5
-rw-r--r--drivers/ssb/main.c51
-rw-r--r--drivers/ssb/pci.c97
-rw-r--r--drivers/ssb/pcmcia.c46
-rw-r--r--drivers/ssb/scan.c31
-rw-r--r--drivers/ssb/sprom.c4
-rw-r--r--drivers/ssb/ssb_private.h19
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c12
-rw-r--r--include/linux/cn_proc.h4
-rw-r--r--include/linux/filter.h14
-rw-r--r--include/linux/ieee80211.h52
-rw-r--r--include/linux/if_arp.h12
-rw-r--r--include/linux/if_team.h25
-rw-r--r--include/linux/if_vlan.h2
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mv643xx_eth.h1
-rw-r--r--include/linux/netdev_features.h9
-rw-r--r--include/linux/netdevice.h62
-rw-r--r--include/linux/of_net.h10
-rw-r--r--include/linux/openvswitch.h13
-rw-r--r--include/linux/phy.h10
-rw-r--r--include/linux/platform_data/cpsw.h2
-rw-r--r--include/linux/rtnetlink.h9
-rw-r--r--include/linux/sh_eth.h1
-rw-r--r--include/linux/skbuff.h52
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/ssb/ssb.h6
-rw-r--r--include/linux/tcp.h21
-rw-r--r--include/linux/virtio.h6
-rw-r--r--include/linux/vm_sockets.h23
-rw-r--r--include/net/caif/caif_shm.h26
-rw-r--r--include/net/cfg80211.h126
-rw-r--r--include/net/dn_fib.h28
-rw-r--r--include/net/firewire.h25
-rw-r--r--include/net/gre.h51
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/inet_frag.h14
-rw-r--r--include/net/ip6_tunnel.h21
-rw-r--r--include/net/ip_tunnels.h177
-rw-r--r--include/net/ip_vs.h2
-rw-r--r--include/net/ipip.h87
-rw-r--r--include/net/ipv6.h22
-rw-r--r--include/net/mac80211.h29
-rw-r--r--include/net/netfilter/nf_conntrack_core.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/request_sock.h8
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/sock.h1
-rw-r--r--include/net/tcp.h145
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/linux/cn_proc.h10
-rw-r--r--include/uapi/linux/filter.h3
-rw-r--r--include/uapi/linux/if_ether.h3
-rw-r--r--include/uapi/linux/if_packet.h2
-rw-r--r--include/uapi/linux/neighbour.h3
-rw-r--r--include/uapi/linux/netlink.h2
-rw-r--r--include/uapi/linux/netlink_diag.h42
-rw-r--r--include/uapi/linux/nfc.h16
-rw-r--r--include/uapi/linux/nl80211.h117
-rw-r--r--include/uapi/linux/pkt_sched.h1
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/tcp.h26
-rw-r--r--include/uapi/linux/vm_sockets.h23
-rw-r--r--kernel/audit.c10
-rw-r--r--kernel/signal.c2
-rw-r--r--net/Kconfig1
-rw-r--r--net/atm/lec.h2
-rw-r--r--net/batman-adv/Kconfig14
-rw-r--r--net/batman-adv/Makefile3
-rw-r--r--net/batman-adv/bat_iv_ogm.c5
-rw-r--r--net/batman-adv/debugfs.c18
-rw-r--r--net/batman-adv/distributed-arp-table.c22
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/hard-interface.c66
-rw-r--r--net/batman-adv/hard-interface.h13
-rw-r--r--net/batman-adv/main.c10
-rw-r--r--net/batman-adv/main.h15
-rw-r--r--net/batman-adv/network-coding.c1821
-rw-r--r--net/batman-adv/network-coding.h123
-rw-r--r--net/batman-adv/originator.c10
-rw-r--r--net/batman-adv/packet.h33
-rw-r--r--net/batman-adv/routing.c49
-rw-r--r--net/batman-adv/send.c5
-rw-r--r--net/batman-adv/soft-interface.c281
-rw-r--r--net/batman-adv/soft-interface.h3
-rw-r--r--net/batman-adv/sysfs.c22
-rw-r--r--net/batman-adv/translation-table.c29
-rw-r--r--net/batman-adv/types.h136
-rw-r--r--net/batman-adv/unicast.c6
-rw-r--r--net/batman-adv/vis.c4
-rw-r--r--net/bluetooth/af_bluetooth.c3
-rw-r--r--net/bluetooth/bnep/netdev.c2
-rw-r--r--net/bridge/br_fdb.c14
-rw-r--r--net/bridge/br_if.c1
-rw-r--r--net/bridge/br_mdb.c4
-rw-r--r--net/bridge/br_multicast.c2
-rw-r--r--net/bridge/br_netlink.c21
-rw-r--r--net/bridge/netfilter/ebt_ulog.c7
-rw-r--r--net/bridge/netfilter/ebtable_broute.c4
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/caif/caif_dev.c9
-rw-r--r--net/caif/caif_socket.c22
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/caif/cfcnfg.c19
-rw-r--r--net/caif/cfctrl.c14
-rw-r--r--net/caif/cffrml.c4
-rw-r--r--net/caif/cfmuxl.c4
-rw-r--r--net/caif/cfpkt_skbuff.c8
-rw-r--r--net/caif/cfrfml.c4
-rw-r--r--net/caif/cfserl.c4
-rw-r--r--net/caif/cfsrvl.c13
-rw-r--r--net/caif/chnl_net.c6
-rw-r--r--net/can/af_can.c30
-rw-r--r--net/can/gw.c5
-rw-r--r--net/core/datagram.c4
-rw-r--r--net/core/dev.c80
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/fib_rules.c4
-rw-r--r--net/core/filter.c5
-rw-r--r--net/core/flow.c42
-rw-r--r--net/core/flow_dissector.c68
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/net-procfs.c2
-rw-r--r--net/core/netpoll.c3
-rw-r--r--net/core/rtnetlink.c176
-rw-r--r--net/core/skbuff.c50
-rw-r--r--net/core/sock.c10
-rw-r--r--net/core/utils.c5
-rw-r--r--net/dcb/dcbevent.c1
-rw-r--r--net/dcb/dcbnl.c2
-rw-r--r--net/dccp/ipv4.c5
-rw-r--r--net/dccp/ipv6.c5
-rw-r--r--net/decnet/dn_dev.c4
-rw-r--r--net/decnet/dn_fib.c203
-rw-r--r--net/decnet/dn_route.c43
-rw-r--r--net/decnet/dn_table.c46
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c12
-rw-r--r--net/dsa/dsa.c233
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee802154/6lowpan.c138
-rw-r--r--net/ieee802154/6lowpan.h7
-rw-r--r--net/ieee802154/dgram.c10
-rw-r--r--net/ieee802154/netlink.c8
-rw-r--r--net/ipv4/Kconfig7
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c10
-rw-r--r--net/ipv4/arp.c27
-rw-r--r--net/ipv4/devinet.c83
-rw-r--r--net/ipv4/fib_frontend.c10
-rw-r--r--net/ipv4/gre.c5
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_fragment.c27
-rw-r--r--net/ipv4/inet_lro.c5
-rw-r--r--net/ipv4/ip_fragment.c31
-rw-r--r--net/ipv4/ip_gre.c1517
-rw-r--r--net/ipv4/ip_tunnel.c1035
-rw-r--r--net/ipv4/ip_vti.c42
-rw-r--r--net/ipv4/ipconfig.c13
-rw-r--r--net/ipv4/ipip.c748
-rw-r--r--net/ipv4/ipmr.c12
-rw-r--r--net/ipv4/netfilter/arptable_filter.c4
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c4
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c18
-rw-r--r--net/ipv4/tcp.c268
-rw-r--r--net/ipv4/tcp_input.c606
-rw-r--r--net/ipv4/tcp_ipv4.c108
-rw-r--r--net/ipv4/tcp_minisocks.c44
-rw-r--r--net/ipv4/tcp_output.c367
-rw-r--r--net/ipv4/tcp_timer.c21
-rw-r--r--net/ipv4/tcp_westwood.c2
-rw-r--r--net/ipv4/udp.c115
-rw-r--r--net/ipv4/udp_diag.c6
-rw-r--r--net/ipv6/Kconfig2
-rw-r--r--net/ipv6/addrconf.c108
-rw-r--r--net/ipv6/addrlabel.c12
-rw-r--r--net/ipv6/af_inet6.c7
-rw-r--r--net/ipv6/datagram.c20
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/inet6_connection_sock.c10
-rw-r--r--net/ipv6/ip6_flowlabel.c11
-rw-r--r--net/ipv6/ip6_gre.c62
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_tunnel.c16
-rw-r--r--net/ipv6/ip6mr.c10
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/ip6t_NPT.c11
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c8
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/reassembly.c23
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/sit.c41
-rw-r--r--net/ipv6/syncookies.c3
-rw-r--r--net/ipv6/tcp_ipv6.c56
-rw-r--r--net/ipv6/udp.c13
-rw-r--r--net/ipv6/udp_offload.c8
-rw-r--r--net/iucv/af_iucv.c3
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/mac80211/cfg.c151
-rw-r--r--net/mac80211/debugfs_sta.c31
-rw-r--r--net/mac80211/driver-ops.h7
-rw-r--r--net/mac80211/ht.c52
-rw-r--r--net/mac80211/ibss.c29
-rw-r--r--net/mac80211/ieee80211_i.h26
-rw-r--r--net/mac80211/iface.c14
-rw-r--r--net/mac80211/key.c103
-rw-r--r--net/mac80211/key.h5
-rw-r--r--net/mac80211/main.c55
-rw-r--r--net/mac80211/mesh.c59
-rw-r--r--net/mac80211/mesh.h12
-rw-r--r--net/mac80211/mesh_plink.c37
-rw-r--r--net/mac80211/mlme.c100
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/pm.c117
-rw-r--r--net/mac80211/rc80211_minstrel.c204
-rw-r--r--net/mac80211/rc80211_minstrel.h31
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c12
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c79
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h6
-rw-r--r--net/mac80211/rx.c61
-rw-r--r--net/mac80211/sta_info.c12
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/trace.h11
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c73
-rw-r--r--net/mac80211/vht.c212
-rw-r--r--net/mac802154/mac802154.h1
-rw-r--r--net/mac802154/mac_cmd.c1
-rw-r--r--net/mac802154/mib.c9
-rw-r--r--net/mac802154/wpan.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c47
-rw-r--r--net/netfilter/nf_conntrack_netlink.c100
-rw-r--r--net/netfilter/nf_conntrack_standalone.c16
-rw-r--r--net/netfilter/nfnetlink.c7
-rw-r--r--net/netfilter/nfnetlink_log.c4
-rw-r--r--net/netfilter/nfnetlink_queue_core.c98
-rw-r--r--net/netlink/Kconfig10
-rw-r--r--net/netlink/Makefile3
-rw-r--r--net/netlink/af_netlink.c63
-rw-r--r--net/netlink/af_netlink.h62
-rw-r--r--net/netlink/diag.c188
-rw-r--r--net/nfc/llcp/commands.c205
-rw-r--r--net/nfc/llcp/llcp.c112
-rw-r--r--net/nfc/llcp/llcp.h36
-rw-r--r--net/nfc/llcp/sock.c133
-rw-r--r--net/nfc/netlink.c172
-rw-r--r--net/nfc/nfc.h14
-rw-r--r--net/openvswitch/datapath.c15
-rw-r--r--net/openvswitch/datapath.h2
-rw-r--r--net/openvswitch/flow.c6
-rw-r--r--net/openvswitch/vport-internal_dev.c13
-rw-r--r--net/openvswitch/vport.h4
-rw-r--r--net/packet/af_packet.c114
-rw-r--r--net/packet/internal.h3
-rw-r--r--net/phonet/pn_netlink.c4
-rw-r--r--net/rfkill/rfkill-regulator.c2
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/cls_api.c15
-rw-r--r--net/sched/sch_api.c44
-rw-r--r--net/sched/sch_htb.c31
-rw-r--r--net/sctp/socket.c3
-rw-r--r--net/tipc/netlink.c6
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/vmw_vsock/vmci_transport.c16
-rw-r--r--net/vmw_vsock/vmci_transport.h3
-rw-r--r--net/wireless/ap.c62
-rw-r--r--net/wireless/core.c73
-rw-r--r--net/wireless/core.h24
-rw-r--r--net/wireless/mesh.c15
-rw-r--r--net/wireless/mlme.c230
-rw-r--r--net/wireless/nl80211.c1857
-rw-r--r--net/wireless/nl80211.h68
-rw-r--r--net/wireless/rdev-ops.h20
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/sme.c26
-rw-r--r--net/wireless/sysfs.c25
-rw-r--r--net/wireless/trace.h46
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xfrm/xfrm_policy.c23
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/selinux/netlink.c3
-rw-r--r--tools/Makefile11
-rw-r--r--tools/net/Makefile15
-rw-r--r--tools/net/bpf_jit_disasm.c199
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/net/Makefile19
-rw-r--r--tools/testing/selftests/net/psock_fanout.c394
-rw-r--r--tools/testing/selftests/net/run_afpackettests16
-rw-r--r--tools/testing/selftests/net/run_netsocktests12
-rw-r--r--tools/testing/selftests/net/socket.c92
836 files changed, 32758 insertions, 15018 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index bc41da61608d..bdcd8b4e38f2 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -67,6 +67,14 @@ Description:
Defines the penalty which will be applied to an
originator message's tq-field on every hop.
+What: /sys/class/net/<mesh_iface>/mesh/network_coding
+Date: Nov 2012
+Contact: Martin Hundeboll <martin@hundeboll.net>
+Description:
+ Controls whether Network Coding (opportunistically
+ combining overheard packets so that fewer wifi
+ transmissions deliver the same content) is enabled or not.
+
What: /sys/class/net/<mesh_iface>/mesh/orig_interval
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
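
As a minimal sketch of how the network_coding attribute documented above can
be toggled from user space (not taken from the patch itself; the batman-adv
soft-interface name "bat0" is an assumption):

#include <stdio.h>

int main(void)
{
	/* Path follows the ABI entry above; "bat0" is an assumed interface. */
	FILE *f = fopen("/sys/class/net/bat0/mesh/network_coding", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* "1" enables network coding, "0" disables it */
	fclose(f);
	return 0;
}
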
diff --git a/Documentation/devicetree/bindings/net/can/atmel-can.txt b/Documentation/devicetree/bindings/net/can/atmel-can.txt
new file mode 100644
index 000000000000..72cf0c5daff4
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/atmel-can.txt
@@ -0,0 +1,14 @@
+* AT91 CAN *
+
+Required properties:
+ - compatible: Should be "atmel,at91sam9263-can" or "atmel,at91sam9x5-can"
+ - reg: Should contain CAN controller registers location and length
+ - interrupts: Should contain IRQ line for the CAN controller
+
+Example:
+
+ can0: can@f000c000 {
+ compatible = "atmel,at91sam9x5-can";
+ reg = <0xf000c000 0x300>;
+ interrupts = <40 4 5>;
+ };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index ecfdf756d10f..4f2ca6b4a182 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -15,16 +15,22 @@ Required properties:
- mac_control : Specifies Default MAC control register content
for the specific platform
- slaves : Specifies number for slaves
-- cpts_active_slave : Specifies the slave to use for time stamping
+- active_slave : Specifies the slave to use for time stamping,
+ ethtool and SIOCGMIIPHY
- cpts_clock_mult : Numerator to convert input clock ticks into nanoseconds
- cpts_clock_shift : Denominator to convert input clock ticks into nanoseconds
-- phy_id : Specifies slave phy id
-- mac-address : Specifies slave MAC address
Optional properties:
- ti,hwmods : Must be "cpgmac0"
- no_bd_ram : Must be 0 or 1
- dual_emac : Specifies Switch to act as Dual EMAC
+
+Slave Properties:
+Required properties:
+- phy_id : Specifies slave phy id
+- mac-address : Specifies slave MAC address
+
+Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
Note: "ti,hwmods" field is used to fetch the base address and irq
@@ -47,7 +53,7 @@ Examples:
rx_descs = <64>;
mac_control = <0x20>;
slaves = <2>;
- cpts_active_slave = <0>;
+ active_slave = <0>;
cpts_clock_mult = <0x80000000>;
cpts_clock_shift = <29>;
cpsw_emac0: slave@0 {
@@ -73,7 +79,7 @@ Examples:
rx_descs = <64>;
mac_control = <0x20>;
slaves = <2>;
- cpts_active_slave = <0>;
+ active_slave = <0>;
cpts_clock_mult = <0x80000000>;
cpts_clock_shift = <29>;
cpsw_emac0: slave@0 {
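
The renamed active_slave property above also selects which slave answers
ethtool and SIOCGMIIPHY requests. A hedged user-space sketch of such a query
(the interface name "eth0" and the UDP socket used as ioctl carrier are
assumptions, not part of the binding or the patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* SIOCGMIIPHY returns the PHY address of the active slave */
	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {
		perror("SIOCGMIIPHY");
		close(fd);
		return 1;
	}
	printf("active slave PHY address: %u\n", mii->phy_id);
	close(fd);
	return 0;
}
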
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
new file mode 100644
index 000000000000..49f4f7ae3f51
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -0,0 +1,91 @@
+Marvell Distributed Switch Architecture Device Tree Bindings
+------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "marvell,dsa"
+- #address-cells : Must be 2, first cell is the address on the MDIO bus
+ and second cell is the address in the switch tree.
+ Second cell is used only when cascading/chaining.
+- #size-cells : Must be 0
+- dsa,ethernet : Should be a phandle to a valid Ethernet device node
+- dsa,mii-bus : Should be a phandle to a valid MDIO bus device node
+
+Optional properties:
+- interrupts : property with a value describing the switch
+ interrupt number (not supported by the driver)
+
+A DSA node can contain multiple switch chips which are therefore child nodes of
+the parent DSA node. The maximum number of allowed child nodes is 4
+(DSA_MAX_SWITCHES).
+Each of these switch child nodes should have the following required properties:
+
+- reg : Describes the switch address on the MII bus
+- #address-cells : Must be 1
+- #size-cells : Must be 0
+
+A switch may have multiple "port" child nodes.
+
+Each port child node must have the following mandatory properties:
+- reg : Describes the port address in the switch
+- label : Describes the label associated with this port, special
+ labels are "cpu" to indicate a CPU port and "dsa" to
+ indicate an uplink/downlink port.
+
+Note that a port labelled "dsa" will imply checking for the uplink phandle
+described below.
+
+Optional property:
+- link : Should be a phandle to another switch's DSA port.
+ This property is only used when switches are being
+ chained/cascaded together.
+
+Example:
+
+ dsa@0 {
+ compatible = "marvell,dsa";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ interrupts = <10>;
+ dsa,ethernet = <&ethernet0>;
+ dsa,mii-bus = <&mii_bus0>;
+
+ switch@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <16 0>; /* MDIO address 16, switch 0 in tree */
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ };
+
+ switch0uplink: port@6 {
+ reg = <6>;
+ label = "dsa";
+ link = <&switch1uplink>;
+ };
+ };
+
+ switch@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <17 1>; /* MDIO address 17, switch 1 in tree */
+
+ switch1uplink: port@0 {
+ reg = <0>;
+ label = "dsa";
+ link = <&switch0uplink>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
index 34e7aafa321c..052b5f28a624 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -9,6 +9,9 @@ Required properties:
- compatible: "marvell,orion-mdio"
- reg: address and length of the SMI register
+Optional properties:
+- interrupts: interrupt line number for the SMI error/done interrupt
+
The child nodes of the MDIO driver are the individual PHY devices
connected to this MDIO bus. They must have a "reg" property given the
PHY address on the MDIO bus.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index dc2dc87d2557..f98ca633b528 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -29,7 +29,7 @@ route/max_size - INTEGER
neigh/default/gc_thresh1 - INTEGER
Minimum number of entries to keep. Garbage collector will not
purge entries if there are fewer than this number.
- Default: 256
+ Default: 128
neigh/default/gc_thresh3 - INTEGER
Maximum number of neighbor entries allowed. Increase this
@@ -175,14 +175,6 @@ tcp_congestion_control - STRING
is inherited.
[see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ]
-tcp_cookie_size - INTEGER
- Default size of TCP Cookie Transactions (TCPCT) option, that may be
- overridden on a per socket basis by the TCPCT socket option.
- Values greater than the maximum (16) are interpreted as the maximum.
- Values greater than zero and less than the minimum (8) are interpreted
- as the minimum. Odd values are interpreted as the next even value.
- Default: 0 (off).
-
tcp_dsack - BOOLEAN
Allows TCP to send "duplicate" SACKs.
@@ -190,7 +182,9 @@ tcp_early_retrans - INTEGER
Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
for triggering fast retransmit when the amount of outstanding data is
small and when no previously unsent data can be transmitted (such
- that limited transmit could be used).
+ that limited transmit could be used). Also controls the use of
+ Tail loss probe (TLP) that converts RTOs occurring due to tail
+ losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01).
Possible values:
0 disables ER
1 enables ER
@@ -198,7 +192,9 @@ tcp_early_retrans - INTEGER
by a fourth of RTT. This mitigates connection falsely
recovers when network has a small degree of reordering
(less than 3 packets).
- Default: 2
+ 3 enables delayed ER and TLP.
+ 4 enables TLP only.
+ Default: 3
tcp_ecn - INTEGER
Control use of Explicit Congestion Notification (ECN) by TCP.
@@ -229,36 +225,13 @@ tcp_fin_timeout - INTEGER
Default: 60 seconds
tcp_frto - INTEGER
- Enables Forward RTO-Recovery (F-RTO) defined in RFC4138.
+ Enables Forward RTO-Recovery (F-RTO) defined in RFC5682.
F-RTO is an enhanced recovery algorithm for TCP retransmission
- timeouts. It is particularly beneficial in wireless environments
- where packet loss is typically due to random radio interference
- rather than intermediate router congestion. F-RTO is sender-side
- only modification. Therefore it does not require any support from
- the peer.
-
- If set to 1, basic version is enabled. 2 enables SACK enhanced
- F-RTO if flow uses SACK. The basic version can be used also when
- SACK is in use though scenario(s) with it exists where F-RTO
- interacts badly with the packet counting of the SACK enabled TCP
- flow.
-
-tcp_frto_response - INTEGER
- When F-RTO has detected that a TCP retransmission timeout was
- spurious (i.e, the timeout would have been avoided had TCP set a
- longer retransmission timeout), TCP has several options what to do
- next. Possible values are:
- 0 Rate halving based; a smooth and conservative response,
- results in halved cwnd and ssthresh after one RTT
- 1 Very conservative response; not recommended because even
- though being valid, it interacts poorly with the rest of
- Linux TCP, halves cwnd and ssthresh immediately
- 2 Aggressive response; undoes congestion control measures
- that are now known to be unnecessary (ignoring the
- possibility of a lost retransmission that would require
- TCP to be more cautious), cwnd and ssthresh are restored
- to the values prior timeout
- Default: 0 (rate halving based)
+ timeouts. It is particularly beneficial in networks where the
+ RTT fluctuates (e.g., wireless). F-RTO is a sender-side-only
+ modification. It does not require any support from the peer.
+
+ By default it's enabled with a non-zero value. 0 disables F-RTO.
tcp_keepalive_time - INTEGER
How often TCP sends out keepalive messages when keepalive is enabled.
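
As a concrete illustration of the tcp_early_retrans and tcp_frto settings
described above, the following sketch writes the documented defaults to
/proc/sys/net/ipv4 from C; it is only an example and the values shown are
assumptions to adjust per workload:

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* 3 = delayed Early Retransmit plus Tail Loss Probe (the default) */
	write_sysctl("/proc/sys/net/ipv4/tcp_early_retrans", "3\n");
	/* any non-zero value enables F-RTO, 0 disables it */
	write_sysctl("/proc/sys/net/ipv4/tcp_frto", "1\n");
	return 0;
}
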
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 94444b152fbc..65efb85e49de 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -685,6 +685,333 @@ int main(int argc, char **argp)
}
-------------------------------------------------------------------------------
++ AF_PACKET TPACKET_V3 example
+-------------------------------------------------------------------------------
+
+AF_PACKET's TPACKET_V3 ring buffer can be configured to use non-static frame
+sizes by doing its own memory management. It is based on blocks where polling
+works on a per-block basis instead of per ring as in TPACKET_V2 and its
+predecessor.
+
+It is said that TPACKET_V3 brings the following benefits:
+ *) ~15 - 20% reduction in CPU-usage
+ *) ~20% increase in packet capture rate
+ *) ~2x increase in packet density
+ *) Port aggregation analysis
+ *) Non static frame size to capture entire packet payload
+
+So it seems to be a good candidate to be used with packet fanout.
+
+Minimal example code by Daniel Borkmann based on Chetan Loke's lolpcap (compile
+it with gcc -Wall -O2 blob.c, and try things like "./a.out eth0", etc.):
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <poll.h>
+#include <unistd.h>
+#include <signal.h>
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <linux/if_packet.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+#define BLOCK_SIZE (1 << 22)
+#define FRAME_SIZE 2048
+
+#define NUM_BLOCKS 64
+#define NUM_FRAMES ((BLOCK_SIZE * NUM_BLOCKS) / FRAME_SIZE)
+
+#define BLOCK_RETIRE_TOV_IN_MS 64
+#define BLOCK_PRIV_AREA_SZ 13
+
+#define ALIGN_8(x) (((x) + 8 - 1) & ~(8 - 1))
+
+#define BLOCK_STATUS(x) ((x)->h1.block_status)
+#define BLOCK_NUM_PKTS(x) ((x)->h1.num_pkts)
+#define BLOCK_O2FP(x) ((x)->h1.offset_to_first_pkt)
+#define BLOCK_LEN(x) ((x)->h1.blk_len)
+#define BLOCK_SNUM(x) ((x)->h1.seq_num)
+#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
+#define BLOCK_PRIV(x) ((void *) ((uint8_t *) (x) + BLOCK_O2PRIV(x)))
+#define BLOCK_HDR_LEN (ALIGN_8(sizeof(struct block_desc)))
+#define BLOCK_PLUS_PRIV(sz_pri) (BLOCK_HDR_LEN + ALIGN_8((sz_pri)))
+
+#ifndef likely
+# define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+# define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+struct block_desc {
+ uint32_t version;
+ uint32_t offset_to_priv;
+ struct tpacket_hdr_v1 h1;
+};
+
+struct ring {
+ struct iovec *rd;
+ uint8_t *map;
+ struct tpacket_req3 req;
+};
+
+static unsigned long packets_total = 0, bytes_total = 0;
+static sig_atomic_t sigint = 0;
+
+void sighandler(int num)
+{
+ sigint = 1;
+}
+
+static int setup_socket(struct ring *ring, char *netdev)
+{
+ int err, i, fd, v = TPACKET_V3;
+ struct sockaddr_ll ll;
+
+ fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (fd < 0) {
+ perror("socket");
+ exit(1);
+ }
+
+ err = setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
+ if (err < 0) {
+ perror("setsockopt");
+ exit(1);
+ }
+
+ memset(&ring->req, 0, sizeof(ring->req));
+ ring->req.tp_block_size = BLOCK_SIZE;
+ ring->req.tp_frame_size = FRAME_SIZE;
+ ring->req.tp_block_nr = NUM_BLOCKS;
+ ring->req.tp_frame_nr = NUM_FRAMES;
+ ring->req.tp_retire_blk_tov = BLOCK_RETIRE_TOV_IN_MS;
+ ring->req.tp_sizeof_priv = BLOCK_PRIV_AREA_SZ;
+ ring->req.tp_feature_req_word |= TP_FT_REQ_FILL_RXHASH;
+
+ err = setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &ring->req,
+ sizeof(ring->req));
+ if (err < 0) {
+ perror("setsockopt");
+ exit(1);
+ }
+
+ ring->map = mmap(NULL, ring->req.tp_block_size * ring->req.tp_block_nr,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
+ fd, 0);
+ if (ring->map == MAP_FAILED) {
+ perror("mmap");
+ exit(1);
+ }
+
+ ring->rd = malloc(ring->req.tp_block_nr * sizeof(*ring->rd));
+ assert(ring->rd);
+ for (i = 0; i < ring->req.tp_block_nr; ++i) {
+ ring->rd[i].iov_base = ring->map + (i * ring->req.tp_block_size);
+ ring->rd[i].iov_len = ring->req.tp_block_size;
+ }
+
+ memset(&ll, 0, sizeof(ll));
+ ll.sll_family = PF_PACKET;
+ ll.sll_protocol = htons(ETH_P_ALL);
+ ll.sll_ifindex = if_nametoindex(netdev);
+ ll.sll_hatype = 0;
+ ll.sll_pkttype = 0;
+ ll.sll_halen = 0;
+
+ err = bind(fd, (struct sockaddr *) &ll, sizeof(ll));
+ if (err < 0) {
+ perror("bind");
+ exit(1);
+ }
+
+ return fd;
+}
+
+#ifdef __checked
+static uint64_t prev_block_seq_num = 0;
+
+void assert_block_seq_num(struct block_desc *pbd)
+{
+ if (unlikely(prev_block_seq_num + 1 != BLOCK_SNUM(pbd))) {
+ printf("prev_block_seq_num:%"PRIu64", expected seq:%"PRIu64" != "
+ "actual seq:%"PRIu64"\n", prev_block_seq_num,
+ prev_block_seq_num + 1, (uint64_t) BLOCK_SNUM(pbd));
+ exit(1);
+ }
+
+ prev_block_seq_num = BLOCK_SNUM(pbd);
+}
+
+static void assert_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
+{
+ if (BLOCK_NUM_PKTS(pbd)) {
+ if (unlikely(bytes != BLOCK_LEN(pbd))) {
+ printf("block:%u with %upackets, expected len:%u != actual len:%u\n",
+ block_num, BLOCK_NUM_PKTS(pbd), bytes, BLOCK_LEN(pbd));
+ exit(1);
+ }
+ } else {
+ if (unlikely(BLOCK_LEN(pbd) != BLOCK_PLUS_PRIV(BLOCK_PRIV_AREA_SZ))) {
+ printf("block:%u, expected len:%lu != actual len:%u\n",
+ block_num, BLOCK_HDR_LEN, BLOCK_LEN(pbd));
+ exit(1);
+ }
+ }
+}
+
+static void assert_block_header(struct block_desc *pbd, const int block_num)
+{
+ uint32_t block_status = BLOCK_STATUS(pbd);
+
+ if (unlikely((block_status & TP_STATUS_USER) == 0)) {
+ printf("block:%u, not in TP_STATUS_USER\n", block_num);
+ exit(1);
+ }
+
+ assert_block_seq_num(pbd);
+}
+#else
+static inline void assert_block_header(struct block_desc *pbd, const int block_num)
+{
+}
+static void assert_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
+{
+}
+#endif
+
+static void display(struct tpacket3_hdr *ppd)
+{
+ struct ethhdr *eth = (struct ethhdr *) ((uint8_t *) ppd + ppd->tp_mac);
+ struct iphdr *ip = (struct iphdr *) ((uint8_t *) eth + ETH_HLEN);
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ struct sockaddr_in ss, sd;
+ char sbuff[NI_MAXHOST], dbuff[NI_MAXHOST];
+
+ memset(&ss, 0, sizeof(ss));
+ ss.sin_family = PF_INET;
+ ss.sin_addr.s_addr = ip->saddr;
+ getnameinfo((struct sockaddr *) &ss, sizeof(ss),
+ sbuff, sizeof(sbuff), NULL, 0, NI_NUMERICHOST);
+
+ memset(&sd, 0, sizeof(sd));
+ sd.sin_family = PF_INET;
+ sd.sin_addr.s_addr = ip->daddr;
+ getnameinfo((struct sockaddr *) &sd, sizeof(sd),
+ dbuff, sizeof(dbuff), NULL, 0, NI_NUMERICHOST);
+
+ printf("%s -> %s, ", sbuff, dbuff);
+ }
+
+ printf("rxhash: 0x%x\n", ppd->hv1.tp_rxhash);
+}
+
+static void walk_block(struct block_desc *pbd, const int block_num)
+{
+ int num_pkts = BLOCK_NUM_PKTS(pbd), i;
+ unsigned long bytes = 0;
+ unsigned long bytes_with_padding = BLOCK_PLUS_PRIV(BLOCK_PRIV_AREA_SZ);
+ struct tpacket3_hdr *ppd;
+
+ assert_block_header(pbd, block_num);
+
+ ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + BLOCK_O2FP(pbd));
+ for (i = 0; i < num_pkts; ++i) {
+ bytes += ppd->tp_snaplen;
+ if (ppd->tp_next_offset)
+ bytes_with_padding += ppd->tp_next_offset;
+ else
+ bytes_with_padding += ALIGN_8(ppd->tp_snaplen + ppd->tp_mac);
+
+ display(ppd);
+
+ ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset);
+ __sync_synchronize();
+ }
+
+ assert_block_len(pbd, bytes_with_padding, block_num);
+
+ packets_total += num_pkts;
+ bytes_total += bytes;
+}
+
+void flush_block(struct block_desc *pbd)
+{
+ BLOCK_STATUS(pbd) = TP_STATUS_KERNEL;
+ __sync_synchronize();
+}
+
+static void teardown_socket(struct ring *ring, int fd)
+{
+ munmap(ring->map, ring->req.tp_block_size * ring->req.tp_block_nr);
+ free(ring->rd);
+ close(fd);
+}
+
+int main(int argc, char **argp)
+{
+ int fd, err;
+ socklen_t len;
+ struct ring ring;
+ struct pollfd pfd;
+ unsigned int block_num = 0;
+ struct block_desc *pbd;
+ struct tpacket_stats_v3 stats;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s INTERFACE\n", argp[0]);
+ return EXIT_FAILURE;
+ }
+
+ signal(SIGINT, sighandler);
+
+ memset(&ring, 0, sizeof(ring));
+ fd = setup_socket(&ring, argp[argc - 1]);
+ assert(fd > 0);
+
+ memset(&pfd, 0, sizeof(pfd));
+ pfd.fd = fd;
+ pfd.events = POLLIN | POLLERR;
+ pfd.revents = 0;
+
+ while (likely(!sigint)) {
+ pbd = (struct block_desc *) ring.rd[block_num].iov_base;
+retry_block:
+ if ((BLOCK_STATUS(pbd) & TP_STATUS_USER) == 0) {
+ poll(&pfd, 1, -1);
+ goto retry_block;
+ }
+
+ walk_block(pbd, block_num);
+ flush_block(pbd);
+ block_num = (block_num + 1) % NUM_BLOCKS;
+ }
+
+ len = sizeof(stats);
+ err = getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &stats, &len);
+ if (err < 0) {
+ perror("getsockopt");
+ exit(1);
+ }
+
+ fflush(stdout);
+ printf("\nReceived %u packets, %lu bytes, %u dropped, freeze_q_cnt: %u\n",
+ stats.tp_packets, bytes_total, stats.tp_drops,
+ stats.tp_freeze_q_cnt);
+
+ teardown_socket(&ring, fd);
+ return 0;
+}
+
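+A possible extension of the example above, not required by TPACKET_V3 itself:
+several such sockets can share the traffic of one device by joining a packet
+fanout group once setup_socket() has returned. The group id below is an
+arbitrary assumption; PACKET_FANOUT_HASH lets the kernel spread flows across
+the member sockets by flow hash.
+
+static int join_fanout_group(int fd)
+{
+ /* hypothetical group id, any 16 bit value shared by all member sockets */
+ int group_id = 23;
+ int fanout_arg = (group_id & 0xffff) | (PACKET_FANOUT_HASH << 16);
+
+ return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
+ &fanout_arg, sizeof(fanout_arg));
+}
+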
+-------------------------------------------------------------------------------
+ PACKET_TIMESTAMP
-------------------------------------------------------------------------------
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index f9fa6db40a52..8efe0b3c8b83 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -326,6 +326,35 @@ To enter in Tx LPI mode the driver needs to have a software timer
that enable and disable the LPI mode when there is nothing to be
transmitted.
-7) TODO:
+7) Extended descriptors
+The extended descriptors give us information about the received Ethernet
+payload when it is carrying PTP packets or TCP/UDP/ICMP over IP.
+These are not available on GMAC Synopsys chips older than 3.50.
+At probe time the driver decides whether they can actually be used.
+This support is also mandatory for PTPv2 because the extra descriptors
+6 and 7 are used for saving the hardware timestamps.
+
+8) Precision Time Protocol (PTP)
+The driver supports the IEEE 1588-2002 Precision Time Protocol (PTP),
+which enables precise synchronization of clocks in measurement and
+control systems implemented with technologies such as network
+communication.
+
+In addition to the basic timestamp features defined in IEEE 1588-2002,
+new GMAC cores support the advanced timestamp features of IEEE 1588-2008,
+which can be enabled when configuring the kernel.
+
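+For reference, hardware timestamping is typically requested from user space
+through the SIOCSHWTSTAMP ioctl and struct hwtstamp_config (linux/net_tstamp.h).
+A minimal sketch, assuming a PTP-capable interface and filtering only PTPv2
+event packets, could look like this:
+
+ #include <string.h>
+ #include <sys/ioctl.h>
+ #include <net/if.h>
+ #include <linux/net_tstamp.h>
+ #include <linux/sockios.h>
+
+ static int enable_hw_timestamping(int sock, const char *ifname)
+ {
+ struct hwtstamp_config cfg;
+ struct ifreq ifr;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.tx_type = HWTSTAMP_TX_ON; /* timestamp transmitted PTP frames */
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* PTPv2 event packets */
+
+ memset(&ifr, 0, sizeof(ifr));
+ strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+ ifr.ifr_data = (char *) &cfg;
+
+ /* sock is any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
+ return ioctl(sock, SIOCSHWTSTAMP, &ifr);
+ }
+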
+9) SGMII/RGMII support
+New GMAC devices provide their own way to manage RGMII/SGMII.
+This information is available at run-time by looking at the
+HW capability register. This means that the stmmac can manage
+auto-negotiation and link status without using PHYLIB.
+In fact, the HW provides a subset of extended registers to
+restart the ANE and to verify Full/Half duplex mode and speed.
+Thanks to these registers it is also possible to look at the
+auto-negotiated Link Partner Ability.
+
+10) TODO:
o XGMAC is not supported.
- o Add the PTP - precision time protocol
+ o Complete the TBI & RTBI support.
+ o Extended VLAN support for the 3.70a SYNP GMAC.
diff --git a/MAINTAINERS b/MAINTAINERS
index 836a6183c37f..d3a888cfa0ea 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6330,6 +6330,7 @@ F: drivers/acpi/apei/erst.c
PTP HARDWARE CLOCK SUPPORT
M: Richard Cochran <richardcochran@gmail.com>
+L: netdev@vger.kernel.org
S: Maintained
W: http://linuxptp.sourceforge.net/
F: Documentation/ABI/testing/sysfs-ptp
@@ -6461,6 +6462,7 @@ S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
+M: Shahed Shaikh <shahed.shaikh@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com
@@ -8515,7 +8517,7 @@ F: drivers/usb/gadget/*uvc*.c
F: drivers/usb/gadget/webcam.c
USB WIRELESS RNDIS DRIVER (rndis_wlan)
-M: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+M: Jussi Kivilinna <jussi.kivilinna@iki.fi>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/rndis_wlan.c
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index c5195524d1ef..eee6ea76bdaf 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -79,4 +79,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 0957645b73af..91fe4f148f80 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -349,7 +349,7 @@
rx_descs = <64>;
mac_control = <0x20>;
slaves = <2>;
- cpts_active_slave = <0>;
+ active_slave = <0>;
cpts_clock_mult = <0x80000000>;
cpts_clock_shift = <29>;
reg = <0x4a100000 0x800
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a0bd8a755bdf..1a643ee8e082 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -918,9 +918,8 @@ void bpf_jit_compile(struct sk_filter *fp)
#endif
if (bpf_jit_enable > 1)
- print_hex_dump(KERN_INFO, "BPF JIT code: ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
- alloc_size, false);
+ /* there are 2 passes here */
+ bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
fp->bpf_func = (void *)ctx.target;
out:
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 2d4b6414609f..251f827271e9 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -238,6 +238,7 @@ static __init void ge_complete(
struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
struct resource *orion_ge_resource, unsigned long irq,
struct platform_device *orion_ge_shared,
+ struct platform_device *orion_ge_mvmdio,
struct mv643xx_eth_platform_data *eth_data,
struct platform_device *orion_ge)
{
@@ -247,6 +248,8 @@ static __init void ge_complete(
orion_ge->dev.platform_data = eth_data;
platform_device_register(orion_ge_shared);
+ if (orion_ge_mvmdio)
+ platform_device_register(orion_ge_mvmdio);
platform_device_register(orion_ge);
}
@@ -258,8 +261,6 @@ struct mv643xx_eth_shared_platform_data orion_ge00_shared_data;
static struct resource orion_ge00_shared_resources[] = {
{
.name = "ge00 base",
- }, {
- .name = "ge00 err irq",
},
};
@@ -271,6 +272,19 @@ static struct platform_device orion_ge00_shared = {
},
};
+static struct resource orion_ge_mvmdio_resources[] = {
+ {
+ .name = "ge00 mvmdio base",
+ }, {
+ .name = "ge00 mvmdio err irq",
+ },
+};
+
+static struct platform_device orion_ge_mvmdio = {
+ .name = "orion-mdio",
+ .id = -1,
+};
+
static struct resource orion_ge00_resources[] = {
{
.name = "ge00 irq",
@@ -295,26 +309,25 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned int tx_csum_limit)
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, irq_err);
+ mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
+ fill_resources(&orion_ge_mvmdio, orion_ge_mvmdio_resources,
+ mapbase + 0x2004, 0x84 - 1, irq_err);
orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge00_shared_data,
orion_ge00_resources, irq, &orion_ge00_shared,
+ &orion_ge_mvmdio,
eth_data, &orion_ge00);
}
/*****************************************************************************
* GE01
****************************************************************************/
-struct mv643xx_eth_shared_platform_data orion_ge01_shared_data = {
- .shared_smi = &orion_ge00_shared,
-};
+struct mv643xx_eth_shared_platform_data orion_ge01_shared_data;
static struct resource orion_ge01_shared_resources[] = {
{
.name = "ge01 base",
- }, {
- .name = "ge01 err irq",
- },
+ }
};
static struct platform_device orion_ge01_shared = {
@@ -349,26 +362,23 @@ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned int tx_csum_limit)
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, irq_err);
+ mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge01_shared_data,
orion_ge01_resources, irq, &orion_ge01_shared,
+ NULL,
eth_data, &orion_ge01);
}
/*****************************************************************************
* GE10
****************************************************************************/
-struct mv643xx_eth_shared_platform_data orion_ge10_shared_data = {
- .shared_smi = &orion_ge00_shared,
-};
+struct mv643xx_eth_shared_platform_data orion_ge10_shared_data;
static struct resource orion_ge10_shared_resources[] = {
{
.name = "ge10 base",
- }, {
- .name = "ge10 err irq",
- },
+ }
};
static struct platform_device orion_ge10_shared = {
@@ -402,24 +412,21 @@ void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long irq_err)
{
fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, irq_err);
+ mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
ge_complete(&orion_ge10_shared_data,
orion_ge10_resources, irq, &orion_ge10_shared,
+ NULL,
eth_data, &orion_ge10);
}
/*****************************************************************************
* GE11
****************************************************************************/
-struct mv643xx_eth_shared_platform_data orion_ge11_shared_data = {
- .shared_smi = &orion_ge00_shared,
-};
+struct mv643xx_eth_shared_platform_data orion_ge11_shared_data;
static struct resource orion_ge11_shared_resources[] = {
{
.name = "ge11 base",
- }, {
- .name = "ge11 err irq",
},
};
@@ -454,9 +461,10 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long irq_err)
{
fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, irq_err);
+ mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
ge_complete(&orion_ge11_shared_data,
orion_ge11_resources, irq, &orion_ge11_shared,
+ NULL,
eth_data, &orion_ge11);
}
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 51c6401582ea..37401f535126 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index 50692b738c75..ba409c9947bc 100644
--- a/arch/cris/include/uapi/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
@@ -74,6 +74,8 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index 595391f0f98c..31dbb5d8e13d 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -72,5 +72,7 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
index 43e32621da7d..5d1c6d0870e6 100644
--- a/arch/h8300/include/uapi/asm/socket.h
+++ b/arch/h8300/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index c567adc8bea5..6b4329f18b29 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -81,4 +81,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 519afa2755db..2a3b59e0e171 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 47132f44c955..3b211507be7f 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 5c7c7c988544..b4ce844c9391 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 526e4b9aece0..70c512a386f7 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -71,6 +71,8 @@
#define SO_LOCK_FILTER 0x4025
+#define SO_SELECT_ERR_QUEUE 0x4026
+
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a26dcaece509..a36daf3c6f9a 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -79,4 +79,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e834f1ec23c8..c427ae36374a 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -671,16 +671,12 @@ void bpf_jit_compile(struct sk_filter *fp)
}
if (bpf_jit_enable > 1)
- pr_info("flen=%d proglen=%u pass=%d image=%p\n",
- flen, proglen, pass, image);
+ /* Note that we output the base address of the code_base
+ * rather than image, since opcodes are in code_base.
+ */
+ bpf_jit_dump(flen, proglen, pass, code_base);
if (image) {
- if (bpf_jit_enable > 1)
- print_hex_dump(KERN_ERR, "JIT code: ",
- DUMP_PREFIX_ADDRESS,
- 16, 1, code_base,
- proglen, false);
-
bpf_flush_icache(code_base, code_base + (proglen/4));
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 039fc8e82199..2b4dc6abde6c 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -47,6 +47,25 @@ static struct platform_device mv643xx_eth_shared_device = {
.resource = mv643xx_eth_shared_resources,
};
+/*
+ * The orion mdio driver only covers shared + 0x4 up to shared + 0x84 - 1
+ */
+static struct resource mv643xx_eth_mvmdio_resources[] = {
+ [0] = {
+ .name = "ethernet mdio base",
+ .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x4,
+ .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x83,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device mv643xx_eth_mvmdio_device = {
+ .name = "orion-mdio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mv643xx_eth_mvmdio_resources),
+ .resource = mv643xx_eth_mvmdio_resources,
+};
+
static struct resource mv643xx_eth_port1_resources[] = {
[0] = {
.name = "eth port1 irq",
@@ -82,6 +101,7 @@ static struct platform_device eth_port1_device = {
static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
&mv643xx_eth_shared_device,
+ &mv643xx_eth_mvmdio_device,
&eth_port1_device,
};
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 0f6af41ebb44..4a25c26f0bf4 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -214,15 +214,27 @@ static struct platform_device * __init mv64x60_eth_register_shared_pdev(
struct device_node *np, int id)
{
struct platform_device *pdev;
- struct resource r[1];
+ struct resource r[2];
int err;
err = of_address_to_resource(np, 0, &r[0]);
if (err)
return ERR_PTR(err);
+ /* register an orion mdio bus driver */
+ r[1].start = r[0].start + 0x4;
+ r[1].end = r[0].start + 0x84 - 1;
+ r[1].flags = IORESOURCE_MEM;
+
+ if (id == 0) {
+ pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1);
+ if (!pdev)
+ return pdev;
+ }
+
pdev = platform_device_register_simple(MV643XX_ETH_SHARED_NAME, id,
- r, 1);
+ &r[0], 1);
+
return pdev;
}
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index f99eea7fff0f..2dacb306835c 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -78,4 +78,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index cbbad74b2e06..89f49b68a21c 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -68,6 +68,8 @@
#define SO_LOCK_FILTER 0x0028
+#define SO_SELECT_ERR_QUEUE 0x0029
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 3109ca684a99..d36a85ebb5e0 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -795,13 +795,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
}
if (bpf_jit_enable > 1)
- pr_err("flen=%d proglen=%u pass=%d image=%p\n",
- flen, proglen, pass, image);
+ bpf_jit_dump(flen, proglen, pass, image);
if (image) {
- if (bpf_jit_enable > 1)
- print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
- 16, 1, image, proglen, false);
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 3cbe45381bbb..f66b54086ce5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -725,17 +725,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
}
oldproglen = proglen;
}
+
if (bpf_jit_enable > 1)
- pr_err("flen=%d proglen=%u pass=%d image=%p\n",
- flen, proglen, pass, image);
+ bpf_jit_dump(flen, proglen, pass, image);
if (image) {
- if (bpf_jit_enable > 1)
- print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
- 16, 1, image, proglen, false);
-
bpf_flush_icache(image, image + proglen);
-
fp->bpf_func = (void *)image;
}
out:
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 35905cb6e419..a8f44f50e651 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -83,4 +83,6 @@
#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 03bbe104338f..17b26ce7e051 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -104,7 +104,13 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
if (i)
bcma_err(core->bus, "PLL enable timeout\n");
} else {
- bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
+ /*
+ * Mask the PLL but don't wait for it to be disabled. PLL may be
+ * shared between cores and will be still up if there is another
+ * core using it.
+ */
+ bcma_mask32(core, BCMA_CLKCTLST, ~req);
+ bcma_read32(core, BCMA_CLKCTLST);
}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9a6188add590..f72f52b4b1dd 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -120,6 +120,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
continue;
}
+ /* Only first GMAC core on BCM4706 is connected and working */
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+ core->core_unit > 0)
+ continue;
+
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 1110478dd0fd..08ae128cce9b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -232,6 +232,31 @@ void proc_comm_connector(struct task_struct *task)
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
+void proc_coredump_connector(struct task_struct *task)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ __u8 buffer[CN_PROC_MSG_SIZE];
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->what = PROC_EVENT_COREDUMP;
+ ev->event_data.coredump.process_pid = task->pid;
+ ev->event_data.coredump.process_tgid = task->tgid;
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f1b7e244bfc1..6ecfa758942c 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
-#include <linux/netlink.h>
+#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
@@ -95,13 +95,13 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
if (!netlink_has_listeners(dev->nls, group))
return -ESRCH;
- size = NLMSG_SPACE(sizeof(*msg) + msg->len);
+ size = sizeof(*msg) + msg->len;
- skb = alloc_skb(size, gfp_mask);
+ skb = nlmsg_new(size, gfp_mask);
if (!skb)
return -ENOMEM;
- nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
+ nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
if (!nlh) {
kfree_skb(skb);
return -EMSGSIZE;
@@ -124,7 +124,7 @@ static int cn_call_callback(struct sk_buff *skb)
{
struct cn_callback_entry *i, *cbq = NULL;
struct cn_dev *dev = &cdev;
- struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+ struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
int err = -ENODEV;
@@ -162,7 +162,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
skb = skb_get(__skb);
- if (skb->len >= NLMSG_SPACE(0)) {
+ if (skb->len >= NLMSG_HDRLEN) {
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9b041858d10d..9e84d5bc9307 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -470,8 +470,10 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
}
if (!dca2_tag_map_valid(ioatdca->tag_map)) {
- dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
- "disabling DCA\n");
+ WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
+ "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+ dev_driver_string(&pdev->dev),
+ dev_name(&pdev->dev));
free_dca_provider(dca);
return NULL;
}
@@ -689,7 +691,10 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
}
if (dca3_tag_map_invalid(ioatdca->tag_map)) {
- dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+ WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
+ "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+ dev_driver_string(&pdev->dev),
+ dev_name(&pdev->dev));
free_dca_provider(dca);
return NULL;
}
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 7224533e8ca6..7a701a58bbf0 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -47,9 +47,9 @@ config FIREWIRE_NET
tristate "IP networking over 1394"
depends on FIREWIRE && INET
help
- This enables IPv4 over IEEE 1394, providing IP connectivity with
- other implementations of RFC 2734 as found on several operating
- systems. Multicast support is currently limited.
+ This enables IPv4/IPv6 over IEEE 1394, providing IP connectivity
+ with other implementations of RFC 2734/3146 as found on several
+ operating systems. Multicast support is currently limited.
To compile this driver as a module, say M here: The module will be
called firewire-net.
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 2b27bff2591a..4d565365e476 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1,5 +1,6 @@
/*
* IPv4 over IEEE 1394, per RFC 2734
+ * IPv6 over IEEE 1394, per RFC 3146
*
* Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
*
@@ -28,6 +29,7 @@
#include <asm/unaligned.h>
#include <net/arp.h>
+#include <net/firewire.h>
/* rx limits */
#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
@@ -45,6 +47,7 @@
#define IANA_SPECIFIER_ID 0x00005eU
#define RFC2734_SW_VERSION 0x000001U
+#define RFC3146_SW_VERSION 0x000002U
#define IEEE1394_GASP_HDR_SIZE 8
@@ -57,32 +60,10 @@
#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
-#define RFC2734_HW_ADDR_LEN 16
-
-struct rfc2734_arp {
- __be16 hw_type; /* 0x0018 */
- __be16 proto_type; /* 0x0806 */
- u8 hw_addr_len; /* 16 */
- u8 ip_addr_len; /* 4 */
- __be16 opcode; /* ARP Opcode */
- /* Above is exactly the same format as struct arphdr */
-
- __be64 s_uniq_id; /* Sender's 64bit EUI */
- u8 max_rec; /* Sender's max packet size */
- u8 sspd; /* Sender's max speed */
- __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
- __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
- __be32 sip; /* Sender's IP Address */
- __be32 tip; /* IP Address of requested hw addr */
-} __packed;
-
-/* This header format is specific to this driver implementation. */
-#define FWNET_ALEN 8
-#define FWNET_HLEN 10
-struct fwnet_header {
- u8 h_dest[FWNET_ALEN]; /* destination address */
- __be16 h_proto; /* packet type ID field */
-} __packed;
+static bool fwnet_hwaddr_is_multicast(u8 *ha)
+{
+ return !!(*ha & 1);
+}
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
@@ -191,8 +172,6 @@ struct fwnet_peer {
struct list_head peer_link;
struct fwnet_device *dev;
u64 guid;
- u64 fifo;
- __be32 ip;
/* guarded by dev->lock */
struct list_head pd_list; /* received partial datagrams */
@@ -222,6 +201,15 @@ struct fwnet_packet_task {
};
/*
+ * Get fifo address embedded in hwaddr
+ */
+static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
+{
+ return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
+ | get_unaligned_be32(&ha->uc.fifo_lo);
+}
+
+/*
* saddr == NULL means use device source address.
* daddr == NULL means leave destination address (eg unresolved arp).
*/
@@ -513,10 +501,20 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
bool is_broadcast, u16 ether_type)
{
struct fwnet_device *dev;
- static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
int status;
__be64 guid;
+ switch (ether_type) {
+ case ETH_P_ARP:
+ case ETH_P_IP:
+#if IS_ENABLED(CONFIG_IPV6)
+ case ETH_P_IPV6:
+#endif
+ break;
+ default:
+ goto err;
+ }
+
dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
@@ -524,92 +522,11 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
/*
* Parse the encapsulation header. This actually does the job of
- * converting to an ethernet frame header, as well as arp
- * conversion if needed. ARP conversion is easier in this
- * direction, since we are using ethernet as our backend.
+ * converting to an ethernet-like pseudo frame header.
*/
- /*
- * If this is an ARP packet, convert it. First, we want to make
- * use of some of the fields, since they tell us a little bit
- * about the sending machine.
- */
- if (ether_type == ETH_P_ARP) {
- struct rfc2734_arp *arp1394;
- struct arphdr *arp;
- unsigned char *arp_ptr;
- u64 fifo_addr;
- u64 peer_guid;
- unsigned sspd;
- u16 max_payload;
- struct fwnet_peer *peer;
- unsigned long flags;
-
- arp1394 = (struct rfc2734_arp *)skb->data;
- arp = (struct arphdr *)skb->data;
- arp_ptr = (unsigned char *)(arp + 1);
- peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
- fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
- | get_unaligned_be32(&arp1394->fifo_lo);
-
- sspd = arp1394->sspd;
- /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
- if (sspd > SCODE_3200) {
- dev_notice(&net->dev, "sspd %x out of range\n", sspd);
- sspd = SCODE_3200;
- }
- max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
-
- spin_lock_irqsave(&dev->lock, flags);
- peer = fwnet_peer_find_by_guid(dev, peer_guid);
- if (peer) {
- peer->fifo = fifo_addr;
-
- if (peer->speed > sspd)
- peer->speed = sspd;
- if (peer->max_payload > max_payload)
- peer->max_payload = max_payload;
-
- peer->ip = arp1394->sip;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
-
- if (!peer) {
- dev_notice(&net->dev,
- "no peer for ARP packet from %016llx\n",
- (unsigned long long)peer_guid);
- goto no_peer;
- }
-
- /*
- * Now that we're done with the 1394 specific stuff, we'll
- * need to alter some of the data. Believe it or not, all
- * that needs to be done is sender_IP_address needs to be
- * moved, the destination hardware address get stuffed
- * in and the hardware address length set to 8.
- *
- * IMPORTANT: The code below overwrites 1394 specific data
- * needed above so keep the munging of the data for the
- * higher level IP stack last.
- */
-
- arp->ar_hln = 8;
- /* skip over sender unique id */
- arp_ptr += arp->ar_hln;
- /* move sender IP addr */
- put_unaligned(arp1394->sip, (u32 *)arp_ptr);
- /* skip over sender IP addr */
- arp_ptr += arp->ar_pln;
-
- if (arp->ar_op == htons(ARPOP_REQUEST))
- memset(arp_ptr, 0, sizeof(u64));
- else
- memcpy(arp_ptr, net->dev_addr, sizeof(u64));
- }
-
- /* Now add the ethernet header. */
guid = cpu_to_be64(dev->card->guid);
if (dev_hard_header(skb, net, ether_type,
- is_broadcast ? &broadcast_hw : &guid,
+ is_broadcast ? net->broadcast : net->dev_addr,
NULL, skb->len) >= 0) {
struct fwnet_header *eth;
u16 *rawp;
@@ -618,7 +535,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = (struct fwnet_header *)skb_mac_header(skb);
- if (*eth->h_dest & 1) {
+ if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
if (memcmp(eth->h_dest, net->broadcast,
net->addr_len) == 0)
skb->pkt_type = PACKET_BROADCAST;
@@ -630,7 +547,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
skb->pkt_type = PACKET_OTHERHOST;
}
- if (ntohs(eth->h_proto) >= 1536) {
+ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
protocol = eth->h_proto;
} else {
rawp = (u16 *)skb->data;
@@ -652,7 +569,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
return 0;
- no_peer:
+ err:
net->stats.rx_errors++;
net->stats.rx_dropped++;
@@ -856,7 +773,12 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
- if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ if (specifier_id == IANA_SPECIFIER_ID &&
+ (ver == RFC2734_SW_VERSION
+#if IS_ENABLED(CONFIG_IPV6)
+ || ver == RFC3146_SW_VERSION
+#endif
+ )) {
buf_ptr += 2;
length -= IEEE1394_GASP_HDR_SIZE;
fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
@@ -1059,16 +981,27 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
u8 *p;
int generation;
int node_id;
+ unsigned int sw_version;
/* ptask->generation may not have been set yet */
generation = dev->card->generation;
smp_rmb();
node_id = dev->card->node_id;
+ switch (ptask->skb->protocol) {
+ default:
+ sw_version = RFC2734_SW_VERSION;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ sw_version = RFC3146_SW_VERSION;
+#endif
+ }
+
p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
- | RFC2734_SW_VERSION, &p[4]);
+ | sw_version, &p[4]);
/* We should not transmit if broadcast_channel.valid == 0. */
fw_send_request(dev->card, &ptask->transaction,
@@ -1116,6 +1049,62 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
return 0;
}
+static void fwnet_fifo_stop(struct fwnet_device *dev)
+{
+ if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
+ return;
+
+ fw_core_remove_address_handler(&dev->handler);
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+}
+
+static int fwnet_fifo_start(struct fwnet_device *dev)
+{
+ int retval;
+
+ if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+ return 0;
+
+ dev->handler.length = 4096;
+ dev->handler.address_callback = fwnet_receive_packet;
+ dev->handler.callback_data = dev;
+
+ retval = fw_core_add_address_handler(&dev->handler,
+ &fw_high_memory_region);
+ if (retval < 0)
+ return retval;
+
+ dev->local_fifo = dev->handler.offset;
+
+ return 0;
+}
+
+static void __fwnet_broadcast_stop(struct fwnet_device *dev)
+{
+ unsigned u;
+
+ if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
+ kunmap(dev->broadcast_rcv_buffer.pages[u]);
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+ }
+ if (dev->broadcast_rcv_context) {
+ fw_iso_context_destroy(dev->broadcast_rcv_context);
+ dev->broadcast_rcv_context = NULL;
+ }
+ kfree(dev->broadcast_rcv_buffer_ptrs);
+ dev->broadcast_rcv_buffer_ptrs = NULL;
+ dev->broadcast_state = FWNET_BROADCAST_ERROR;
+}
+
+static void fwnet_broadcast_stop(struct fwnet_device *dev)
+{
+ if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
+ return;
+ fw_iso_context_stop(dev->broadcast_rcv_context);
+ __fwnet_broadcast_stop(dev);
+}
+
static int fwnet_broadcast_start(struct fwnet_device *dev)
{
struct fw_iso_context *context;
@@ -1124,60 +1113,47 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
unsigned max_receive;
struct fw_iso_packet packet;
unsigned long offset;
+ void **ptrptr;
unsigned u;
- if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
- dev->handler.length = 4096;
- dev->handler.address_callback = fwnet_receive_packet;
- dev->handler.callback_data = dev;
-
- retval = fw_core_add_address_handler(&dev->handler,
- &fw_high_memory_region);
- if (retval < 0)
- goto failed_initial;
-
- dev->local_fifo = dev->handler.offset;
- }
+ if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
+ return 0;
max_receive = 1U << (dev->card->max_receive + 1);
num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
- if (!dev->broadcast_rcv_context) {
- void **ptrptr;
-
- context = fw_iso_context_create(dev->card,
- FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
- dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
- if (IS_ERR(context)) {
- retval = PTR_ERR(context);
- goto failed_context_create;
- }
+ ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ if (!ptrptr) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+ dev->broadcast_rcv_buffer_ptrs = ptrptr;
+
+ context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
+ IEEE1394_BROADCAST_CHANNEL,
+ dev->card->link_speed, 8,
+ fwnet_receive_broadcast, dev);
+ if (IS_ERR(context)) {
+ retval = PTR_ERR(context);
+ goto failed;
+ }
- retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
- dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
- if (retval < 0)
- goto failed_buffer_init;
+ retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
+ FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+ if (retval < 0)
+ goto failed;
- ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
- if (!ptrptr) {
- retval = -ENOMEM;
- goto failed_ptrs_alloc;
- }
+ dev->broadcast_state = FWNET_BROADCAST_STOPPED;
- dev->broadcast_rcv_buffer_ptrs = ptrptr;
- for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
- void *ptr;
- unsigned v;
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+ void *ptr;
+ unsigned v;
- ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
- for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
- *ptrptr++ = (void *)
- ((char *)ptr + v * max_receive);
- }
- dev->broadcast_rcv_context = context;
- } else {
- context = dev->broadcast_rcv_context;
+ ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+ for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+ *ptrptr++ = (void *) ((char *)ptr + v * max_receive);
}
+ dev->broadcast_rcv_context = context;
packet.payload_length = max_receive;
packet.interrupt = 1;
@@ -1191,7 +1167,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
retval = fw_iso_context_queue(context, &packet,
&dev->broadcast_rcv_buffer, offset);
if (retval < 0)
- goto failed_rcv_queue;
+ goto failed;
offset += max_receive;
}
@@ -1201,7 +1177,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
retval = fw_iso_context_start(context, -1, 0,
FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
if (retval < 0)
- goto failed_rcv_queue;
+ goto failed;
/* FIXME: adjust it according to the min. speed of all known peers? */
dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
@@ -1210,19 +1186,8 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
return 0;
- failed_rcv_queue:
- kfree(dev->broadcast_rcv_buffer_ptrs);
- dev->broadcast_rcv_buffer_ptrs = NULL;
- failed_ptrs_alloc:
- fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
- failed_buffer_init:
- fw_iso_context_destroy(context);
- dev->broadcast_rcv_context = NULL;
- failed_context_create:
- fw_core_remove_address_handler(&dev->handler);
- failed_initial:
- dev->local_fifo = FWNET_NO_FIFO_ADDR;
-
+ failed:
+ __fwnet_broadcast_stop(dev);
return retval;
}
@@ -1240,11 +1205,10 @@ static int fwnet_open(struct net_device *net)
struct fwnet_device *dev = netdev_priv(net);
int ret;
- if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
- ret = fwnet_broadcast_start(dev);
- if (ret)
- return ret;
- }
+ ret = fwnet_broadcast_start(dev);
+ if (ret)
+ return ret;
+
netif_start_queue(net);
spin_lock_irq(&dev->lock);
@@ -1257,9 +1221,10 @@ static int fwnet_open(struct net_device *net)
/* ifdown */
static int fwnet_stop(struct net_device *net)
{
- netif_stop_queue(net);
+ struct fwnet_device *dev = netdev_priv(net);
- /* Deallocate iso context for use by other applications? */
+ netif_stop_queue(net);
+ fwnet_broadcast_stop(dev);
return 0;
}
@@ -1299,19 +1264,27 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
* We might need to rebuild the header on tx failure.
*/
memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
- skb_pull(skb, sizeof(hdr_buf));
-
proto = hdr_buf.h_proto;
+
+ switch (proto) {
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_IP):
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+#endif
+ break;
+ default:
+ goto fail;
+ }
+
+ skb_pull(skb, sizeof(hdr_buf));
dg_size = skb->len;
/*
* Set the transmission type for the packet. ARP packets and IP
* broadcast packets are sent via GASP.
*/
- if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
- || proto == htons(ETH_P_ARP)
- || (proto == htons(ETH_P_IP)
- && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
+ if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
max_payload = dev->broadcast_xmt_max_payload;
datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
@@ -1320,11 +1293,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
ptask->dest_node = IEEE1394_ALL_NODES;
ptask->speed = SCODE_100;
} else {
- __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
+ union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
+ __be64 guid = get_unaligned(&ha->uc.uniq_id);
u8 generation;
peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
- if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
+ if (!peer)
goto fail;
generation = peer->generation;
@@ -1332,32 +1306,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
max_payload = peer->max_payload;
datagram_label_ptr = &peer->datagram_label;
- ptask->fifo_addr = peer->fifo;
+ ptask->fifo_addr = fwnet_hwaddr_fifo(ha);
ptask->generation = generation;
ptask->dest_node = dest_node;
ptask->speed = peer->speed;
}
- /* If this is an ARP packet, convert it */
- if (proto == htons(ETH_P_ARP)) {
- struct arphdr *arp = (struct arphdr *)skb->data;
- unsigned char *arp_ptr = (unsigned char *)(arp + 1);
- struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
- __be32 ipaddr;
-
- ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
-
- arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
- arp1394->max_rec = dev->card->max_receive;
- arp1394->sspd = dev->card->link_speed;
-
- put_unaligned_be16(dev->local_fifo >> 32,
- &arp1394->fifo_hi);
- put_unaligned_be32(dev->local_fifo & 0xffffffff,
- &arp1394->fifo_lo);
- put_unaligned(ipaddr, &arp1394->sip);
- }
-
ptask->hdr.w0 = 0;
ptask->hdr.w1 = 0;
ptask->skb = skb;
@@ -1472,8 +1426,6 @@ static int fwnet_add_peer(struct fwnet_device *dev,
peer->dev = dev;
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
- peer->fifo = FWNET_NO_FIFO_ADDR;
- peer->ip = 0;
INIT_LIST_HEAD(&peer->pd_list);
peer->pdg_size = 0;
peer->datagram_label = 0;
@@ -1503,6 +1455,7 @@ static int fwnet_probe(struct device *_dev)
struct fwnet_device *dev;
unsigned max_mtu;
int ret;
+ union fwnet_hwaddr *ha;
mutex_lock(&fwnet_device_mutex);
@@ -1533,6 +1486,11 @@ static int fwnet_probe(struct device *_dev)
dev->card = card;
dev->netdev = net;
+ ret = fwnet_fifo_start(dev);
+ if (ret < 0)
+ goto out;
+ dev->local_fifo = dev->handler.offset;
+
/*
* Use the RFC 2734 default 1500 octets or the maximum payload
* as initial MTU
@@ -1542,24 +1500,31 @@ static int fwnet_probe(struct device *_dev)
net->mtu = min(1500U, max_mtu);
/* Set our hardware address while we're at it */
- put_unaligned_be64(card->guid, net->dev_addr);
- put_unaligned_be64(~0ULL, net->broadcast);
+ ha = (union fwnet_hwaddr *)net->dev_addr;
+ put_unaligned_be64(card->guid, &ha->uc.uniq_id);
+ ha->uc.max_rec = dev->card->max_receive;
+ ha->uc.sspd = dev->card->link_speed;
+ put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
+ put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
+
+ memset(net->broadcast, -1, net->addr_len);
+
ret = register_netdev(net);
if (ret)
goto out;
list_add_tail(&dev->dev_link, &fwnet_device_list);
- dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n",
+ dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
dev_name(card->device));
have_dev:
ret = fwnet_add_peer(dev, unit, device);
if (ret && allocated_netdev) {
unregister_netdev(net);
list_del(&dev->dev_link);
- }
out:
- if (ret && allocated_netdev)
+ fwnet_fifo_stop(dev);
free_netdev(net);
+ }
mutex_unlock(&fwnet_device_mutex);
@@ -1592,22 +1557,14 @@ static int fwnet_remove(struct device *_dev)
mutex_lock(&fwnet_device_mutex);
net = dev->netdev;
- if (net && peer->ip)
- arp_invalidate(net, peer->ip);
fwnet_remove_peer(peer, dev);
if (list_empty(&dev->peer_list)) {
unregister_netdev(net);
- if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
- fw_core_remove_address_handler(&dev->handler);
- if (dev->broadcast_rcv_context) {
- fw_iso_context_stop(dev->broadcast_rcv_context);
- fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
- dev->card);
- fw_iso_context_destroy(dev->broadcast_rcv_context);
- }
+ fwnet_fifo_stop(dev);
+
for (i = 0; dev->queued_datagrams && i < 5; i++)
ssleep(1);
WARN_ON(dev->queued_datagrams);
@@ -1646,6 +1603,14 @@ static const struct ieee1394_device_id fwnet_id_table[] = {
.specifier_id = IANA_SPECIFIER_ID,
.version = RFC2734_SW_VERSION,
},
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .specifier_id = IANA_SPECIFIER_ID,
+ .version = RFC3146_SW_VERSION,
+ },
+#endif
{ }
};
@@ -1683,6 +1648,30 @@ static struct fw_descriptor rfc2374_unit_directory = {
.data = rfc2374_unit_directory_data
};
+#if IS_ENABLED(CONFIG_IPV6)
+static const u32 rfc3146_unit_directory_data[] = {
+ 0x00040000, /* directory_length */
+ 0x1200005e, /* unit_specifier_id: IANA */
+ 0x81000003, /* textual descriptor offset */
+ 0x13000002, /* unit_sw_version: RFC 3146 */
+ 0x81000005, /* textual descriptor offset */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49414e41, /* I A N A */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49507636, /* I P v 6 */
+};
+
+static struct fw_descriptor rfc3146_unit_directory = {
+ .length = ARRAY_SIZE(rfc3146_unit_directory_data),
+ .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
+ .data = rfc3146_unit_directory_data
+};
+#endif
+
static int __init fwnet_init(void)
{
int err;
@@ -1691,11 +1680,17 @@ static int __init fwnet_init(void)
if (err)
return err;
+#if IS_ENABLED(CONFIG_IPV6)
+ err = fw_core_add_descriptor(&rfc3146_unit_directory);
+ if (err)
+ goto out;
+#endif
+
fwnet_packet_task_cache = kmem_cache_create("packet_task",
sizeof(struct fwnet_packet_task), 0, 0, NULL);
if (!fwnet_packet_task_cache) {
err = -ENOMEM;
- goto out;
+ goto out2;
}
err = driver_register(&fwnet_driver.driver);
@@ -1703,7 +1698,11 @@ static int __init fwnet_init(void)
return 0;
kmem_cache_destroy(fwnet_packet_task_cache);
+out2:
+#if IS_ENABLED(CONFIG_IPV6)
+ fw_core_remove_descriptor(&rfc3146_unit_directory);
out:
+#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
return err;
@@ -1714,11 +1713,14 @@ static void __exit fwnet_cleanup(void)
{
driver_unregister(&fwnet_driver.driver);
kmem_cache_destroy(fwnet_packet_task_cache);
+#if IS_ENABLED(CONFIG_IPV6)
+ fw_core_remove_descriptor(&rfc3146_unit_directory);
+#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
}
module_exit(fwnet_cleanup);
MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
-MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
+MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a3fde52840ca..65c30ea8c1a1 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
+ struct cpl_t5_act_open_req *t5_req;
struct sk_buff *skb;
u64 opt0;
u32 opt2;
unsigned int mtu_idx;
int wscale;
- int wrlen = roundup(sizeof *req, 16);
+ int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+ sizeof(struct cpl_act_open_req) :
+ sizeof(struct cpl_t5_act_open_req);
+ int wrlen = roundup(size, 16);
PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= WND_SCALE_EN(1);
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
- req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
- req->opt2 = cpu_to_be32(opt2);
+ if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+ req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid << 14) | ep->atid)));
+ req->local_port = ep->com.local_addr.sin_port;
+ req->peer_port = ep->com.remote_addr.sin_port;
+ req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+ ep->dst, ep->l2t));
+ req->opt2 = cpu_to_be32(opt2);
+ } else {
+ t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(t5_req, 0);
+ OPCODE_TID(t5_req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid << 14) | ep->atid)));
+ t5_req->local_port = ep->com.local_addr.sin_port;
+ t5_req->peer_port = ep->com.remote_addr.sin_port;
+ t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ t5_req->opt0 = cpu_to_be64(opt0);
+ t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+ select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
+ t5_req->opt2 = cpu_to_be32(opt2);
+ }
+
set_bit(ACT_OPEN_REQ, &ep->com.history);
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -1676,9 +1699,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_TIMEDOUT:
break;
case CPL_ERR_TCAM_FULL:
+ dev->rdev.stats.tcam_full++;
if (dev->rdev.lldi.enable_fw_ofld_conn) {
mutex_lock(&dev->rdev.stats.lock);
- dev->rdev.stats.tcam_full++;
mutex_unlock(&dev->rdev.stats.lock);
send_fw_act_open_req(ep,
GET_TID_TID(GET_AOPEN_ATID(
@@ -2875,12 +2898,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
{
u32 l2info;
- u16 vlantag, len, hdr_len;
+ u16 vlantag, len, hdr_len, eth_hdr_len;
u8 intf;
struct cpl_rx_pkt *cpl = cplhdr(skb);
struct cpl_pass_accept_req *req;
struct tcp_options_received tmp_opt;
+ struct c4iw_dev *dev;
+ dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
/* Store values from cpl_rx_pkt in temporary location. */
vlantag = (__force u16) cpl->vlan;
len = (__force u16) cpl->len;
@@ -2896,7 +2921,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
*/
memset(&tmp_opt, 0, sizeof(tmp_opt));
tcp_clear_options(&tmp_opt);
- tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
+ tcp_parse_options(skb, &tmp_opt, 0, NULL);
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
@@ -2904,14 +2929,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
V_SYN_MAC_IDX(G_RX_MACIDX(
(__force int) htonl(l2info))) |
F_SYN_XACT_MATCH);
+ eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+ G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
+ G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
(__force int) htonl(l2info))) |
V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
(__force int) htons(hdr_len))) |
V_IP_HDR_LEN(G_RX_IPHDR_LEN(
(__force int) htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
- (__force int) htonl(l2info))));
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
req->vlan = (__force __be16) vlantag;
req->len = (__force __be16) len;
req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2999,7 +3026,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
u16 window;
struct port_info *pi;
struct net_device *pdev;
- u16 rss_qid;
+ u16 rss_qid, eth_hdr_len;
int step;
u32 tx_chan;
struct neighbour *neigh;
@@ -3028,7 +3055,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+ eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+ G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
+ G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+ if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
} else {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c1..ae656016e1ae 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,10 +41,20 @@
#define DRV_VERSION "0.1"
MODULE_AUTHOR("Steve Wise");
-MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
+static int allow_db_fc_on_t5;
+module_param(allow_db_fc_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_fc_on_t5,
+ "Allow DB Flow Control on T5 (default = 0)");
+
+static int allow_db_coalescing_on_t5;
+module_param(allow_db_coalescing_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_coalescing_on_t5,
+ "Allow DB Coalescing on T5 (default = 0)");
+
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
@@ -614,7 +624,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
{
return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
- infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+ infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +637,22 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
pci_name(infop->pdev));
return ERR_PTR(-ENOSYS);
}
+ if (!ocqp_supported(infop))
+ pr_info("%s: On-Chip Queues not supported on this device.\n",
+ pci_name(infop->pdev));
+
+ if (!is_t4(infop->adapter_type)) {
+ if (!allow_db_fc_on_t5) {
+ db_fc_threshold = 100000;
+ pr_info("DB Flow Control Disabled.\n");
+ }
+
+ if (!allow_db_coalescing_on_t5) {
+ db_coalescing_threshold = -1;
+ pr_info("DB Coalescing Disabled.\n");
+ }
+ }
+
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +704,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
int i;
if (!vers_printed++)
- printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
- DRV_VERSION);
+ pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
+ DRV_VERSION);
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8c..485183ad34cd 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -162,7 +162,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
-#define C4IW_WR_TO (10*HZ)
+#define C4IW_WR_TO (30*HZ)
struct c4iw_wr_wait {
struct completion completion;
@@ -369,7 +369,6 @@ struct c4iw_fr_page_list {
DEFINE_DMA_UNMAP_ADDR(mapping);
dma_addr_t dma_addr;
struct c4iw_dev *dev;
- int size;
};
static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -817,6 +816,15 @@ static inline int compute_wscale(int win)
return wscale;
}
+static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+ return infop->vr->ocq.size > 0;
+#else
+ return 0;
+#endif
+}
+
u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
@@ -930,6 +938,8 @@ extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
extern int db_fc_threshold;
+extern int db_coalescing_threshold;
+extern int use_dsgl;
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 903a92d6f91d..4cb8eb24497c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -30,16 +30,76 @@
* SOFTWARE.
*/
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include "iw_cxgb4.h"
+int use_dsgl = 1;
+module_param(use_dsgl, int, 0644);
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
+
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+#define C4IW_INLINE_THRESHOLD 128
-static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+static int inline_threshold = C4IW_INLINE_THRESHOLD;
+module_param(inline_threshold, int, 0644);
+MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+
+static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
+ u32 len, dma_addr_t data, int wait)
+{
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_sgl *sgl;
+ u8 wr_len;
+ int ret = 0;
+ struct c4iw_wr_wait wr_wait;
+
+ addr &= 0x7FFFFFF;
+
+ if (wait)
+ c4iw_init_wr_wait(&wr_wait);
+ wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ memset(req, 0, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, 0);
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+ (wait ? FW_WR_COMPL(1) : 0));
+ req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
+ req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+ req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+ req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(req + 1);
+ sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE(1));
+ sgl->len0 = cpu_to_be32(len);
+ sgl->addr0 = cpu_to_be64(data);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ return ret;
+ if (wait)
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ return ret;
+}
+
+static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
{
struct sk_buff *skb;
struct ulp_mem_io *req;
@@ -47,6 +107,12 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
+ __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+
+ if (is_t4(rdev->lldi.adapter_type))
+ cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+ else
+ cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
addr &= 0x7FFFFFF;
PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -77,7 +143,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
req->wr.wr_mid = cpu_to_be32(
FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+ req->cmd = cmd;
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
@@ -107,6 +173,67 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
+int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+ u32 remain = len;
+ u32 dmalen;
+ int ret = 0;
+ dma_addr_t daddr;
+ dma_addr_t save;
+
+ daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+ return -1;
+ save = daddr;
+
+ while (remain > inline_threshold) {
+ if (remain < T4_ULPTX_MAX_DMA) {
+ if (remain & ~T4_ULPTX_MIN_IO)
+ dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
+ else
+ dmalen = remain;
+ } else
+ dmalen = T4_ULPTX_MAX_DMA;
+ remain -= dmalen;
+ ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
+ !remain);
+ if (ret)
+ goto out;
+ addr += dmalen >> 5;
+ data += dmalen;
+ daddr += dmalen;
+ }
+ if (remain)
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+out:
+ dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
+ return ret;
+}
+
+/*
+ * write len bytes of data into addr (32B aligned address)
+ * If data is NULL, clear len bytes of memory to zero.
+ */
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
+{
+ if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
+ if (len > inline_threshold) {
+ if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+ printk_ratelimited(KERN_WARNING
+ "%s: dma map"
+ " failure (non fatal)\n",
+ pci_name(rdev->lldi.pdev));
+ return _c4iw_write_mem_inline(rdev, addr, len,
+ data);
+ } else
+ return 0;
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+}
+
/*
* Build and write a TPT entry.
* IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
@@ -760,19 +887,23 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
struct c4iw_fr_page_list *c4pl;
struct c4iw_dev *dev = to_c4iw_dev(device);
dma_addr_t dma_addr;
- int size = sizeof *c4pl + page_list_len * sizeof(u64);
+ int pll_len = roundup(page_list_len * sizeof(u64), 32);
- c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
- &dma_addr, GFP_KERNEL);
+ c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
if (!c4pl)
return ERR_PTR(-ENOMEM);
+ c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
+ pll_len, &dma_addr,
+ GFP_KERNEL);
+ if (!c4pl->ibpl.page_list) {
+ kfree(c4pl);
+ return ERR_PTR(-ENOMEM);
+ }
dma_unmap_addr_set(c4pl, mapping, dma_addr);
c4pl->dma_addr = dma_addr;
c4pl->dev = dev;
- c4pl->size = size;
- c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
- c4pl->ibpl.max_page_list_len = page_list_len;
+ c4pl->ibpl.max_page_list_len = pll_len;
return &c4pl->ibpl;
}
@@ -781,8 +912,10 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
- dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
- c4pl, dma_unmap_addr(c4pl, mapping));
+ dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
+ c4pl->ibpl.max_page_list_len,
+ c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
+ kfree(c4pl);
}
int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7f..7e94c9a656a1 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
*/
if (addr >= rdev->oc_mw_pa)
vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ else {
+ if (is_t5(rdev->lldi.adapter_type))
+ vma->vm_page_prot =
+ t4_pgprot_wc(vma->vm_page_prot);
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vma->vm_page_prot);
+ }
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
dev = to_c4iw_dev(ibdev);
memset(props, 0, sizeof *props);
memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
- props->hw_ver = dev->rdev.lldi.adapter_type;
+ props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
PDBG("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+ return sprintf(buf, "%d\n",
+ CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70b1808a08f4..5b059e2d80cc 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -42,10 +42,21 @@ static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
-int db_fc_threshold = 2000;
+int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
-MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
- "db flow control mode (default = 2000)");
+MODULE_PARM_DESC(db_fc_threshold,
+ "QP count/threshold that triggers"
+ " automatic db flow control mode (default = 1000)");
+
+int db_coalescing_threshold;
+module_param(db_coalescing_threshold, int, 0644);
+MODULE_PARM_DESC(db_coalescing_threshold,
+ "QP count/threshold that triggers"
+ " disabling db coalescing (default = 0)");
+
+static int max_fr_immd = T4_MAX_FR_IMMD;
+module_param(max_fr_immd, int, 0644);
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
@@ -76,7 +87,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
- if (!ocqp_support || !t4_ocqp_supported())
+ if (!ocqp_support || !ocqp_supported(&rdev->lldi))
return -ENOSYS;
sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
if (!sq->dma_addr)
@@ -129,7 +140,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
int wr_len;
struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
- int ret;
+ int ret = 0;
int eqsize;
wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -169,17 +180,14 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
}
if (user) {
- ret = alloc_oc_sq(rdev, &wq->sq);
- if (ret)
+ if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
goto free_hwaddr;
-
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_sq;
- } else
+ } else {
ret = alloc_host_sq(rdev, &wq->sq);
if (ret)
goto free_hwaddr;
+ }
+
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
@@ -534,7 +542,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
}
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ struct ib_send_wr *wr, u8 *len16, u8 t5dev)
{
struct fw_ri_immd *imdp;
@@ -556,28 +564,51 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
0xffffffff);
- WARN_ON(pbllen > T4_MAX_FR_IMMD);
- imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
- imdp->op = FW_RI_DATA_IMMD;
- imdp->r1 = 0;
- imdp->r2 = 0;
- imdp->immdlen = cpu_to_be32(pbllen);
- p = (__be64 *)(imdp + 1);
- rem = pbllen;
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
- rem -= sizeof *p;
- if (++p == (__be64 *)&sq->queue[sq->size])
- p = (__be64 *)sq->queue;
- }
- BUG_ON(rem < 0);
- while (rem) {
- *p = 0;
- rem -= sizeof *p;
- if (++p == (__be64 *)&sq->queue[sq->size])
- p = (__be64 *)sq->queue;
+
+ if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+ struct c4iw_fr_page_list *c4pl =
+ to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ struct fw_ri_dsgl *sglp;
+
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
+ cpu_to_be64((u64)
+ wr->wr.fast_reg.page_list->page_list[i]);
+ }
+
+ sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+ sglp->op = FW_RI_DATA_DSGL;
+ sglp->r1 = 0;
+ sglp->nsge = cpu_to_be16(1);
+ sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ sglp->len0 = cpu_to_be32(pbllen);
+
+ *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+ } else {
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ rem = pbllen;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ *p = cpu_to_be64(
+ (u64)wr->wr.fast_reg.page_list->page_list[i]);
+ rem -= sizeof(*p);
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ BUG_ON(rem < 0);
+ while (rem) {
+ *p = 0;
+ rem -= sizeof(*p);
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
+ + pbllen, 16);
}
- *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
return 0;
}
@@ -678,7 +709,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_FAST_REG_MR:
fw_opcode = FW_RI_FR_NSMR_WR;
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
+ err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
+ is_t5(
+ qhp->rhp->rdev.lldi.adapter_type) ?
+ 1 : 0);
break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
@@ -1450,6 +1484,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
rhp->db_state = NORMAL;
idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
}
+ if (db_coalescing_threshold >= 0)
+ if (rhp->qpcnt <= db_coalescing_threshold)
+ cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
spin_unlock_irq(&rhp->lock);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1561,11 +1598,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_lock_irq(&rhp->lock);
if (rhp->db_state != NORMAL)
t4_disable_wq_db(&qhp->wq);
- if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+ rhp->qpcnt++;
+ if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
rhp->rdev.stats.db_state_transitions++;
rhp->db_state = FLOW_CONTROL;
idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
}
+ if (db_coalescing_threshold >= 0)
+ if (rhp->qpcnt > db_coalescing_threshold)
+ cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
spin_unlock_irq(&rhp->lock);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab29302..ebcb03bd1b72 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,7 @@ struct t4_status_page {
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
sizeof(struct fw_ri_immd)) & ~31UL)
-#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
#endif
}
-static inline int t4_ocqp_supported(void)
-{
-#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
- return 1;
-#else
- return 0;
-#endif
-}
-
enum {
T4_SQ_ONCHIP = (1<<0),
};
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 832bc807ed20..cc9f1927a322 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -469,8 +469,7 @@ static int capidrv_add_ack(struct capidrv_ncci *nccip,
{
struct ncci_datahandle_queue *n, **pp;
- n = (struct ncci_datahandle_queue *)
- kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
+ n = kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
if (!n) {
printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
return -1;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index db432e635496..50749a70c5ca 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -441,8 +441,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
switch (dv->rule.action) {
case DEFLECT_IGNORE:
- return (0);
- break;
+ return 0;
case DEFLECT_ALERT:
case DEFLECT_PROCEED:
@@ -510,10 +509,9 @@ static int isdn_divert_icall(isdn_ctrl *ic)
break;
default:
- return (0); /* ignore call */
- break;
+ return 0; /* ignore call */
} /* switch action */
- break;
+	break;	/* breaks out of the 'for' loop */
} /* scan_table */
if (cs) {
diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
index 1bb291021fdb..c7a94713e9ec 100644
--- a/drivers/isdn/hisax/fsm.c
+++ b/drivers/isdn/hisax/fsm.c
@@ -26,7 +26,7 @@ FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount)
{
int i;
- fsm->jumpmatrix = (FSMFNPTR *)
+ fsm->jumpmatrix =
kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL);
if (!fsm->jumpmatrix)
return -ENOMEM;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 90f34ae2b80f..dc4574f735ef 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1479,7 +1479,7 @@ int setup_hfcsx(struct IsdnCard *card)
release_region(cs->hw.hfcsx.base, 2);
return (0);
}
- if (!(cs->hw.hfcsx.extra = (void *)
+ if (!(cs->hw.hfcsx.extra =
kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) {
release_region(cs->hw.hfcsx.base, 2);
printk(KERN_WARNING "HFC-SX: unable to allocate memory\n");
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index babc621a07fb..88d657dff474 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1385,7 +1385,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
skb->pkt_type = PACKET_OTHERHOST;
}
- if (ntohs(eth->h_proto) >= 1536)
+ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
return eth->h_proto;
rawp = skb->data;
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 44225b186f6d..83a23afb13ab 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -185,7 +185,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
skb->pkt_type=PACKET_MULTICAST;
}
- if (ntohs(eth->h_proto) >= 1536)
+ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
return eth->h_proto;
rawp = skb->data;
@@ -228,9 +228,9 @@ static int ule_test_sndu( struct dvb_net_priv *p )
static int ule_bridged_sndu( struct dvb_net_priv *p )
{
struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
- if(ntohs(hdr->h_proto) < 1536) {
+ if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
- /* A frame Type < 1536 for a bridged frame, introduces a LLC Length field. */
+ /* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */
if(framelen != ntohs(hdr->h_proto)) {
return -1;
}
@@ -320,7 +320,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
(int) p->ule_sndu_type, l, total_ext_len);
#endif
- } while (p->ule_sndu_type < 1536);
+ } while (p->ule_sndu_type < ETH_P_802_3_MIN);
return total_ext_len;
}
@@ -712,7 +712,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
}
/* Handle ULE Extension Headers. */
- if (priv->ule_sndu_type < 1536) {
+ if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
/* There is an extension header. Handle it accordingly. */
int l = handle_ule_extensions(priv);
if (l < 0) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 87f1d39ca551..3835321b8cf3 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -151,6 +151,7 @@ config MACVTAP
config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
depends on INET
+ select NET_IP_TUNNEL
---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index f5a89164e779..4ce6ca5f3d36 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -106,20 +106,4 @@ config IPDDP_ENCAP
IP packets inside AppleTalk frames; this is useful if your Linux box
is stuck on an AppleTalk network (which hopefully contains a
decapsulator somewhere). Please see
- <file:Documentation/networking/ipddp.txt> for more information. If
- you said Y to "AppleTalk-IP driver support" above and you say Y
- here, then you cannot say Y to "AppleTalk-IP to IP Decapsulation
- support", below.
-
-config IPDDP_DECAP
- bool "Appletalk-IP to IP Decapsulation support"
- depends on IPDDP
- help
- If you say Y here, the AppleTalk-IP code will be able to decapsulate
- AppleTalk-IP frames to IP packets; this is useful if you want your
- Linux box to act as an Internet gateway for an AppleTalk network.
- Please see <file:Documentation/networking/ipddp.txt> for more
- information. If you said Y to "AppleTalk-IP driver support" above
- and you say Y here, then you cannot say Y to "IP to AppleTalk-IP
- Encapsulation support", above.
-
+ <file:Documentation/networking/ipddp.txt> for more information.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a51241b2e621..78c9e2d33eb7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -796,9 +796,8 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mcast_work.work);
- rcu_read_lock();
+
bond_resend_igmp_join_requests(bond);
- rcu_read_unlock();
}
/*
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 60c2142373c9..a966128c2a7a 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -32,13 +32,6 @@ config CAIF_SPI_SYNC
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
-config CAIF_SHM
- tristate "CAIF shared memory protocol driver"
- depends on CAIF && U5500_MBOX
- default n
- ---help---
- The CAIF shared memory protocol driver for the STE UX5500 platform.
-
config CAIF_HSI
tristate "CAIF HSI transport driver"
depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560f..15a9d2fc753d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -7,9 +7,5 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
-# Shared memory
-caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
-obj-$(CONFIG_CAIF_SHM) += caif_shm.o
-
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
deleted file mode 100644
index 89d76b7b325a..000000000000
--- a/drivers/net/caif/caif_shm_u5500.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <mach/mbox-db5500.h>
-#include <net/caif/caif_shm.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
-
-#define MAX_SHM_INSTANCES 1
-
-enum {
- MBX_ACC0,
- MBX_ACC1,
- MBX_DSP
-};
-
-static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
-
-static unsigned int shm_start;
-static unsigned int shm_size;
-
-module_param(shm_size, uint , 0440);
-MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory");
-
-module_param(shm_start, uint , 0440);
-MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory");
-
-static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
-{
- /* Always block until msg is written successfully */
- mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
- return 0;
-}
-
-static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
- void *pshm_drv)
-{
- /*
- * For UX5500, we have only 1 SHM instance which uses MBX0
- * for communication with the peer modem
- */
- pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
-
- if (!pshm_dev->hmbx)
- return -ENODEV;
- else
- return 0;
-}
-
-static int __init caif_shmdev_init(void)
-{
- int i, result;
-
- /* Loop is currently overkill, there is only one instance */
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
-
- shmdev_lyr[i].shm_base_addr = shm_start;
- shmdev_lyr[i].shm_total_sz = shm_size;
-
- if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
- || (shmdev_lyr[i].shm_total_sz <= 0)) {
- pr_warn("ERROR,"
- "Shared memory Address and/or Size incorrect"
- ", Bailing out ...\n");
- result = -EINVAL;
- goto clean;
- }
-
- pr_info("SHM AREA (instance %d) STARTS"
- " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
-
- shmdev_lyr[i].shm_id = i;
- shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
- shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
-
- /*
- * Finally, CAIF core module is called with details in place:
- * 1. SHM base address
- * 2. SHM size
- * 3. MBX handle
- */
- result = caif_shmcore_probe(&shmdev_lyr[i]);
- if (result) {
- pr_warn("ERROR[%d],"
- "Could not probe SHM core (instance %d)"
- " Bailing out ...\n", result, i);
- goto clean;
- }
- }
-
- return 0;
-
-clean:
- /*
- * For now, we assume that even if one instance of SHM fails, we bail
- * out of the driver support completely. For this, we need to release
- * any memory allocated and unregister any instance of SHM net device.
- */
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
- if (shmdev_lyr[i].pshm_netdev)
- unregister_netdev(shmdev_lyr[i].pshm_netdev);
- }
- return result;
-}
-
-static void __exit caif_shmdev_exit(void)
-{
- int i;
-
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
- caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
- kfree((void *)shmdev_lyr[i].shm_base_addr);
- }
-
-}
-
-module_init(caif_shmdev_init);
-module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
deleted file mode 100644
index bce8bac311c9..000000000000
--- a/drivers/net/caif/caif_shmcore.c
+++ /dev/null
@@ -1,747 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
- * Daniel Martensson / daniel.martensson@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/io.h>
-
-#include <net/caif/caif_device.h>
-#include <net/caif/caif_shm.h>
-
-#define NR_TX_BUF 6
-#define NR_RX_BUF 6
-#define TX_BUF_SZ 0x2000
-#define RX_BUF_SZ 0x2000
-
-#define CAIF_NEEDED_HEADROOM 32
-
-#define CAIF_FLOW_ON 1
-#define CAIF_FLOW_OFF 0
-
-#define LOW_WATERMARK 3
-#define HIGH_WATERMARK 4
-
-/* Maximum number of CAIF buffers per shared memory buffer. */
-#define SHM_MAX_FRMS_PER_BUF 10
-
-/*
- * Size in bytes of the descriptor area
- * (With end of descriptor signalling)
- */
-#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
- sizeof(struct shm_pck_desc))
-
-/*
- * Offset to the first CAIF frame within a shared memory buffer.
- * Aligned on 32 bytes.
- */
-#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
-
-/* Number of bytes for CAIF shared memory header. */
-#define SHM_HDR_LEN 1
-
-/* Number of padding bytes for the complete CAIF frame. */
-#define SHM_FRM_PAD_LEN 4
-
-#define CAIF_MAX_MTU 4096
-
-#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
-#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
-
-#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
-#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
-
-#define SHM_FULL_MASK (0x0F << 0)
-#define SHM_EMPTY_MASK (0x0F << 4)
-
-struct shm_pck_desc {
- /*
- * Offset from start of shared memory area to start of
- * shared memory CAIF frame.
- */
- u32 frm_ofs;
- u32 frm_len;
-};
-
-struct buf_list {
- unsigned char *desc_vptr;
- u32 phy_addr;
- u32 index;
- u32 len;
- u32 frames;
- u32 frm_ofs;
- struct list_head list;
-};
-
-struct shm_caif_frm {
- /* Number of bytes of padding before the CAIF frame. */
- u8 hdr_ofs;
-};
-
-struct shmdrv_layer {
- /* caif_dev_common must always be first in the structure*/
- struct caif_dev_common cfdev;
-
- u32 shm_tx_addr;
- u32 shm_rx_addr;
- u32 shm_base_addr;
- u32 tx_empty_available;
- spinlock_t lock;
-
- struct list_head tx_empty_list;
- struct list_head tx_pend_list;
- struct list_head tx_full_list;
- struct list_head rx_empty_list;
- struct list_head rx_pend_list;
- struct list_head rx_full_list;
-
- struct workqueue_struct *pshm_tx_workqueue;
- struct workqueue_struct *pshm_rx_workqueue;
-
- struct work_struct shm_tx_work;
- struct work_struct shm_rx_work;
-
- struct sk_buff_head sk_qhead;
- struct shmdev_layer *pshm_dev;
-};
-
-static int shm_netdev_open(struct net_device *shm_netdev)
-{
- netif_wake_queue(shm_netdev);
- return 0;
-}
-
-static int shm_netdev_close(struct net_device *shm_netdev)
-{
- netif_stop_queue(shm_netdev);
- return 0;
-}
-
-int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
-{
- struct buf_list *pbuf;
- struct shmdrv_layer *pshm_drv;
- struct list_head *pos;
- u32 avail_emptybuff = 0;
- unsigned long flags = 0;
-
- pshm_drv = priv;
-
- /* Check for received buffers. */
- if (mbx_msg & SHM_FULL_MASK) {
- int idx;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check whether we have any outstanding buffers. */
- if (list_empty(&pshm_drv->rx_empty_list)) {
-
- /* Release spin lock. */
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* We print even in IRQ context... */
- pr_warn("No empty Rx buffers to fill: "
- "mbx_msg:%x\n", mbx_msg);
-
- /* Bail out. */
- goto err_sync;
- }
-
- pbuf =
- list_entry(pshm_drv->rx_empty_list.next,
- struct buf_list, list);
- idx = pbuf->index;
-
- /* Check buffer synchronization. */
- if (idx != SHM_GET_FULL(mbx_msg)) {
-
- /* We print even in IRQ context... */
- pr_warn(
- "phyif_shm_mbx_msg_cb: RX full out of sync:"
- " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
- idx, mbx_msg, SHM_GET_FULL(mbx_msg));
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Bail out. */
- goto err_sync;
- }
-
- list_del_init(&pbuf->list);
- list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Schedule RX work queue. */
- if (!work_pending(&pshm_drv->shm_rx_work))
- queue_work(pshm_drv->pshm_rx_workqueue,
- &pshm_drv->shm_rx_work);
- }
-
- /* Check for emptied buffers. */
- if (mbx_msg & SHM_EMPTY_MASK) {
- int idx;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check whether we have any outstanding buffers. */
- if (list_empty(&pshm_drv->tx_full_list)) {
-
- /* We print even in IRQ context... */
- pr_warn("No TX to empty: msg:%x\n", mbx_msg);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Bail out. */
- goto err_sync;
- }
-
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- idx = pbuf->index;
-
- /* Check buffer synchronization. */
- if (idx != SHM_GET_EMPTY(mbx_msg)) {
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* We print even in IRQ context... */
- pr_warn("TX empty "
- "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
-
- /* Bail out. */
- goto err_sync;
- }
- list_del_init(&pbuf->list);
-
- /* Reset buffer parameters. */
- pbuf->frames = 0;
- pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
-
- list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
-
- /* Check the available no. of buffers in the empty list */
- list_for_each(pos, &pshm_drv->tx_empty_list)
- avail_emptybuff++;
-
- /* Check whether we have to wake up the transmitter. */
- if ((avail_emptybuff > HIGH_WATERMARK) &&
- (!pshm_drv->tx_empty_available)) {
- pshm_drv->tx_empty_available = 1;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_ON);
-
-
- /* Schedule the work queue. if required */
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue,
- &pshm_drv->shm_tx_work);
- } else
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- }
-
- return 0;
-
-err_sync:
- return -EIO;
-}
-
-static void shm_rx_work_func(struct work_struct *rx_work)
-{
- struct shmdrv_layer *pshm_drv;
- struct buf_list *pbuf;
- unsigned long flags = 0;
- struct sk_buff *skb;
- char *p;
- int ret;
-
- pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
-
- while (1) {
-
- struct shm_pck_desc *pck_desc;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check for received buffers. */
- if (list_empty(&pshm_drv->rx_full_list)) {
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- break;
- }
-
- pbuf =
- list_entry(pshm_drv->rx_full_list.next, struct buf_list,
- list);
- list_del_init(&pbuf->list);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Retrieve pointer to start of the packet descriptor area. */
- pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
-
- /*
- * Check whether descriptor contains a CAIF shared memory
- * frame.
- */
- while (pck_desc->frm_ofs) {
- unsigned int frm_buf_ofs;
- unsigned int frm_pck_ofs;
- unsigned int frm_pck_len;
- /*
- * Check whether offset is within buffer limits
- * (lower).
- */
- if (pck_desc->frm_ofs <
- (pbuf->phy_addr - pshm_drv->shm_base_addr))
- break;
- /*
- * Check whether offset is within buffer limits
- * (higher).
- */
- if (pck_desc->frm_ofs >
- ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
- pbuf->len))
- break;
-
- /* Calculate offset from start of buffer. */
- frm_buf_ofs =
- pck_desc->frm_ofs - (pbuf->phy_addr -
- pshm_drv->shm_base_addr);
-
- /*
- * Calculate offset and length of CAIF packet while
- * taking care of the shared memory header.
- */
- frm_pck_ofs =
- frm_buf_ofs + SHM_HDR_LEN +
- (*(pbuf->desc_vptr + frm_buf_ofs));
- frm_pck_len =
- (pck_desc->frm_len - SHM_HDR_LEN -
- (*(pbuf->desc_vptr + frm_buf_ofs)));
-
- /* Check whether CAIF packet is within buffer limits */
- if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
- break;
-
- /* Get a suitable CAIF packet and copy in data. */
- skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
- frm_pck_len + 1);
-
- if (skb == NULL) {
- pr_info("OOM: Try next frame in descriptor\n");
- break;
- }
-
- p = skb_put(skb, frm_pck_len);
- memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = pshm_drv->pshm_dev->pshm_netdev;
-
- /* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
-
- if (!ret) {
- pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_packets++;
- pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_bytes += pck_desc->frm_len;
- } else
- ++pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_dropped;
- /* Move to next packet descriptor. */
- pck_desc++;
- }
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
- list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- }
-
- /* Schedule the work queue. if required */
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
-}
-
-static void shm_tx_work_func(struct work_struct *tx_work)
-{
- u32 mbox_msg;
- unsigned int frmlen, avail_emptybuff, append = 0;
- unsigned long flags = 0;
- struct buf_list *pbuf = NULL;
- struct shmdrv_layer *pshm_drv;
- struct shm_caif_frm *frm;
- struct sk_buff *skb;
- struct shm_pck_desc *pck_desc;
- struct list_head *pos;
-
- pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
-
- do {
- /* Initialize mailbox message. */
- mbox_msg = 0x00;
- avail_emptybuff = 0;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check for pending receive buffers. */
- if (!list_empty(&pshm_drv->rx_pend_list)) {
-
- pbuf = list_entry(pshm_drv->rx_pend_list.next,
- struct buf_list, list);
-
- list_del_init(&pbuf->list);
- list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
- /*
- * Value index is never changed,
- * so read access should be safe.
- */
- mbox_msg |= SHM_SET_EMPTY(pbuf->index);
- }
-
- skb = skb_peek(&pshm_drv->sk_qhead);
-
- if (skb == NULL)
- goto send_msg;
- /* Check the available no. of buffers in the empty list */
- list_for_each(pos, &pshm_drv->tx_empty_list)
- avail_emptybuff++;
-
- if ((avail_emptybuff < LOW_WATERMARK) &&
- pshm_drv->tx_empty_available) {
- /* Update blocking condition. */
- pshm_drv->tx_empty_available = 0;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_OFF);
- spin_lock_irqsave(&pshm_drv->lock, flags);
- }
- /*
- * We simply return back to the caller if we do not have space
- * either in Tx pending list or Tx empty list. In this case,
- * we hold the received skb in the skb list, waiting to
- * be transmitted once Tx buffers become available
- */
- if (list_empty(&pshm_drv->tx_empty_list))
- goto send_msg;
-
- /* Get the first free Tx buffer. */
- pbuf = list_entry(pshm_drv->tx_empty_list.next,
- struct buf_list, list);
- do {
- if (append) {
- skb = skb_peek(&pshm_drv->sk_qhead);
- if (skb == NULL)
- break;
- }
-
- frm = (struct shm_caif_frm *)
- (pbuf->desc_vptr + pbuf->frm_ofs);
-
- frm->hdr_ofs = 0;
- frmlen = 0;
- frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
-
- /* Add tail padding if needed. */
- if (frmlen % SHM_FRM_PAD_LEN)
- frmlen += SHM_FRM_PAD_LEN -
- (frmlen % SHM_FRM_PAD_LEN);
-
- /*
- * Verify that packet, header and additional padding
- * can fit within the buffer frame area.
- */
- if (frmlen >= (pbuf->len - pbuf->frm_ofs))
- break;
-
- if (!append) {
- list_del_init(&pbuf->list);
- append = 1;
- }
-
- skb = skb_dequeue(&pshm_drv->sk_qhead);
- if (skb == NULL)
- break;
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, pbuf->desc_vptr +
- pbuf->frm_ofs + SHM_HDR_LEN +
- frm->hdr_ofs, skb->len);
-
- pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
- pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
- frmlen;
- dev_kfree_skb_irq(skb);
-
- /* Fill in the shared memory packet descriptor area. */
- pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
- /* Forward to current frame. */
- pck_desc += pbuf->frames;
- pck_desc->frm_ofs = (pbuf->phy_addr -
- pshm_drv->shm_base_addr) +
- pbuf->frm_ofs;
- pck_desc->frm_len = frmlen;
- /* Terminate packet descriptor area. */
- pck_desc++;
- pck_desc->frm_ofs = 0;
- /* Update buffer parameters. */
- pbuf->frames++;
- pbuf->frm_ofs += frmlen + (frmlen % 32);
-
- } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
-
- /* Assign buffer as full. */
- list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
- append = 0;
- mbox_msg |= SHM_SET_FULL(pbuf->index);
-send_msg:
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- if (mbox_msg)
- pshm_drv->pshm_dev->pshmdev_mbxsend
- (pshm_drv->pshm_dev->shm_id, mbox_msg);
- } while (mbox_msg);
-}
-
-static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
-{
- struct shmdrv_layer *pshm_drv;
-
- pshm_drv = netdev_priv(shm_netdev);
-
- skb_queue_tail(&pshm_drv->sk_qhead, skb);
-
- /* Schedule Tx work queue. for deferred processing of skbs*/
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
- return 0;
-}
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = shm_netdev_open,
- .ndo_stop = shm_netdev_close,
- .ndo_start_xmit = shm_netdev_tx,
-};
-
-static void shm_netdev_setup(struct net_device *pshm_netdev)
-{
- struct shmdrv_layer *pshm_drv;
- pshm_netdev->netdev_ops = &netdev_ops;
-
- pshm_netdev->mtu = CAIF_MAX_MTU;
- pshm_netdev->type = ARPHRD_CAIF;
- pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
- pshm_netdev->tx_queue_len = 0;
- pshm_netdev->destructor = free_netdev;
-
- pshm_drv = netdev_priv(pshm_netdev);
-
- /* Initialize structures in a clean state. */
- memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
-
- pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
-}
-
-int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
-{
- int result, j;
- struct shmdrv_layer *pshm_drv = NULL;
-
- pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
- "cfshm%d", shm_netdev_setup);
- if (!pshm_dev->pshm_netdev)
- return -ENOMEM;
-
- pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
- pshm_drv->pshm_dev = pshm_dev;
-
- /*
- * Initialization starts with the verification of the
- * availability of MBX driver by calling its setup function.
- * MBX driver must be available by this time for proper
- * functioning of SHM driver.
- */
- if ((pshm_dev->pshmdev_mbxsetup
- (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
- pr_warn("Could not config. SHM Mailbox,"
- " Bailing out.....\n");
- free_netdev(pshm_dev->pshm_netdev);
- return -ENODEV;
- }
-
- skb_queue_head_init(&pshm_drv->sk_qhead);
-
- pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
- " INSTANCE AT pshm_drv =0x%p\n",
- pshm_drv->pshm_dev->shm_id, pshm_drv);
-
- if (pshm_dev->shm_total_sz <
- (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
-
- pr_warn("ERROR, Amount of available"
- " Phys. SHM cannot accommodate current SHM "
- "driver configuration, Bailing out ...\n");
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
-
- pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
- pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
-
- if (pshm_dev->shm_loopback)
- pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
- else
- pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
- (NR_TX_BUF * TX_BUF_SZ);
-
- spin_lock_init(&pshm_drv->lock);
- INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
- INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
- INIT_LIST_HEAD(&pshm_drv->tx_full_list);
-
- INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
- INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
- INIT_LIST_HEAD(&pshm_drv->rx_full_list);
-
- INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
- INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
-
- pshm_drv->pshm_tx_workqueue =
- create_singlethread_workqueue("shm_tx_work");
- pshm_drv->pshm_rx_workqueue =
- create_singlethread_workqueue("shm_rx_work");
-
- for (j = 0; j < NR_TX_BUF; j++) {
- struct buf_list *tx_buf =
- kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
- if (tx_buf == NULL) {
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
- tx_buf->index = j;
- tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
- tx_buf->len = TX_BUF_SZ;
- tx_buf->frames = 0;
- tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
-
- if (pshm_dev->shm_loopback)
- tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
- else
- /*
- * FIXME: the result of ioremap is not a pointer - arnd
- */
- tx_buf->desc_vptr =
- ioremap(tx_buf->phy_addr, TX_BUF_SZ);
-
- list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
- }
-
- for (j = 0; j < NR_RX_BUF; j++) {
- struct buf_list *rx_buf =
- kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
- if (rx_buf == NULL) {
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
- rx_buf->index = j;
- rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
- rx_buf->len = RX_BUF_SZ;
-
- if (pshm_dev->shm_loopback)
- rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
- else
- rx_buf->desc_vptr =
- ioremap(rx_buf->phy_addr, RX_BUF_SZ);
- list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
- }
-
- pshm_drv->tx_empty_available = 1;
- result = register_netdev(pshm_dev->pshm_netdev);
- if (result)
- pr_warn("ERROR[%d], SHM could not, "
- "register with NW FRMWK Bailing out ...\n", result);
-
- return result;
-}
-
-void caif_shmcore_remove(struct net_device *pshm_netdev)
-{
- struct buf_list *pbuf;
- struct shmdrv_layer *pshm_drv = NULL;
-
- pshm_drv = netdev_priv(pshm_netdev);
-
- while (!(list_empty(&pshm_drv->tx_pend_list))) {
- pbuf =
- list_entry(pshm_drv->tx_pend_list.next,
- struct buf_list, list);
-
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->tx_full_list))) {
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->tx_empty_list))) {
- pbuf =
- list_entry(pshm_drv->tx_empty_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_full_list))) {
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_pend_list))) {
- pbuf =
- list_entry(pshm_drv->tx_pend_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_empty_list))) {
- pbuf =
- list_entry(pshm_drv->rx_empty_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- /* Destroy work queues. */
- destroy_workqueue(pshm_drv->pshm_tx_workqueue);
- destroy_workqueue(pshm_drv->pshm_rx_workqueue);
-
- unregister_netdev(pshm_netdev);
-}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9862b2e07644..e456b70933c2 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9X5
+ depends on ARM
---help---
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 44f363792b59..db52f4414def 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
@@ -155,19 +156,20 @@ struct at91_priv {
canid_t mb0_id;
};
-static const struct at91_devtype_data at91_devtype_data[] = {
- [AT91_DEVTYPE_SAM9263] = {
- .rx_first = 1,
- .rx_split = 8,
- .rx_last = 11,
- .tx_shift = 2,
- },
- [AT91_DEVTYPE_SAM9X5] = {
- .rx_first = 0,
- .rx_split = 4,
- .rx_last = 5,
- .tx_shift = 1,
- },
+static const struct at91_devtype_data at91_at91sam9263_data = {
+ .rx_first = 1,
+ .rx_split = 8,
+ .rx_last = 11,
+ .tx_shift = 2,
+ .type = AT91_DEVTYPE_SAM9263,
+};
+
+static const struct at91_devtype_data at91_at91sam9x5_data = {
+ .rx_first = 0,
+ .rx_split = 4,
+ .rx_last = 5,
+ .tx_shift = 1,
+ .type = AT91_DEVTYPE_SAM9X5,
};
static const struct can_bittiming_const at91_bittiming_const = {
@@ -1249,10 +1251,42 @@ static struct attribute_group at91_sysfs_attr_group = {
.attrs = at91_sysfs_attrs,
};
+#if defined(CONFIG_OF)
+static const struct of_device_id at91_can_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9x5-can",
+ .data = &at91_at91sam9x5_data,
+ }, {
+ .compatible = "atmel,at91sam9263-can",
+ .data = &at91_at91sam9263_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
+#else
+#define at91_can_dt_ids NULL
+#endif
+
+static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
+ if (!match) {
+ dev_err(&pdev->dev, "no matching node found in dtb\n");
+ return NULL;
+ }
+ return (const struct at91_devtype_data *)match->data;
+ }
+ return (const struct at91_devtype_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
static int at91_can_probe(struct platform_device *pdev)
{
const struct at91_devtype_data *devtype_data;
- enum at91_devtype devtype;
struct net_device *dev;
struct at91_priv *priv;
struct resource *res;
@@ -1260,8 +1294,12 @@ static int at91_can_probe(struct platform_device *pdev)
void __iomem *addr;
int err, irq;
- devtype = pdev->id_entry->driver_data;
- devtype_data = &at91_devtype_data[devtype];
+ devtype_data = at91_can_get_driver_data(pdev);
+ if (!devtype_data) {
+ dev_err(&pdev->dev, "no driver data\n");
+ err = -ENODEV;
+ goto exit;
+ }
clk = clk_get(&pdev->dev, "can_clk");
if (IS_ERR(clk)) {
@@ -1310,7 +1348,6 @@ static int at91_can_probe(struct platform_device *pdev)
priv->dev = dev;
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
- priv->devtype_data.type = devtype;
priv->clk = clk;
priv->pdata = pdev->dev.platform_data;
priv->mb0_id = 0x7ff;
@@ -1373,10 +1410,10 @@ static int at91_can_remove(struct platform_device *pdev)
static const struct platform_device_id at91_can_id_table[] = {
{
.name = "at91_can",
- .driver_data = AT91_DEVTYPE_SAM9263,
+		.driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
}, {
.name = "at91sam9x5_can",
- .driver_data = AT91_DEVTYPE_SAM9X5,
+		.driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
}, {
/* sentinel */
}
@@ -1389,6 +1426,7 @@ static struct platform_driver at91_can_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
+ .of_match_table = at91_can_dt_ids,
},
.id_table = at91_can_id_table,
};
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 6a0532176b69..d4a15e82bfc0 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -412,7 +412,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
return 0;
}
-irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
+static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct bfin_can_priv *priv = netdev_priv(dev);
@@ -504,7 +504,7 @@ static int bfin_can_close(struct net_device *dev)
return 0;
}
-struct net_device *alloc_bfin_candev(void)
+static struct net_device *alloc_bfin_candev(void)
{
struct net_device *dev;
struct bfin_can_priv *priv;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index f32b9fc6a983..2b620c8aa13a 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -1138,9 +1138,11 @@ static int mcp251x_can_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM
-static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+
+static int mcp251x_can_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
struct net_device *net = priv->net;
@@ -1170,8 +1172,9 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
return 0;
}
-static int mcp251x_can_resume(struct spi_device *spi)
+static int mcp251x_can_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
@@ -1191,9 +1194,13 @@ static int mcp251x_can_resume(struct spi_device *spi)
enable_irq(spi->irq);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
+ mcp251x_can_resume);
+#define MCP251X_PM_OPS (&mcp251x_can_pm_ops)
+
#else
-#define mcp251x_can_suspend NULL
-#define mcp251x_can_resume NULL
+#define MCP251X_PM_OPS NULL
#endif
static const struct spi_device_id mcp251x_id_table[] = {
@@ -1207,29 +1214,15 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = MCP251X_PM_OPS,
},
.id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = mcp251x_can_remove,
- .suspend = mcp251x_can_suspend,
- .resume = mcp251x_can_resume,
};
-
-static int __init mcp251x_can_init(void)
-{
- return spi_register_driver(&mcp251x_can_driver);
-}
-
-static void __exit mcp251x_can_exit(void)
-{
- spi_unregister_driver(&mcp251x_can_driver);
-}
-
-module_init(mcp251x_can_init);
-module_exit(mcp251x_can_exit);
+module_spi_driver(mcp251x_can_driver);
MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
"Christian Pellegrin <chripell@evolware.org>");
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index a175d0be1ae1..ee705771bd2c 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev)
/* allocate a new skb for next time receive */
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
- if (!new_skb) {
- pr_notice("init: low on mem - packet dropped\n");
+ if (!new_skb)
goto init_error;
- }
+
skb_reserve(new_skb, NET_IP_ALIGN);
/* Invidate the data cache of skb->data range when it is write back
* cache. It will prevent overwritting the new data from DMA
@@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev)
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 0be2195e5034..269295403fc4 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,35 +1464,23 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->tx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(&dev->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error3;
}
- memset(greth->tx_bd_base, 0, 1024);
-
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->rx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(greth->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error4;
}
- memset(greth->rx_bd_base, 0, 1024);
-
/* Get MAC address from: module param, OF property or ID prom */
for (i = 0; i < 6; i++) {
if (macaddr[i] != 0)
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 6e722dc37db7..65926a956575 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- printk ("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3789affbc0e5..0866e7627433 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 60e2b701afe7..9793767996a2 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
dev->stats.rx_packets++;
} else {
am_writeword (dev, hdraddr + 2, RMD_OWN);
- printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 98f4522fd17b..c178eb4c8166 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 84219df72f51..e8d0ef508f48 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
@@ -1149,9 +1147,7 @@ static struct net_device *atarilance_dev;
static int __init atarilance_module_init(void)
{
atarilance_dev = atarilance_probe(-1);
- if (IS_ERR(atarilance_dev))
- return PTR_ERR(atarilance_dev);
- return 0;
+ return PTR_RET(atarilance_dev);
}
static void __exit atarilance_module_exit(void)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index de774d419144..688aede742c7 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
if (skb == NULL) {
- netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index baca0bd1b393..3d86ffeb4e15 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == 0) {
- printk("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 9af3c307862c..a51497c9d2af 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,9 +188,7 @@ static struct net_device *dev_mvme147_lance;
int __init init_module(void)
{
dev_mvme147_lance = mvme147lance_probe(-1);
- if (IS_ERR(dev_mvme147_lance))
- return PTR_ERR(dev_mvme147_lance);
- return 0;
+ return PTR_RET(dev_mvme147_lance);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 013b65108536..26fc0ce0faa3 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
- return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
+ return PTR_RET(dev_ni65);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 797f847edf13..ed2130727643 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (skb == NULL) {
- netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 74b3891b6483..4375abe61da1 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
-
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
@@ -943,9 +940,7 @@ static struct net_device *sun3lance_dev;
int __init init_module(void)
{
sun3lance_dev = sun3lance_probe(-1);
- if (IS_ERR(sun3lance_dev))
- return PTR_ERR(sun3lance_dev);
- return 0;
+ return PTR_RET(sun3lance_dev);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 6a40290d3727..f47b780892e9 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1377,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
dma_alloc_coherent(&op->dev,
sizeof(struct lance_init_block),
&lp->init_block_dvma, GFP_ATOMIC);
- if (!lp->init_block_mem) {
- printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+ if (!lp->init_block_mem)
goto fail;
- }
+
lp->pio_buffer = 0;
lp->init_ring = lance_init_ring_dvma;
lp->rx = lance_rx_dvma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a206779c68cf..4ce8ceb62205 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev)
/* Allocate the DMA ring buffers */
mp->tx_ring = dma_alloc_coherent(mp->device,
- N_TX_RING * MACE_BUFF_SIZE,
- &mp->tx_ring_phys, GFP_KERNEL);
- if (mp->tx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+ N_TX_RING * MACE_BUFF_SIZE,
+ &mp->tx_ring_phys, GFP_KERNEL);
+ if (mp->tx_ring == NULL)
goto out1;
- }
mp->rx_ring = dma_alloc_coherent(mp->device,
- N_RX_RING * MACE_BUFF_SIZE,
- &mp->rx_ring_phys, GFP_KERNEL);
- if (mp->rx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+ N_RX_RING * MACE_BUFF_SIZE,
+ &mp->rx_ring_phys, GFP_KERNEL);
+ if (mp->rx_ring == NULL)
goto out2;
- }
mace_dma_off(dev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index f73d5609439a..7e0a822289c3 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
- if (skb == NULL) {
- netdev_warn(netdev,
- "Memory squeeze, deferring packet\n");
+ if (skb == NULL)
goto skip_pkt;
- }
+
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5b0d9931c720..9948fee28ae5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2774,7 +2774,7 @@ static int atl1_close(struct net_device *netdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atl1_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1278b47022e0..a046b6ff847c 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
/* alloc new buffer */
skb = netdev_alloc_skb_ip_align(netdev, rx_size);
if (NULL == skb) {
- printk(KERN_WARNING
- "%s: Mem squeeze, deferring packet.\n",
- netdev->name);
/*
* Check that some rx space is free. If not,
* free one and mark stats->rx_dropped++.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7d81e059e811..0b3e23ec37f7 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
@@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
const char *clk_name;
- unsigned int iomem_size;
int i, ret;
/* stop if shared driver failed, assume driver->probe will be
@@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- iomem_size = resource_size(res_mem);
- if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
- ret = -EBUSY;
- goto out;
- }
-
- priv->base = ioremap(res_mem->start, iomem_size);
+ priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
if (priv->base == NULL) {
ret = -ENOMEM;
- goto out_release_mem;
+ goto out;
}
+
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
@@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->mac_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
- goto out_unmap;
+ goto out;
}
- clk_enable(priv->mac_clk);
+ clk_prepare_enable(priv->mac_clk);
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->phy_clk = NULL;
goto out_put_clk_mac;
}
- clk_enable(priv->phy_clk);
+ clk_prepare_enable(priv->phy_clk);
}
/* do minimal hardware init to be able to probe mii bus */
@@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
* if a slave is not present on hw */
bus->phy_mask = ~(1 << priv->phy_id);
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto out_free_mdio;
@@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
return 0;
out_unregister_mdio:
- if (priv->mii_bus) {
+ if (priv->mii_bus)
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
- }
out_free_mdio:
if (priv->mii_bus)
@@ -1807,19 +1798,13 @@ out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
out_put_clk_mac:
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
-
-out_unmap:
- iounmap(priv->base);
-
-out_release_mem:
- release_mem_region(res_mem->start, iomem_size);
out:
free_netdev(dev);
return ret;
@@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- struct resource *res;
/* stop netdevice */
dev = platform_get_drvdata(pdev);
@@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
if (priv->has_phy) {
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
} else {
struct bcm63xx_enet_platform_data *pd;
@@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev)
bcm_enet_mdio_write_mii);
}
- /* release device resources */
- iounmap(priv->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
/* disable hw block clocks */
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
platform_set_drvdata(pdev, NULL);
@@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = {
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
struct resource *res;
- unsigned int iomem_size;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- iomem_size = resource_size(res);
- if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
- return -EBUSY;
-
- bcm_enet_shared_base = ioremap(res->start, iomem_size);
- if (!bcm_enet_shared_base) {
- release_mem_region(res->start, iomem_size);
+ bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!bcm_enet_shared_base)
return -ENOMEM;
- }
+
return 0;
}
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
- struct resource *res;
-
- iounmap(bcm_enet_shared_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
return 0;
}
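For context on the bcm63xx_enet conversion above, the probe path now lets a devm_* helper own the MMIO window (so there is nothing to iounmap or release on the error and remove paths) and pairs clk_prepare_enable() with clk_disable_unprepare() as the common clock framework expects. A sketch of that shape under stated assumptions; example_probe() and the "enet" clock name are placeholders:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* claims and maps the region; released automatically on unbind */
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -ENOMEM;	/* region busy or ioremap failed */

	clk = clk_get(&pdev->dev, "enet");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* prepare + enable in one call; undone with clk_disable_unprepare() */
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	platform_set_drvdata(pdev, clk);
	return 0;
}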
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index da5f4397f87c..eec0af45b859 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>
@@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
/* Alloc skb */
slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!slot->skb) {
- bgmac_err(bgmac, "Allocation of skb failed!\n");
+ if (!slot->skb)
return -ENOMEM;
- }
/* Poison - if everything goes fine, hardware will overwrite it */
rx = (struct bgmac_rx_header *)slot->skb->data;
@@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
};
/**************************************************
+ * MII
+ **************************************************/
+
+static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ return bgmac_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ return bgmac_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+static int bgmac_mii_register(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus;
+ int i, err = 0;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "bgmac mii bus";
+ sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
+ bgmac->core->core_unit);
+ mii_bus->priv = bgmac;
+ mii_bus->read = bgmac_mii_read;
+ mii_bus->write = bgmac_mii_write;
+ mii_bus->parent = &bgmac->core->dev;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
+
+ mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+ if (!mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_free_bus;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ bgmac_err(bgmac, "Registration of mii bus failed\n");
+ goto err_free_irq;
+ }
+
+ bgmac->mii_bus = mii_bus;
+
+ return err;
+
+err_free_irq:
+ kfree(mii_bus->irq);
+err_free_bus:
+ mdiobus_free(mii_bus);
+ return err;
+}
+
+static void bgmac_mii_unregister(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus = bgmac->mii_bus;
+
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
+
+/**************************************************
* BCMA bus ops
**************************************************/
@@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ err = bgmac_mii_register(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register MDIO\n");
+ err = -ENOTSUPP;
+ goto err_dma_free;
+ }
+
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
err = -ENOTSUPP;
- goto err_dma_free;
+ goto err_mii_unregister;
}
netif_carrier_off(net_dev);
@@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core)
return 0;
+err_mii_unregister:
+ bgmac_mii_unregister(bgmac);
err_dma_free:
bgmac_dma_free(bgmac);
@@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core)
netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
+ bgmac_mii_unregister(bgmac);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4ede614c81f8..98d4b5fcc070 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -399,6 +399,7 @@ struct bgmac {
struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
struct net_device *net_dev;
struct napi_struct napi;
+ struct mii_bus *mii_bus;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2f0ba8f2fd6c..e709296e3b85 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
sizeof(struct statistics_block);
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ &bp->status_blk_mapping,
+ GFP_KERNEL | __GFP_ZERO);
if (status_blk == NULL)
goto alloc_mem_err;
- memset(status_blk, 0, bp->status_stats_size);
-
bnapi = &bp->bnx2_napi[0];
bnapi->status_blk.msi = status_blk;
bnapi->hw_tx_cons_ptr =
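The bnx2 hunk above, like several others in this series, swaps an allocate-then-memset pair for a single zeroed allocation. A small sketch of the pattern; the function and its arguments are illustrative, not fields of any driver touched here:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Passing __GFP_ZERO to dma_alloc_coherent() makes the explicit memset()
 * that used to follow the call redundant.
 */
static void *example_alloc_block(struct device *dev, size_t size,
				 dma_addr_t *mapping)
{
	/* was: p = dma_alloc_coherent(dev, size, mapping, GFP_KERNEL);
	 *      if (p)
	 *              memset(p, 0, size);
	 */
	return dma_alloc_coherent(dev, size, mapping,
				  GFP_KERNEL | __GFP_ZERO);
}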
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e4605a965084..c6303428f9e9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t {
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
-#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
union host_hc_status_block status_blk;
/* chip independed shortcuts into sb structure */
@@ -613,9 +612,10 @@ struct bnx2x_fastpath {
* START_BD - describes the packet
* START_BD(split) - includes unpaged data segment for GSO
* PARSING_BD - for TSO and CSUM data
+ * PARSING_BD2 - for encapsulation data
* Frag BDs - describe pages for frags
*/
-#define BDS_PER_TX_PKT 3
+#define BDS_PER_TX_PKT 4
#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
@@ -730,18 +730,24 @@ struct bnx2x_fastpath {
#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
skb->csum_offset))
-#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
-#define XMIT_PLAIN 0
-#define XMIT_CSUM_V4 0x1
-#define XMIT_CSUM_V6 0x2
-#define XMIT_CSUM_TCP 0x4
-#define XMIT_GSO_V4 0x8
-#define XMIT_GSO_V6 0x10
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 (1 << 0)
+#define XMIT_CSUM_V6 (1 << 1)
+#define XMIT_CSUM_TCP (1 << 2)
+#define XMIT_GSO_V4 (1 << 3)
+#define XMIT_GSO_V6 (1 << 4)
+#define XMIT_CSUM_ENC_V4 (1 << 5)
+#define XMIT_CSUM_ENC_V6 (1 << 6)
+#define XMIT_GSO_ENC_V4 (1 << 7)
+#define XMIT_GSO_ENC_V6 (1 << 8)
-#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
-#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
/* stuff added to make the code fit 80Col */
#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -1215,14 +1221,16 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_HYPERVISOR_VLAN,
};
struct bnx2x_prev_path_list {
+ struct list_head list;
u8 bus;
u8 slot;
u8 path;
- struct list_head list;
+ u8 aer;
u8 undi;
};
@@ -1269,6 +1277,8 @@ struct bnx2x {
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
#ifdef CONFIG_BNX2X_SRIOV
+ /* protects vf2pf mailbox from simultaneous access */
+ struct mutex vf2pf_mutex;
/* vf pf channel mailbox contains request and response buffers */
struct bnx2x_vf_mbx_msg *vf2pf_mbox;
dma_addr_t vf2pf_mbox_mapping;
@@ -1281,6 +1291,8 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin;
+
+ u16 requested_nr_virtfn;
#endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev;
@@ -1944,12 +1956,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x) \
- memset(x, 0, size); \
- } while (0)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -2286,7 +2295,7 @@ static const u32 dmae_reg_go_c[] = {
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
#define BNX2X_MF_SD_PROTOCOL(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c2..352e58ede4d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
* Compute number of aggregated segments, and gso_type.
*/
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
- u16 len_on_bd, unsigned int pkt_len)
+ u16 len_on_bd, unsigned int pkt_len,
+ u16 num_of_coalesced_segs)
{
/* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
@@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
* to skb_shinfo(skb)->gso_segs
*/
- NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
- skb_shinfo(skb)->gso_size);
+ NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
- le16_to_cpu(cqe->pkt_len));
+ le16_to_cpu(cqe->pkt_len),
+ le16_to_cpu(cqe->num_of_coalesced_segs));
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@ -2009,7 +2010,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
* Cleans the objects that have internal lists without sending
* ramrods. Should be run when interrupts are disabled.
*/
-static void bnx2x_squeeze_objects(struct bnx2x *bp)
+void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@ -2774,7 +2775,7 @@ load_error0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
u8 rc = 0, cos, i;
@@ -3086,11 +3087,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
*/
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata,
- struct sw_tx_bd *tx_buf,
- struct eth_tx_start_bd **tx_bd, u16 hlen,
- u16 bd_prod, int nbd)
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod)
{
struct eth_tx_start_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd;
@@ -3098,11 +3099,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */
- h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
- h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -3131,7 +3131,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
-static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
__sum16 tsum = (__force __sum16) csum;
@@ -3146,30 +3146,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
return bswab16(tsum);
}
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
u32 rc;
+ __u8 prot = 0;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- rc = XMIT_PLAIN;
+ return XMIT_PLAIN;
- else {
- if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
- rc = XMIT_CSUM_V6;
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- rc |= XMIT_CSUM_TCP;
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ prot = ipv6_hdr(skb)->nexthdr;
+ } else {
+ rc = XMIT_CSUM_V4;
+ prot = ip_hdr(skb)->protocol;
+ }
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ rc |= XMIT_CSUM_ENC_V6;
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
} else {
- rc = XMIT_CSUM_V4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_ENC_V4;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
}
}
+ if (prot == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb))
- rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
- else if (skb_is_gso(skb))
- rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else if (skb_is_gso(skb)) {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
return rc;
}
@@ -3254,14 +3271,23 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
+ struct ipv6hdr *ipv6;
+
*parsing_data |= (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
ETH_TX_PARSE_BD_E2_LSO_MSS;
- if ((xmit_type & XMIT_GSO_V6) &&
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else if (xmit_type & XMIT_GSO_V6)
+ ipv6 = ipv6_hdr(skb);
+ else
+ ipv6 = NULL;
+
+ if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
@@ -3272,13 +3298,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
* @pbd: parse BD
* @xmit_type: xmit flags
*/
-static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
- pbd->tcp_flags = pbd_tcp_flags(skb);
+ pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@ -3298,6 +3324,40 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
}
/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data;
+ }
+
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_inner_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
+}
+
+/**
* bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
*
* @bp: driver handle
@@ -3305,15 +3365,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @parsing_data: data to be updated
* @xmit_type: xmit flags
*
- * 57712 related
+ * 57712/578xx related
*/
-static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3328,17 +3388,15 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
-static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
{
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
- if (xmit_type & XMIT_CSUM_V4)
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IP_CSUM;
- else
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IPV6;
+ if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3352,9 +3410,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
* @pbd: parse BD to be updated
* @xmit_type: xmit flags
*/
-static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
@@ -3400,6 +3458,70 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
return hlen;
}
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd_e2,
+ struct eth_tx_parse_2nd_bd *pbd2,
+ u16 *global_data,
+ u32 xmit_type)
+{
+ u16 hlen_w = 0;
+ u8 outerip_off, outerip_len = 0;
+ /* from outer IP to transport */
+ hlen_w = (skb_inner_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ /* transport len */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen_w += inner_tcp_hdrlen(skb) >> 1;
+ else
+ hlen_w += sizeof(struct udphdr) >> 1;
+
+ pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+ if (xmit_type & XMIT_CSUM_ENC_V4) {
+ struct iphdr *iph = ip_hdr(skb);
+ pbd2->fw_ip_csum_wo_len_flags_frag =
+ bswab16(csum_fold((~iph->check) -
+ iph->tot_len - iph->frag_off));
+ } else {
+ pbd2->fw_ip_hdr_to_payload_w =
+ hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ }
+
+ pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+ pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_tcpudp_magic(
+ inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ outerip_len = ip_hdr(skb)->ihl << 1;
+ } else {
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_ipv6_magic(
+ &inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+ *global_data |=
+ outerip_off |
+ (!!(xmit_type & XMIT_CSUM_V6) <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+ (outerip_len <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+}
+
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -3415,6 +3537,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ struct eth_tx_parse_2nd_bd *pbd2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, txq_index;
@@ -3482,7 +3605,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mac_type = MULTICAST_ADDRESS;
}
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
(there will be no violation to FW restrictions) */
@@ -3530,12 +3653,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_PARSE_NBDS,
- 0);
- /* header nbd */
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+ /* header nbd: indirectly zero other flags! */
+ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3555,19 +3675,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
-#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp)) {
-#endif
+ if (IS_VF(bp))
tx_start_bd->vlan_or_ethertype =
cpu_to_le16(ntohs(eth->h_proto));
-#ifndef BNX2X_STOP_ON_ERROR
- } else {
+ else
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
- }
-#endif
}
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3577,23 +3694,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!CHIP_IS_E1x(bp)) {
pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- /* Set PBD in checksum offload case */
- if (xmit_type & XMIT_CSUM)
+
+ if (xmit_type & XMIT_CSUM_ENC) {
+ u16 global_data = 0;
+
+ /* Set PBD in enc checksum offload case */
+ hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+
+ /* turn on 2nd parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+ memset(pbd2, 0, sizeof(*pbd2));
+
+ pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ (skb_inner_network_header(skb) -
+ skb->data) >> 1;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ &global_data,
+ xmit_type);
+
+ pbd2->global_data = cpu_to_le16(global_data);
+
+ /* add an additional parsing BD indication to the start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS, 1);
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
+ }
- if (IS_MF_SI(bp) || IS_VF(bp)) {
- /* fill in the MAC addresses in the PBD - for local
- * switching
- */
- bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
- &pbd_e2->src_mac_addr_mid,
- &pbd_e2->src_mac_addr_lo,
+ /* Add the MACs to the parsing BD if this is a VF */
+ if (IS_VF(bp)) {
+ /* override GRE parameters in BD */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
eth->h_source);
- bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
- &pbd_e2->dst_mac_addr_mid,
- &pbd_e2->dst_mac_addr_lo,
+
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
}
@@ -3615,14 +3767,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED,
- "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
+ "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
- le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype));
@@ -3635,10 +3786,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
- if (unlikely(skb_headlen(skb) > hlen))
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ nbd++;
bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
&tx_start_bd, hlen,
- bd_prod, ++nbd);
+ bd_prod);
+ }
if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
@@ -3728,9 +3881,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
- pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
- pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
- pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2,
+ pbd_e2->data.mac_addr.dst_hi,
+ pbd_e2->data.mac_addr.dst_mid,
+ pbd_e2->data.mac_addr.dst_lo,
+ pbd_e2->data.mac_addr.src_hi,
+ pbd_e2->data.mac_addr.src_mid,
+ pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
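A note on the arithmetic in the encapsulation-aware helpers above: the E2 parsing BD fields carry offsets and lengths in units of 16-bit words, which is why byte distances taken from the skb header pointers are shifted right by one. A minimal illustrative helper, not driver code:

#include <linux/skbuff.h>
#include <linux/types.h>

static u16 example_inner_l4_offset_words(const struct sk_buff *skb)
{
	/* bytes from the start of the frame to the inner transport header,
	 * converted to 16-bit words as the parsing BD expects
	 */
	return (skb_inner_transport_header(skb) - skb->data) >> 1;
}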
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index aee7671ff4c1..54e1b149acb3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -50,13 +50,13 @@ extern int int_mode;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0, size); \
- } while (0)
+#define BNX2X_PCI_ALLOC(x, y, size) \
+do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+} while (0)
#define BNX2X_ALLOC(x, size) \
do { \
@@ -496,7 +496,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@ -834,7 +837,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -844,7 +847,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@ -970,6 +973,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
+ start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+
return bnx2x_func_state_change(bp, &func_params);
}
@@ -1396,4 +1402,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
*
*/
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index edfa67adf2f9..129d6b21317c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1393,10 +1393,9 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
u8 *data)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc = 0, phy_idx;
+ int rc = -EINVAL, phy_idx;
u8 *user_data = data;
- int remaining_len = ee->len, xfer_size;
- unsigned int page_off = ee->offset;
+ unsigned int start_addr = ee->offset, xfer_size = 0;
if (!netif_running(dev)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1405,21 +1404,52 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
}
phy_idx = bnx2x_get_cur_phy_idx(bp);
- bnx2x_acquire_phy_lock(bp);
- while (!rc && remaining_len > 0) {
- xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
- SFP_EEPROM_PAGE_SIZE : remaining_len;
+
+ /* Read A0 section */
+ if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+ xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+ else
+ xfer_size = ee->len;
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
&bp->link_params,
- page_off,
+ I2C_DEV_ADDR_A0,
+ start_addr,
xfer_size,
user_data);
- remaining_len -= xfer_size;
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+ return -EINVAL;
+ }
user_data += xfer_size;
- page_off += xfer_size;
+ start_addr += xfer_size;
}
- bnx2x_release_phy_lock(bp);
+ /* Read A2 section */
+ if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+ (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+ xfer_size = ee->len - xfer_size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+ xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A2,
+ start_addr,
+ xfer_size,
+ user_data);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+ return -EINVAL;
+ }
+ }
return rc;
}
@@ -1427,24 +1457,50 @@ static int bnx2x_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct bnx2x *bp = netdev_priv(dev);
- int phy_idx;
+ int phy_idx, rc;
+ u8 sff8472_comp, diag_type;
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
}
-
phy_idx = bnx2x_get_cur_phy_idx(bp);
- switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFPP_10G_FIBER:
- case ETH_PHY_SFP_1G_FIBER:
- case ETH_PHY_DA_TWINAX:
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_SFF_8472_COMP_ADDR,
+ SFP_EEPROM_SFF_8472_COMP_SIZE,
+ &sff8472_comp);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_DIAG_TYPE_ADDR,
+ SFP_EEPROM_DIAG_TYPE_SIZE,
+ &diag_type);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+ return -EINVAL;
+ }
+
+ if (!sff8472_comp ||
+ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- return 0;
- default:
- return -EOPNOTSUPP;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
+ return 0;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
@@ -3232,7 +3288,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev)
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .get_link = bnx2x_get_link,
+ .get_coalesce = bnx2x_get_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ if (IS_PF(bp))
+ SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ else /* vf */
+ SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
}
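Behind the reworked bnx2x_get_module_eeprom() above: ethtool presents the SFP+ module EEPROM as one flat range in which bytes [0, ETH_MODULE_SFF_8079_LEN) sit behind I2C address A0 and the remainder, up to ETH_MODULE_SFF_8472_LEN, behind A2 re-based to offset 0. A request is clamped at the A0 boundary first and the rest goes to A2. A sketch of that offset handling; the helper and its out-parameters are illustrative only:

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void example_split_module_read(u32 offset, u32 len,
				      u32 *a0_len, u32 *a2_off, u32 *a2_len)
{
	*a0_len = 0;
	*a2_off = 0;
	*a2_len = 0;

	/* part of the request that falls in the A0 section */
	if (offset < ETH_MODULE_SFF_8079_LEN) {
		*a0_len = min_t(u32, len, ETH_MODULE_SFF_8079_LEN - offset);
		offset += *a0_len;
		len -= *a0_len;
	}

	/* remainder falls in the A2 section, addressed from 0 */
	if (len && offset < ETH_MODULE_SFF_8472_LEN) {
		*a2_off = offset - ETH_MODULE_SFF_8079_LEN;
		*a2_len = min_t(u32, len, ETH_MODULE_SFF_8472_LEN - offset);
	}
}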
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index e5f808377c91..40f22c6794cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -30,31 +30,31 @@
* IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[316].base + ((pfId) * IRO[316].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[315].base + ((pfId) * IRO[315].m1))
+ (IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[307].base + ((pfId) * IRO[307].m1))
+ (IRO[308].base + ((pfId) * IRO[308].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[306].base + ((pfId) * IRO[306].m1))
+ (IRO[307].base + ((pfId) * IRO[307].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -114,7 +114,7 @@
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +136,32 @@
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
- (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
- IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[318].base + ((pfId) * IRO[318].m1))
+ (IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[280].base + ((pfId) * IRO[280].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +187,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 037860ecc343..12f00a40cdf0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -114,6 +114,10 @@ struct license_key {
#define EPIO_CFG_EPIO30 0x0000001f
#define EPIO_CFG_EPIO31 0x00000020
+struct mac_addr {
+ u32 upper;
+ u32 lower;
+};
struct shared_hw_cfg { /* NVRAM Offset */
/* Up to 16 bytes of NULL-terminated string */
@@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
- u32 reserved0[6]; /* 0x178 */
+ /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM
+ */
+ u32 sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ u32 reserved0[5]; /* 0x17c */
u32 aeu_int_mask; /* 0x190 */
@@ -2821,8 +2840,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 2
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3513,11 +3532,14 @@ struct client_init_tx_data {
#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
-#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
-#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
u8 default_vlan_flg;
u8 force_default_pri_flg;
- __le32 reserved3;
+ u8 tunnel_lso_inc_ip_id;
+ u8 refuse_outband_vlan_flg;
+ u8 tunnel_non_lso_pcsum_location;
+ u8 reserved1;
};
/*
@@ -3551,6 +3573,11 @@ struct client_update_ramrod_data {
__le16 silent_vlan_mask;
u8 silent_vlan_removal_flg;
u8 silent_vlan_change_flg;
+ u8 refuse_outband_vlan_flg;
+ u8 refuse_outband_vlan_change_flg;
+ u8 tx_switching_flg;
+ u8 tx_switching_change_flg;
+ __le32 reserved1;
__le32 echo;
};
@@ -3620,7 +3647,8 @@ struct eth_classify_header {
*/
struct eth_classify_mac_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd {
*/
struct eth_classify_pair_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data {
/*
- * Command for setting multicast classification for a client
+ * destination and source mac address.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_lo;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_hi;
+ __le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 src_mid;
+ __le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 reserved0;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved1;
+ u8 ip_hdr_start_inner_w;
+ __le16 pseudo_csum;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 pseudo_csum;
+ u8 ip_hdr_start_inner_w;
+ u8 reserved1;
+#endif
+};
+
+/* union for mac addresses and for tunneling data.
+ * considered as tunneling data only if (tunnel_exist == 1).
*/
+union eth_mac_addr_or_tunnel_data {
+ struct eth_mac_addresses mac_addr;
+ struct eth_tunnel_data tunnel_data;
+};
+
+/* Command for setting multicast classification for a client */
struct eth_multicast_rules_cmd {
u8 cmd_general_data;
#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
@@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd {
struct regpair reserved3;
};
-
/*
* parameters for multicast classification ramrod
*/
@@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data {
struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
};
-
/*
* Place holder for ramrods protocol specific data
*/
@@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
- __le32 __reserved2;
+ __le16 udp_4tuple_dst_port_mask;
+ __le16 udp_4tuple_dst_port_value;
u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
__le32 rss_key[T_ETH_RSS_KEY];
__le32 echo;
@@ -4115,6 +4205,23 @@ enum eth_tpa_update_command {
MAX_ETH_TPA_UPDATE_COMMAND
};
+/* In case of LSO over IPv4 tunnel, whether to increment
+ * IP ID on external IP header or internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+ EXT_HEADER,
+ INT_HEADER,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case tunnel exist and L4 checksum offload,
+ * the pseudo checksum location, on packet or on BD.
+ */
+enum eth_tunnel_non_lso_pcsum_location {
+ PCSUM_ON_PKT,
+ PCSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+};
/*
* Tx regular BD structure
@@ -4166,8 +4273,8 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
-#define ETH_TX_START_BD_RESREVED (0x1<<7)
-#define ETH_TX_START_BD_RESREVED_SHIFT 7
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
};
/*
@@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x {
* Tx parsing BD structure for ETH E2
*/
struct eth_tx_parse_bd_e2 {
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
+ union eth_mac_addr_or_tunnel_data data;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 {
};
/*
- * The last BD in the BD memory will hold a pointer to the next BD memory
+ * Tx 2nd parsing BD structure for ETH packet
*/
+struct eth_tx_parse_2nd_bd {
+ __le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
+ __le16 reserved1;
+ u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ u8 reserved2;
+ u8 tunnel_udp_hdr_start_w;
+ u8 fw_ip_hdr_to_payload_w;
+ __le16 fw_ip_csum_wo_len_flags_frag;
+ __le16 hw_ip_id;
+ __le32 tcp_send_seq;
+};
+
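/* A minimal standalone sketch of how MASK/SHIFT define pairs like the ones
 * in eth_tx_parse_2nd_bd above are typically used to pack and unpack fields
 * of the 16-bit global_data word.  The DEMO_* names and helpers are
 * hypothetical; only the mask/shift values are copied from the defines above.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_IP_HDR_START_OUTER_W	(0xF<<0)
#define DEMO_IP_HDR_START_OUTER_W_SHIFT	0
#define DEMO_IP_HDR_LEN_OUTER_W		(0x1F<<8)
#define DEMO_IP_HDR_LEN_OUTER_W_SHIFT	8

static uint16_t demo_set_field(uint16_t word, uint16_t mask, int shift,
			       uint16_t val)
{
	/* clear the field, then place the new value under its mask */
	return (word & ~mask) | ((val << shift) & mask);
}

static uint16_t demo_get_field(uint16_t word, uint16_t mask, int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint16_t global_data = 0;

	global_data = demo_set_field(global_data, DEMO_IP_HDR_START_OUTER_W,
				     DEMO_IP_HDR_START_OUTER_W_SHIFT, 7);
	global_data = demo_set_field(global_data, DEMO_IP_HDR_LEN_OUTER_W,
				     DEMO_IP_HDR_LEN_OUTER_W_SHIFT, 5);

	assert(demo_get_field(global_data, DEMO_IP_HDR_START_OUTER_W,
			      DEMO_IP_HDR_START_OUTER_W_SHIFT) == 7);
	assert(demo_get_field(global_data, DEMO_IP_HDR_LEN_OUTER_W,
			      DEMO_IP_HDR_LEN_OUTER_W_SHIFT) == 5);
	return 0;
}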
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
struct eth_tx_next_bd {
__le32 addr_lo;
__le32 addr_hi;
@@ -4252,6 +4397,7 @@ union eth_tx_bd_types {
struct eth_tx_bd reg_bd;
struct eth_tx_parse_bd_e1x parse_bd_e1x;
struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd;
struct eth_tx_next_bd next_bd;
};
@@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
MAX_COMMON_SPQE_CMD_ID
};
-
/*
* Per-protocol connection types
*/
@@ -4863,7 +5009,7 @@ struct vf_flr_event_data {
*/
struct malicious_vf_event_data {
u8 vf_id;
- u8 reserved0;
+ u8 err_id;
u16 reserved1;
u32 reserved2;
u32 reserved3;
@@ -4969,10 +5115,10 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
EVENT_RING_OPCODE_MULTICAST_RULES,
+ EVENT_RING_OPCODE_SET_TIMESYNC,
MAX_EVENT_RING_OPCODE
};
-
/*
* Modes for fairness algorithm
*/
@@ -5010,14 +5156,18 @@ struct flow_control_configuration {
*/
struct function_start_data {
u8 function_mode;
- u8 reserved;
+ u8 allow_npar_tx_switching;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
u8 network_cos_mode;
+ u8 dmae_cmd_id;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ __le16 reserved1[2];
};
-
struct function_update_data {
u8 vif_id_change_flg;
u8 afex_default_vlan_change_flg;
@@ -5027,14 +5177,19 @@ struct function_update_data {
__le16 afex_default_vlan;
u8 allowed_priorities;
u8 network_cos_mode;
+ u8 lb_mode_en_change_flg;
u8 lb_mode_en;
u8 tx_switch_suspend_change_flg;
u8 tx_switch_suspend;
u8 echo;
- __le16 reserved1;
+ u8 reserved1;
+ u8 update_gre_cfg_flg;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ u32 reserved3;
};
-
/*
* FW version stored in the Xstorm RAM
*/
@@ -5061,6 +5216,22 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
+/* GRE RSS Mode */
+enum gre_rss_mode {
+ GRE_OUTER_HEADERS_RSS,
+ GRE_INNER_HEADERS_RSS,
+ NVGRE_KEY_ENTROPY_RSS,
+ MAX_GRE_RSS_MODE
+};
+
+/* GRE Tunnel Mode */
+enum gre_tunnel_type {
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL,
+ L2GRE_TUNNEL,
+ IPGRE_TUNNEL,
+ MAX_GRE_TUNNEL_TYPE
+};
/*
* Dynamic Host-Coalescing - Driver(host) counters
@@ -5224,6 +5395,26 @@ enum ip_ver {
MAX_IP_VER
};
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ VF_PF_CHANNEL_NOT_READY,
+ ETH_ILLEGAL_BD_LENGTHS,
+ ETH_PACKET_TOO_SHORT,
+ ETH_PAYLOAD_TOO_BIG,
+ ETH_ILLEGAL_ETH_TYPE,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_TOO_MANY_BDS,
+ ETH_ZERO_HDR_NBDS,
+ ETH_START_BD_NOT_SET,
+ ETH_ILLEGAL_PARSE_NBDS,
+ ETH_IPV6_AND_CHECKSUM,
+ ETH_VLAN_FLG_INCORRECT,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_TUNNEL_NOT_SUPPORTED,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
/*
* Multi-function modes
@@ -5368,7 +5559,6 @@ struct protocol_common_spe {
union protocol_common_specific_data data;
};
-
/*
* The send queue element
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64a..6cc6c6374a92 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,6 +27,10 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8);
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -152,6 +156,7 @@
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
#define SFP_EEPROM_COMP_CODE_ADDR 0x3
@@ -3127,11 +3132,6 @@ static int bnx2x_bsc_read(struct link_params *params,
int rc = 0;
struct bnx2x *bp = params->bp;
- if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
- DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
- return -EINVAL;
- }
-
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
xfer_cnt);
@@ -3629,6 +3629,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
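/* An illustrative standalone sketch of what the WC_TX_DRIVER()/WC_TX_FIR()
 * helpers above do: fold the repeated "(value << FIELD_OFFSET) | ..."
 * expressions from the removed lines into a single packed register word.
 * The DEMO_*_OFFSET values are assumed placeholders; the real offsets come
 * from the MDIO register definitions, which are not part of this diff.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_POST2_COEFF_OFFSET	12	/* assumed field position */
#define DEMO_IDRIVER_OFFSET	8	/* assumed field position */
#define DEMO_IPRE_DRIVER_OFFSET	4	/* assumed field position */

#define DEMO_WC_TX_DRIVER(post2, idriver, ipre)		\
	(((post2) << DEMO_POST2_COEFF_OFFSET) |		\
	 ((idriver) << DEMO_IDRIVER_OFFSET) |		\
	 ((ipre) << DEMO_IPRE_DRIVER_OFFSET))

int main(void)
{
	/* same coefficients the patch writes for the KR lanes */
	uint16_t val = DEMO_WC_TX_DRIVER(0x02, 0x06, 0x09);

	printf("packed tx driver word: 0x%04x\n", val);
	return 0;
}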
static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -3728,7 +3738,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+ u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
an_adv |= (1<<5);
/* Enable CL37 1G Parallel Detect */
@@ -3753,20 +3763,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Set Transmit PMD settings */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
/* Configure the next lane if dual mode */
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
- ((0x02 <<
- MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3909,6 +3912,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+
/* Hold rxSeqStart */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3957,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
- tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
-
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
} else {
+ cfg_tap_val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
misc1_val |= 0x9;
- tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ /* TAP values are controlled by nvram if the value there isn't 0 */
+ if (tx_equal)
+ tap_val = (u16)tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ if (tx_drv_brdct)
+ tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
+ 0x06);
+ else
+ tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4105,15 +4120,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
/* Set Transmit PMD settings */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
- ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
- MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -4750,8 +4761,8 @@ void bnx2x_link_status_update(struct link_params *params,
port_mb[port].link_status));
/* Force link UP in non LOOPBACK_EXT loopback mode(s) */
- if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
- bp->link_params.loopback_mode != LOOPBACK_EXT)
+ if (params->loopback_mode != LOOPBACK_NONE &&
+ params->loopback_mode != LOOPBACK_EXT)
vars->link_status |= LINK_STATUS_LINK_UP;
if (bnx2x_eee_has_cap(params))
@@ -7758,7 +7769,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -7771,7 +7783,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | (dev_addr << 8)));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
@@ -7845,6 +7857,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
}
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
+ u8 dev_addr,
u16 addr, u8 byte_cnt,
u8 *o_buf, u8 is_init)
{
@@ -7869,7 +7882,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -7885,7 +7898,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -7896,6 +7910,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
+ /* Set 2-wire transfer rate of SFP+ module EEPROM
+ * to 100kHz since some DACs (direct attached cables) do
+ * not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ ((dev_addr << 8) | 1));
+
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -7968,26 +7991,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf)
{
- int rc = -EOPNOTSUPP;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+ u8 xfer_size;
+ u8 *user_data = o_buf;
+ read_sfp_module_eeprom_func_p read_func;
+
+ if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+ return -EINVAL;
+ }
+
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8726_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
- rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8727_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf, 0);
- break;
+ read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ while (!rc && (byte_cnt > 0)) {
+ xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ rc = read_func(phy, params, dev_addr, addr, xfer_size,
+ user_data, 0);
+ byte_cnt -= xfer_size;
+ user_data += xfer_size;
+ addr += xfer_size;
}
return rc;
}
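/* A standalone sketch of the paging loop added to
 * bnx2x_read_sfp_module_eeprom() above: an arbitrary byte_cnt is split into
 * chunks of at most SFP_EEPROM_PAGE_SIZE (16) bytes and handed to a
 * PHY-specific reader through a function pointer.  The demo_* reader below
 * is a made-up stand-in that only fills the buffer.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_SFP_EEPROM_PAGE_SIZE 16

typedef int (*demo_read_func_t)(uint16_t addr, uint8_t byte_cnt, uint8_t *o_buf);

static int demo_eeprom_read(uint16_t addr, uint8_t byte_cnt, uint8_t *o_buf)
{
	memset(o_buf, (uint8_t)addr, byte_cnt);	/* fake EEPROM contents */
	return 0;
}

static int demo_read_sfp_eeprom(demo_read_func_t read_func, uint16_t addr,
				uint16_t byte_cnt, uint8_t *o_buf)
{
	int rc = 0;
	uint8_t xfer_size;
	uint8_t *user_data = o_buf;

	while (!rc && byte_cnt > 0) {
		xfer_size = (byte_cnt > DEMO_SFP_EEPROM_PAGE_SIZE) ?
			    DEMO_SFP_EEPROM_PAGE_SIZE : byte_cnt;
		rc = read_func(addr, xfer_size, user_data);
		byte_cnt -= xfer_size;
		user_data += xfer_size;
		addr += xfer_size;
	}
	return rc;
}

int main(void)
{
	uint8_t buf[40];

	/* 40 bytes -> three transfers of 16, 16 and 8 bytes */
	if (demo_read_sfp_eeprom(demo_eeprom_read, 0x14, sizeof(buf), buf))
		printf("read failed\n");
	else
		printf("read %zu bytes in page-sized chunks\n", sizeof(buf));
	return 0;
}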
@@ -8004,6 +8045,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_CON_TYPE_ADDR,
2,
(u8 *)val) != 0) {
@@ -8021,6 +8063,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*/
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
&copper_module_type) != 0) {
@@ -8049,20 +8092,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
+ case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
SFP_EEPROM_COMP_CODE_LR_MASK |
SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ DP(NETIF_MSG_LINK, "1G SFP module detected\n");
gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
- phy->req_line_speed = SPEED_1000;
- if (!CHIP_IS_E1x(bp))
- gport = BP_PATH(bp) + (params->port << 1);
- netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
- " Current SFP module in port %d is not"
- " compliant with 10G Ethernet\n",
- gport);
+ if (phy->req_line_speed != SPEED_1000) {
+ phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp)) {
+ gport = BP_PATH(bp) +
+ (params->port << 1);
+ }
+ netdev_err(bp->dev,
+ "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+ gport);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8101,6 +8148,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
u8 options[SFP_EEPROM_OPTIONS_SIZE];
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_OPTIONS_ADDR,
SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
@@ -8167,6 +8215,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_VENDOR_NAME_ADDR,
SFP_EEPROM_VENDOR_NAME_SIZE,
(u8 *)vendor_name))
@@ -8175,6 +8224,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_PART_NO_ADDR,
SFP_EEPROM_PART_NO_SIZE,
(u8 *)vendor_pn))
@@ -8205,12 +8255,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
for (timeout = 0; timeout < 60; timeout++) {
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
- params, 1,
- 1, &val, 1);
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(
+ phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+ 1);
else
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
- &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params,
+ I2C_DEV_ADDR_A0,
+ 1, 1, &val);
if (rc == 0) {
DP(NETIF_MSG_LINK,
"SFP+ module initialization took %d ms\n",
@@ -8219,7 +8270,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
}
usleep_range(5000, 10000);
}
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+ 1, 1, &val);
return rc;
}
@@ -8376,15 +8428,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
val);
-
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -9528,8 +9571,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < ARRAY_SIZE(reg_set);
- i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -10281,7 +10323,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -12242,7 +12285,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_bmac_enable(params, vars, 1, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12261,7 +12304,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params,
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12521,6 +12564,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params->req_line_speed[0], params->req_flow_ctrl[0]);
DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
params->req_line_speed[1], params->req_flow_ctrl[1]);
+ DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -13446,8 +13490,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
}
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
- * since some switches tend to reinit the AN process and clear the
- * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * Since some switches tend to reinit the AN process and clear the
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
@@ -13465,8 +13509,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No BP\n");
+ }
return;
}
@@ -13482,7 +13528,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
- next_page);
+ next_page);
bnx2x_kr2_recovery(params, vars, phy);
}
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 56c2aae4e2c8..4df45234fdc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,9 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+
#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -54,6 +57,15 @@
#define SFP_EEPROM_SERIAL_SIZE 16
#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
#define SFP_EEPROM_DATE_SIZE 6
+#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -420,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8ce..fdfe33bc097b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the
- * parent connection). The statistics are zeroed when the parent
- * connection is initialized.
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
*/
__set_bit(BNX2X_Q_FLG_STATS, &flags);
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+ __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
#ifdef BNX2X_STOP_ON_ERROR
__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
struct eth_stats_info *ether_stat =
&bp->slowpath->drv_info_to_mcp.ether_stat;
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bp->sp_objs->mac_obj;
+ int i;
strlcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
- bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
-
+ /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ * mac_local field in ether_stat struct. The base address is offset by 2
+ * bytes to account for the field being 8 bytes but a mac address is
+ * only 6 bytes. Likewise, the stride for the get_n_elements function is
+ * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
+ * allocated by the ether_stat struct, so the macs will land in their
+ * proper positions.
+ */
+ for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ memset(ether_stat->mac_local + i, 0,
+ sizeof(ether_stat->mac_local[0]));
+ mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ ETH_ALEN);
ether_stat->mtu_size = bp->dev->mtu;
-
if (bp->dev->features & NETIF_F_RXCSUM)
ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
if (bp->dev->features & NETIF_F_TSO)
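/* A standalone sketch of the MAC_PAD padding scheme described in the comment
 * above: each mac_local slot is 8 bytes wide while a MAC is 6 bytes, so the
 * copy starts MAC_PAD (2) bytes into the slot and advances by
 * stride + size = 8 bytes per element.  The slot count and the sample MACs
 * below are invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_ETH_ALEN	6
#define DEMO_MAC_PAD	(((DEMO_ETH_ALEN + 3) & ~3) - DEMO_ETH_ALEN)	/* == 2 */
#define DEMO_NUM_MACS	2

static void demo_copy_macs(uint8_t (*slots)[8],
			   const uint8_t macs[][DEMO_ETH_ALEN], int n)
{
	uint8_t *next = (uint8_t *)slots + DEMO_MAC_PAD;	/* base + pad */
	int i;

	for (i = 0; i < n; i++) {
		memcpy(next, macs[i], DEMO_ETH_ALEN);
		next += DEMO_MAC_PAD + DEMO_ETH_ALEN;	/* stride + size */
	}
}

int main(void)
{
	uint8_t slots[DEMO_NUM_MACS][8];
	const uint8_t macs[DEMO_NUM_MACS][DEMO_ETH_ALEN] = {
		{ 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc },
		{ 0x00, 0x10, 0x18, 0x11, 0x22, 0x33 },
	};
	int i, j;

	memset(slots, 0, sizeof(slots));
	demo_copy_macs(slots, macs, DEMO_NUM_MACS);

	for (i = 0; i < DEMO_NUM_MACS; i++) {
		for (j = 0; j < 8; j++)
			printf("%02x ", slots[i][j]);
		printf("\n");	/* two zero pad bytes, then the 6 MAC bytes */
	}
	return 0;
}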
@@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -6029,9 +6041,10 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
rmb();
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_rings(bp);
-
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_memset_stats(bp);
return;
+ }
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
@@ -9525,6 +9538,10 @@ sp_rtnl_not_reset:
bnx2x_vfpf_storm_rx_mode(bp);
}
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state))
+ bnx2x_pf_set_vfs_vlan(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -9532,8 +9549,10 @@ sp_rtnl_not_reset:
/* enable SR-IOV if applicable */
if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
- &bp->sp_rtnl_state))
+ &bp->sp_rtnl_state)) {
+ bnx2x_disable_sriov(bp);
bnx2x_enable_sriov(bp);
+ }
}
static void bnx2x_period_task(struct work_struct *work)
@@ -9701,6 +9720,31 @@ static struct bnx2x_prev_path_list *
return NULL;
}
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ tmp_list->aer = 1;
+ rc = 0;
+ } else {
+ BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+ BP_PATH(bp));
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
@@ -9709,14 +9753,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
if (down_trylock(&bnx2x_prev_sem))
return false;
- list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
- if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
- bp->pdev->bus->number == tmp_list->bus &&
- BP_PATH(bp) == tmp_list->path) {
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (tmp_list->aer) {
+ DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+ BP_PATH(bp));
+ } else {
rc = true;
BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
BP_PATH(bp));
- break;
}
}
@@ -9730,6 +9775,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
struct bnx2x_prev_path_list *tmp_list;
int rc;
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ /* Check whether the entry for this path already exists */
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (!tmp_list->aer) {
+ BNX2X_ERR("Re-Marking the path.\n");
+ } else {
+ DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+ BP_PATH(bp));
+ tmp_list->aer = 0;
+ }
+ up(&bnx2x_prev_sem);
+ return 0;
+ }
+ up(&bnx2x_prev_sem);
+
+ /* Create an entry for this path and add it */
tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
if (!tmp_list) {
BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@ -9739,6 +9806,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
tmp_list->bus = bp->pdev->bus->number;
tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
tmp_list->path = BP_PATH(bp);
+ tmp_list->aer = 0;
tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
rc = down_interruptible(&bnx2x_prev_sem);
@@ -9746,8 +9814,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
BNX2X_ERR("Received %d when tried to take lock\n", rc);
kfree(tmp_list);
} else {
- BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
- BP_PATH(bp));
+ DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+ BP_PATH(bp));
list_add(&tmp_list->list, &bnx2x_prev_list);
up(&bnx2x_prev_sem);
}
@@ -9986,6 +10054,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
}
do {
+ int aer = 0;
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
@@ -9994,7 +10063,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
break;
}
- if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+ rc);
+ } else {
+ /* If Path is marked by EEH, ignore unload status */
+ aer = !!(bnx2x_prev_path_get_entry(bp) &&
+ bnx2x_prev_path_get_entry(bp)->aer);
+ up(&bnx2x_prev_sem);
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
rc = bnx2x_prev_unload_common(bp);
break;
}
@@ -10034,8 +10114,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
id = ((val & 0xffff) << 16);
val = REG_RD(bp, MISC_REG_CHIP_REV);
id |= ((val & 0xf) << 12);
- val = REG_RD(bp, MISC_REG_CHIP_METAL);
- id |= ((val & 0xff) << 4);
+
+ /* Metal is read from PCI regs, but we can't access >=0x400 from
+ * the configuration space (so we need to reg_rd)
+ */
+ val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ id |= (((val >> 24) & 0xf) << 4);
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
bp->common.chip_id = id;
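/* An illustrative sketch of the chip_id layout assembled above once the
 * metal revision is read from PCI_ID_VAL3: bits 31:16 chip number, 15:12
 * base revision, 11:4 metal (only 4 bits populated here, taken from
 * PCI_ID_VAL3 bits 27:24) and 3:0 bond id.  The sample register values
 * passed in main() are invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_compose_chip_id(uint32_t chip_num, uint32_t chip_rev,
				     uint32_t pci_id_val3, uint32_t bond_id)
{
	uint32_t id;

	id  = (chip_num & 0xffff) << 16;
	id |= (chip_rev & 0xf) << 12;
	id |= ((pci_id_val3 >> 24) & 0xf) << 4;	/* metal lives in bits 27:24 */
	id |= bond_id & 0xf;
	return id;
}

int main(void)
{
	printf("chip_id = 0x%08x\n",
	       demo_compose_chip_id(0x168e, 0x1, 0x02000000, 0x0));
	return 0;
}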
@@ -10812,14 +10896,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
}
}
- if (IS_MF_STORAGE_SD(bp))
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
- /* use FIP MAC as primary MAC */
+ /* If this is a storage-only interface, use SAN mac as
+ * primary MAC. Notice that for SD this is already the case,
+ * as the SAN mac was copied from the primary MAC.
+ */
+ if (IS_MF_FCOE_AFEX(bp))
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11056,6 +11138,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ bp->mf_config[vn] = 0;
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -11402,26 +11487,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
-static int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm failes) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held).
- */
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
- return 0;
-}
-
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11791,6 +11856,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_setup_tc = bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
+ .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
+ .ndo_get_vf_config = bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -11953,7 +12020,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
- bnx2x_set_ethtool_ops(dev);
+ bnx2x_set_ethtool_ops(bp, dev);
dev->priv_flags |= IFF_UNICAST_FLT;
@@ -11961,6 +12028,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ if (!CHIP_IS_E1x(bp)) {
+ dev->hw_features |= NETIF_F_GSO_GRE;
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE;
+ }
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -12447,7 +12521,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
* l2 connections.
*/
if (IS_VF(bp)) {
- bnx2x_vf_map_doorbells(bp);
+ bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
goto init_one_exit;
@@ -12475,13 +12549,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
- /* Enable SRIOV if capability found in configuration space.
- * Once the generic SR-IOV framework makes it in from the
- * pci tree this will be revised, to allow dynamic control
- * over the number of VFs. Right now, change the num of vfs
- * param below to enable SR-IOV.
- */
- rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ /* Enable SRIOV if capability found in configuration space */
+ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
goto init_one_exit;
@@ -12493,16 +12562,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
- /* disable FCOE for 57840 device, until FW supports it */
- switch (ent->driver_data) {
- case BCM57840_O:
- case BCM57840_4_10:
- case BCM57840_2_20:
- case BCM57840_MFO:
- case BCM57840_MF:
- bp->flags |= NO_FCOE_FLAG;
- }
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
@@ -12636,9 +12695,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
- int i;
-
- bp->state = BNX2X_STATE_ERROR;
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
bp->rx_mode = BNX2X_RX_MODE_NONE;
@@ -12647,29 +12704,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
-
- bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
+ netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp);
-
- /* Free SKBs, SGEs, TPA pool and driver internals */
- bnx2x_free_skbs(bp);
-
- for_each_rx_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-
- bnx2x_free_mem(bp);
+ spin_lock_bh(&bp->stats_lock);
+ bp->stats_state = STATS_STATE_DISABLED;
+ spin_unlock_bh(&bp->stats_lock);
- bp->state = BNX2X_STATE_CLOSED;
+ bnx2x_save_statistics(bp);
netif_carrier_off(bp->dev);
@@ -12705,6 +12754,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
+ BNX2X_ERR("IO error detected\n");
+
netif_device_detach(dev);
if (state == pci_channel_io_perm_failure) {
@@ -12715,6 +12766,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev))
bnx2x_eeh_nic_unload(bp);
+ bnx2x_prev_path_mark_eeh(bp);
+
pci_disable_device(pdev);
rtnl_unlock();
@@ -12733,9 +12786,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ int i;
rtnl_lock();
-
+ BNX2X_ERR("IO slot reset initializing...\n");
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
@@ -12749,6 +12803,42 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
+ if (netif_running(dev)) {
+ BNX2X_ERR("IO slot reset --> driver unload\n");
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 v;
+
+ v = SHMEM2_RD(bp,
+ drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+ bnx2x_drain_tx_queues(bp);
+ bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, true);
+
+ bp->sp_state = 0;
+ bp->port.pmf = 0;
+
+ bnx2x_prev_unload(bp);
+
+ /* We should have reset the engine, so it's fair to
+ * assume the FW will no longer write to the bnx2x driver.
+ */
+ bnx2x_squeeze_objects(bp);
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+ }
+
rtnl_unlock();
return PCI_ERS_RESULT_RECOVERED;
@@ -12775,6 +12865,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bnx2x_eeh_recover(bp);
+ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
if (netif_running(dev))
bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -12797,6 +12890,9 @@ static struct pci_driver bnx2x_pci_driver = {
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+ .sriov_configure = bnx2x_sriov_configure,
+#endif
};
static int __init bnx2x_init(void)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 791eb2d53011..d22bc40091ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1491,10 +1491,6 @@
/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
Port. */
#define MISC_REG_BOND_ID 0xa400
-/* [R 8] These bits indicate the metal revision of the chip. This value
- starts at 0x00 for each all-layer tape-out and increments by one for each
- tape-out. */
-#define MISC_REG_CHIP_METAL 0xa404
/* [R 16] These bits indicate the part number for the chip. */
#define MISC_REG_CHIP_NUM 0xa408
/* [R 4] These bits indicate the base revision of the chip. This value
@@ -6331,6 +6327,8 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7306416bc90d..32a9609cc98b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,8 +30,6 @@
#define BNX2X_MAX_EMUL_MULTI 16
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/**** Exe Queue interfaces ****/
/**
@@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf)
+ int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
- u8 *next = buf;
+ u8 *next = base;
int counter = 0;
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
if (counter < n) {
- /* place leading zeroes in buffer */
- memset(next, 0, MAC_LEADING_ZERO_CNT);
-
- /* place mac after leading zeroes*/
- memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
- ETH_ALEN);
-
- /* calculate address of next element and
- * advance counter
- */
+ memcpy(next, &pos->u, size);
counter++;
- next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+ counter, next);
+ next += stride + size;
- DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
- counter, next, pos->u.mac.mac);
}
}
return counter * ETH_ALEN;
@@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
return NULL;
@@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return pos;
return NULL;
@@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.
+ u.mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
-
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++;
@@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.
+ vlan_mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return rc;
}
list_del(&exeq_pos->link);
+ bnx2x_exe_queue_free_elem(bp, exeq_pos);
}
}
@@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
vlan_obj->check_move = bnx2x_check_move;
vlan_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ vlan_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -4426,6 +4432,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->tunnel_lso_inc_ip_id =
+ test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
+ PCSUM_ON_BD;
+
tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5669,17 +5681,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
- rdata->network_cos_mode = start_params->network_cos_mode;
-
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
+ rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
*/
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index ff907609b9fc..43c00bc84a08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
/************************* VLAN-MAC commands related parameters ***************/
struct bnx2x_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
};
struct bnx2x_vlan_ramrod_data {
@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
struct bnx2x_vlan_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
u16 vlan;
};
@@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj {
*
* @return number of copied bytes
*/
- int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf);
+ int (*get_n_elements)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+ u8 stride, u8 size);
/**
* Checks if ADD-ramrod with the given params may be performed.
@@ -824,7 +827,9 @@ enum {
BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM,
- BNX2X_Q_FLG_FORCE_DEFAULT_PRI
+ BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_PCSUM_ON_PKT,
+ BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
};
/* Queue type options: queue type may be a compination of below. */
@@ -842,6 +847,7 @@ enum bnx2x_q_type {
#define BNX2X_MULTI_TX_COS_E3B0 3
#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
struct bnx2x_queue_init_params {
struct {
@@ -1118,6 +1124,15 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
+
+ /* NVGRE classification enablement */
+ u8 nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ u8 gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ u8 gre_tunnel_rss;
};
struct bnx2x_func_switch_update_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6adfa2093581..2ce7c7471367 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,9 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (rc >= 0) {
cnt += pos->add ? 1 : -1;
- list_del(&pos->link);
- list_add(&pos->link, &rollback_list);
+ list_move(&pos->link, &rollback_list);
rc = 0;
} else if (rc == -EEXIST) {
rc = 0;
@@ -958,6 +959,12 @@ op_err:
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
case BNX2X_VFOP_QSETUP_DONE:
+ vf->cfg_flags |= VF_CFG_VLAN;
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
return bnx2x_is_pcie_pending(dev);
unknown_dev:
- BNX2X_ERR("Unknown device\n");
return false;
}
@@ -1926,20 +1932,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SRIOV can be enabled only with MSIX */
if (int_mode_param == BNX2X_INT_MODE_MSI ||
- int_mode_param == BNX2X_INT_MODE_INTX)
+ int_mode_param == BNX2X_INT_MODE_INTX) {
BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+ return 0;
+ }
err = -EIO;
/* verify ari is enabled */
if (!bnx2x_ari_enabled(bp->pdev)) {
- BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
- return err;
+ BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
+ return 0;
}
/* verify igu is in normal mode */
if (CHIP_INT_MODE_IS_BC(bp)) {
BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
- return err;
+ return 0;
}
/* allocate the vfs database */
@@ -1964,8 +1972,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
if (iov->total == 0)
goto failed;
- /* calculate the actual number of VFs */
- iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+ DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+ num_vfs_param, iov->nr_virtfn);
/* allocate the vf array */
bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
@@ -2378,8 +2388,8 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
- abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2436,8 +2446,8 @@ get_vf:
/* Do nothing for now */
break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
- vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
break;
}
@@ -3012,21 +3022,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->op_current = CHANNEL_TLV_NONE;
}
-void bnx2x_enable_sriov(struct bnx2x *bp)
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
- int rc = 0;
- /* disbale sriov in case it is still enabled */
+ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+ /* HW channel is only operational when PF is up */
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down");
+ return -EINVAL;
+ }
+
+ /* we are always bound by the total_vfs in the configuration space */
+ if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+ num_vfs_param = BNX2X_NR_VIRTFN(bp);
+ }
+
+ bp->requested_nr_virtfn = num_vfs_param;
+ if (num_vfs_param == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ } else {
+ return bnx2x_enable_sriov(bp);
+ }
+}
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0, req_vfs = bp->requested_nr_virtfn;
+
+ rc = pci_enable_sriov(bp->pdev, req_vfs);
+ if (rc) {
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ return rc;
+ }
+ DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+ return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+ int vfidx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+ for_each_vf(bp, vfidx) {
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ }
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
pci_disable_sriov(bp->pdev);
- DP(BNX2X_MSG_IOV, "sriov disabled\n");
+}
+
+static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf *vf)
+{
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+ vfidx, BNX2X_NR_VIRTFN(bp));
+ return -EINVAL;
+ }
+
+ if (!vf) {
+ BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
- /* enable sriov */
- DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
- rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ int rc;
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
if (rc)
- BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
- else
- DP(BNX2X_MSG_IOV, "sriov enabled\n");
+ return rc;
+ if (!mac_obj || !vlan_obj || !bulletin) {
+ BNX2X_ERR("VF partially initialized\n");
+ return -EINVAL;
+ }
+
+ ivi->vf = vfidx;
+ ivi->qos = 0;
+ ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->spoofchk = 1; /* always enabled */
+ if (vf->state == VF_ENABLED) {
+ /* mac and vlan are in vlan_mac objects */
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
+ 0, VLAN_HLEN);
+ } else {
+ /* mac */
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+ /* mac configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+ else
+ /* function has not been loaded yet. Show mac as 0s */
+ memset(&ivi->mac, 0, ETH_ALEN);
+
+ /* vlan */
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+ /* vlan configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ else
+ /* function has not been loaded yet. Show vlans as 0s */
+ memset(&ivi->vlan, 0, VLAN_HLEN);
+ }
+
+ return 0;
}
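bnx2x_get_vf_config(), bnx2x_set_vf_mac() and bnx2x_set_vf_vlan() (added further down) match the signatures of the standard VF-management net_device_ops hooks; the hookup itself lives in the driver's main netdev ops, outside this excerpt. A sketch of that wiring, with the surrounding structure reduced to the relevant fields (illustrative only):

#include <linux/netdevice.h>

/* Illustrative wiring of the PF-side VF management handlers; the real bnx2x
 * net_device_ops contains many more callbacks.
 */
static const struct net_device_ops example_pf_netdev_ops = {
	.ndo_get_vf_config	= bnx2x_get_vf_config,
	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
};

Through this path, ip link show <pf> reports the per-VF mac/vlan filled in above, and ip link set <pf> vf 0 mac ... / vf 0 vlan ... reach the two setters under rtnl.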
/* New mac for VF. Consider these cases:
@@ -3044,23 +3171,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp)
* VF to configure any mac for itself except for this mac. In case of a race
* where the VF fails to see the new post on its bulletin board before sending a
* mac configuration request, the PF will simply fail the request and VF can try
- * again after consulting its bulletin board
+ * again after consulting its bulletin board.
*/
-int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state, vfidx = queue;
+ int rc, q_logical_state;
struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
- /* if SRIOV is disabled there is nothing to do (and somewhere, someone
- * has erred).
- */
- if (!IS_SRIOV(bp)) {
- BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
- return -EINVAL;
- }
-
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
if (!is_valid_ether_addr(mac)) {
BNX2X_ERR("mac address invalid\n");
return -EINVAL;
@@ -3085,7 +3208,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
- unsigned long flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
/* must lock vfpf channel to protect against vf flows */
@@ -3106,14 +3229,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
}
/* configure the new mac to device */
- __set_bit(RAMROD_COMP_WAIT, &flags);
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
- BNX2X_ETH_MAC, &flags);
+ BNX2X_ETH_MAC, &ramrod_flags);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return rc;
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
+
+ if (vlan > 4095) {
+ BNX2X_ERR("illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+ vfidx, vlan, 0);
+
+ /* update PF's copy of the VF's bulletin. No point in posting the vlan
+ * to the VF since it doesn't have anything to do with it. But it is useful
+ * to store it here in case the VF is not up yet, so that we can
+ * configure the vlan later when it comes up.
+ */
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ bulletin->vlan = vlan;
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the vlan in device on this vf's queue */
+ unsigned long ramrod_flags = 0;
+ unsigned long vlan_mac_flags = 0;
+ struct bnx2x_vlan_mac_obj *vlan_obj =
+ &bnx2x_vfq(vf, 0, vlan_obj);
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ return -EINVAL;
+ }
+
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure the new vlan to device */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ return rc;
+ }
+
+ /* clear the flag indicating that this VF needs its vlan
+ * (it will only be set if the HV configured the vlan before the VF was up,
+ * and we were called because the VF came up later)
+ */
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ }
+ return 0;
}
/* crc is the first field in the bulletin board. compute the crc over the
@@ -3165,20 +3407,26 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
+ /* the vlan in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << VLAN_VALID)
+ memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
+
/* copy new bulletin board to bp */
bp->old_bulletin = bulletin;
return PFVF_BULLETIN_UPDATED;
}
-void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
/* vf doorbells are embedded within the regview */
- bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+ return bp->regview + PXP_VF_ADDR_DB_START;
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
+ mutex_init(&bp->vf2pf_mutex);
+
/* allocate vf2pf mailbox for vf to pf channel */
BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
@@ -3196,3 +3444,26 @@ alloc_mem_err:
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
+
+int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to be run, which invokes
+ * register_netdevice, which must be called with the rtnl lock held. As we
+ * are holding that lock right now, this could only work if the probe did
+ * not take the lock. However, as the probe of the vf may be called from
+ * other contexts as well (such as when passthrough to a vm fails), it
+ * can't assume the lock is being held for it. Using delayed work here
+ * allows the probe code to simply take the lock (i.e. wait for it to be
+ * released if it is being held). We only want to do this if the number of
+ * VFs was set before the PF driver was loaded.
+ */
+ if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
+
+ return 0;
+}
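A minimal sketch of the consumer side of the bit set here, assuming a worker along the lines of the driver's sp_rtnl task: because the work item runs outside the rtnl-held open path, it is free to take rtnl itself before enabling SR-IOV (the handler below is illustrative and much smaller than the real one).

/* Illustrative sp_rtnl-style worker: may sleep and may take rtnl_lock()
 * before the VF probes triggered by pci_enable_sriov() call
 * register_netdevice().
 */
static void example_sp_rtnl_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x,
					sp_rtnl_task.work);

	rtnl_lock();

	if (test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
			       &bp->sp_rtnl_state))
		bnx2x_enable_sriov(bp);

	rtnl_unlock();
}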
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b4050173add9..d4b17b7a774e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -193,6 +193,7 @@ struct bnx2x_virtf {
#define VF_CFG_TPA 0x0004
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
+#define VF_CFG_VLAN 0x0020
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -712,6 +713,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
u16 length);
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length);
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
bool bnx2x_tlv_supported(u16 tlvtype);
@@ -750,13 +752,17 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
}
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
-void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
-void bnx2x_enable_sriov(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -779,7 +785,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
-static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
@@ -802,8 +809,15 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
return PFVF_BULLETIN_UNCHANGED;
}
-static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
+static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ return NULL;
+}
+
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 4397f8b76f2e..2ca3d94fcec2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1547,11 +1547,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
}
}
+void bnx2x_memset_stats(struct bnx2x *bp)
+{
+ int i;
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
+ if (bp->stats_init) {
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
+ }
+ }
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+ }
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+ /* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
void bnx2x_stats_init(struct bnx2x *bp)
{
int /*abs*/port = BP_PORT(bp);
int mb_idx = BP_FW_MB_IDX(bp);
- int i;
bp->stats_pending = 0;
bp->executer_idx = 0;
@@ -1587,36 +1627,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
&(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
}
- /* function stats */
- for_each_queue(bp, i) {
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
-
- memset(&fp_stats->old_tclient, 0,
- sizeof(fp_stats->old_tclient));
- memset(&fp_stats->old_uclient, 0,
- sizeof(fp_stats->old_uclient));
- memset(&fp_stats->old_xclient, 0,
- sizeof(fp_stats->old_xclient));
- if (bp->stats_init) {
- memset(&fp_stats->eth_q_stats, 0,
- sizeof(fp_stats->eth_q_stats));
- memset(&fp_stats->eth_q_stats_old, 0,
- sizeof(fp_stats->eth_q_stats_old));
- }
- }
-
/* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp);
- memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ /* Clean SP from previous statistics */
if (bp->stats_init) {
- memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
- memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
- memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
- memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
- memset(&bp->func_stats, 0, sizeof(bp->func_stats));
-
- /* Clean SP from previous statistics */
if (bp->func_stx) {
memset(bnx2x_sp(bp, func_stats), 0,
sizeof(struct host_func_stats));
@@ -1626,13 +1641,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
}
}
- bp->stats_state = STATS_STATE_DISABLED;
-
- if (bp->port.pmf && bp->port.port_stx)
- bnx2x_port_stats_base_init(bp);
-
- /* mark the end of statistics initializiation */
- bp->stats_init = false;
+ bnx2x_memset_stats(bp);
}
void bnx2x_save_statistics(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 198f6f1c9ad5..d117f472816c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -540,8 +540,8 @@ struct bnx2x_fw_port_stats_old {
/* forward */
struct bnx2x;
+void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
-
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 531eebf40d60..90fbf9cc2c2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -36,6 +36,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length)
{
+ mutex_lock(&bp->vf2pf_mutex);
+
DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
type);
@@ -49,6 +51,15 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
+/* releases the mailbox */
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+{
+ DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
+ first_tlv->tl.type);
+
+ mutex_unlock(&bp->vf2pf_mutex);
+}
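bnx2x_vfpf_prep() now takes bp->vf2pf_mutex and bnx2x_vfpf_finalize() drops it, which is why every caller in this file is rewritten to funnel its exits through a single out: label. A condensed sketch of the resulting request pattern, modelled on bnx2x_vfpf_release() below (the request chosen and the TLV filling are placeholders):

/* Illustrative vf->pf request: prep locks the mailbox, finalize always
 * unlocks it, so error paths jump to "out" instead of returning directly.
 */
static int example_vf2pf_request(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc = 0;

	/* clears the mailbox and takes bp->vf2pf_mutex */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* ... fill in the request-specific fields here ... */

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;
out:
	/* every exit path releases the mutex */
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}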
+
/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
@@ -181,8 +192,10 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vfdev_info.vf_id = vf_id;
req->vfdev_info.vf_os = 0;
@@ -213,7 +226,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF timeout */
if (rc)
- return rc;
+ goto out;
/* copy acquire response from buffer to bp */
memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
@@ -253,7 +266,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF reports error */
BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
bp->acquire_resp.hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
}
@@ -279,20 +293,24 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->acquire_resp.resc.current_mac_addr,
ETH_ALEN);
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
int bnx2x_vfpf_release(struct bnx2x *bp)
{
struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- u32 rc = 0, vf_id;
+ u32 rc, vf_id;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vf_id = vf_id;
@@ -308,7 +326,8 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
if (rc)
/* PF timeout */
- return rc;
+ goto out;
+
if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
/* PF released us */
DP(BNX2X_MSG_SP, "vf released\n");
@@ -316,10 +335,13 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
/* PF reports error */
BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* Tell PF about SB addresses */
@@ -350,16 +372,20 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc)
- return rc;
+ goto out;
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
}
/* CLOSE VF - opposite to INIT_VF */
@@ -401,6 +427,8 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
resp->hdr.status);
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
free_irq:
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 0);
@@ -435,7 +463,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
- flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
flags |= VFPF_QUEUE_FLG_VLAN;
DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -486,8 +513,11 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
fp_idx, resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
return rc;
}
@@ -515,17 +545,19 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
if (rc) {
BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
rc);
- return rc;
+ goto out;
}
/* PF failed the transaction */
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
/* request pf to add a mac for the vf */
@@ -533,7 +565,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc;
+ int rc = 0;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
@@ -562,7 +594,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
- return rc;
+ goto out;
}
/* failure may mean PF was configured with a new mac for us */
@@ -587,8 +619,10 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -643,14 +677,16 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("Sending a message failed: %d\n", rc);
- return rc;
+ goto out;
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -689,7 +725,8 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
break;
default:
BNX2X_ERR("BAD rx mode (%d)\n", mode);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -708,8 +745,10 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
}
@@ -1004,7 +1043,7 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
/* convert MBX queue-flags to standard SP queue-flags */
-static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
unsigned long *sp_q_flags)
{
if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
@@ -1015,8 +1054,6 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
- if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
- __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
@@ -1025,6 +1062,10 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+
+ /* outer vlan removal is set according to the PF's multi function mode */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1075,11 +1116,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
init_p->tx.hc_rate = setup_q->txq.hc_rate;
init_p->tx.sb_cq_index = setup_q->txq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&init_p->tx.flags);
/* tx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&setup_p->flags);
/* tx setup - general, nothing */
@@ -1107,11 +1148,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* rx init */
init_p->rx.hc_rate = setup_q->rxq.hc_rate;
init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&init_p->rx.flags);
/* rx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&setup_p->flags);
/* rx setup - general */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index bfc80baec00d..41708faab575 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -328,9 +328,15 @@ struct pf_vf_bulletin_content {
#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
* is available for it
*/
+#define VLAN_VALID 1 /* when set, the vf should not access
+ * the vfpf channel
+ */
u8 mac[ETH_ALEN];
- u8 padding[2];
+ u8 mac_padding[2];
+
+ u16 vlan;
+ u8 vlan_padding[6];
};
union pf_vf_bulletin {
@@ -353,6 +359,7 @@ enum channel_tlvs {
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e9b35da375cb..e80bfb60c3ef 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
SMP_CACHE_BYTES * 2 +
NET_IP_ALIGN);
- if (sb_new == NULL) {
- pr_info("%s: sk_buff allocation failed\n",
- d->sbdma_eth->sbm_dev->name);
+ if (sb_new == NULL)
return -ENOBUFS;
- }
sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 17a972734ba7..a4416b09f209 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3 "tigon/tg3.bin"
+#define FIRMWARE_TG357766 "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -3448,11 +3449,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
#define TX_CPU_SCRATCH_SIZE 0x04000
/* tp->lock is held. */
-static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
int i;
+ const int iters = 10000;
- BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+ for (i = 0; i < iters; i++) {
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
+static int tg3_rxcpu_pause(struct tg3 *tp)
+{
+ int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
+
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+
+ return rc;
+}
+
+/* tp->lock is held. */
+static int tg3_txcpu_pause(struct tg3 *tp)
+{
+ return tg3_pause_cpu(tp, TX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+}
+
+/* tp->lock is held. */
+static void tg3_rxcpu_resume(struct tg3 *tp)
+{
+ tg3_resume_cpu(tp, RX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ int rc;
+
+ BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3460,17 +3508,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
return 0;
}
- if (offset == RX_CPU_BASE) {
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
-
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
- udelay(10);
+ if (cpu_base == RX_CPU_BASE) {
+ rc = tg3_rxcpu_pause(tp);
} else {
/*
* There is only an Rx CPU for the 5750 derivative in the
@@ -3479,17 +3518,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
if (tg3_flag(tp, IS_SSB_CORE))
return 0;
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
+ rc = tg3_txcpu_pause(tp);
}
- if (i >= 10000) {
+ if (rc) {
netdev_err(tp->dev, "%s timed out, %s CPU\n",
- __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -3499,19 +3533,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
return 0;
}
-struct fw_info {
- unsigned int fw_base;
- unsigned int fw_len;
- const __be32 *fw_data;
-};
+static int tg3_fw_data_len(struct tg3 *tp,
+ const struct tg3_firmware_hdr *fw_hdr)
+{
+ int fw_len;
+
+ /* Non-fragmented firmware has one firmware header followed by a
+ * contiguous chunk of data to be written. The length field in that
+ * header is not the length of the data to be written but the complete
+ * length of the bss. The data length is determined from
+ * tp->fw->size minus the headers.
+ *
+ * Fragmented firmware has a main header followed by multiple
+ * fragments. Each fragment is identical to non-fragmented firmware:
+ * a firmware header followed by a contiguous chunk of data. In
+ * the main header, the length field is unused and set to 0xffffffff.
+ * In each fragment header the length is the entire size of that
+ * fragment, i.e. fragment data + header length. The data length is
+ * therefore the length field in the header minus TG3_FW_HDR_LEN.
+ */
+ if (tp->fw_len == 0xffffffff)
+ fw_len = be32_to_cpu(fw_hdr->len);
+ else
+ fw_len = tp->fw->size;
+
+ return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
+}
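A quick worked example of the computation above, with illustrative sizes: TG3_FW_HDR_LEN is 12 bytes (three __be32 fields), so a non-fragmented image of tp->fw->size = 7180 bytes gives (7180 - 12) / 4 = 1792 words to write, while a fragment whose header reports len = 2060 gives (2060 - 12) / 4 = 512 words.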
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
u32 cpu_scratch_base, int cpu_scratch_size,
- struct fw_info *info)
+ const struct tg3_firmware_hdr *fw_hdr)
{
- int err, lock_err, i;
+ int err, i;
void (*write_op)(struct tg3 *, u32, u32);
+ int total_len = tp->fw->size;
if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
netdev_err(tp->dev,
@@ -3520,30 +3576,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
return -EINVAL;
}
- if (tg3_flag(tp, 5705_PLUS))
+ if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
write_op = tg3_write_mem;
else
write_op = tg3_write_indirect_reg32;
- /* It is possible that bootcode is still loading at this point.
- * Get the nvram lock first before halting the cpu.
- */
- lock_err = tg3_nvram_lock(tp);
- err = tg3_halt_cpu(tp, cpu_base);
- if (!lock_err)
- tg3_nvram_unlock(tp);
- if (err)
- goto out;
+ if (tg3_asic_rev(tp) != ASIC_REV_57766) {
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ int lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
- for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
- write_op(tp, cpu_scratch_base + i, 0);
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
- for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
- write_op(tp, (cpu_scratch_base +
- (info->fw_base & 0xffff) +
- (i * sizeof(u32))),
- be32_to_cpu(info->fw_data[i]));
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE,
+ tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
+ } else {
+ /* Subtract additional main header for fragmented firmware and
+ * advance to the first fragment
+ */
+ total_len -= TG3_FW_HDR_LEN;
+ fw_hdr++;
+ }
+
+ do {
+ u32 *fw_data = (u32 *)(fw_hdr + 1);
+ for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
+ write_op(tp, cpu_scratch_base +
+ (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
+ (i * sizeof(u32)),
+ be32_to_cpu(fw_data[i]));
+
+ total_len -= be32_to_cpu(fw_hdr->len);
+
+ /* Advance to next fragment */
+ fw_hdr = (struct tg3_firmware_hdr *)
+ ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
+ } while (total_len > 0);
err = 0;
@@ -3552,13 +3627,33 @@ out:
}
/* tp->lock is held. */
+static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
+{
+ int i;
+ const int iters = 5;
+
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, pc);
+
+ for (i = 0; i < iters; i++) {
+ if (tr32(cpu_base + CPU_PC) == pc)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, pc);
+ udelay(1000);
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
- int err, i;
+ const struct tg3_firmware_hdr *fw_hdr;
+ int err;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3566,60 +3661,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
-
err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup only the RX cpu. */
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
- break;
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
"should be %08x\n", __func__,
- tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ tr32(RX_CPU_BASE + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ tg3_rxcpu_resume(tp);
+
+ return 0;
+}
+
+static int tg3_validate_rxcpu_state(struct tg3 *tp)
+{
+ const int iters = 1000;
+ int i;
+ u32 val;
+
+ /* Wait for the boot code to complete initialization and enter the service
+ * loop. It is then safe to download service patches.
+ */
+ for (i = 0; i < iters; i++) {
+ if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
+ break;
+
+ udelay(10);
+ }
+
+ if (i == iters) {
+ netdev_err(tp->dev, "Boot code not ready for service patches\n");
+ return -EBUSY;
+ }
+
+ val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
+ if (val & 0xff) {
+ netdev_warn(tp->dev,
+ "Other patches exist. Not downloading EEE patch\n");
+ return -EEXIST;
+ }
return 0;
}
/* tp->lock is held. */
+static void tg3_load_57766_firmware(struct tg3 *tp)
+{
+ struct tg3_firmware_hdr *fw_hdr;
+
+ if (!tg3_flag(tp, NO_NVRAM))
+ return;
+
+ if (tg3_validate_rxcpu_state(tp))
+ return;
+
+ if (!tp->fw)
+ return;
+
+ /* This firmware blob has a different format than older firmware
+ * releases, as given below. The main difference is that we have fragmented
+ * data to be written to non-contiguous locations.
+ *
+ * In the beginning we have a firmware header identical to other
+ * firmware which consists of version, base addr and length. The length
+ * here is unused and set to 0xffffffff.
+ *
+ * This is followed by a series of firmware fragments which are
+ * individually identical to previous firmware, i.e. each has a firmware
+ * header followed by the data for that fragment. The version
+ * field of the individual fragment header is unused.
+ */
+
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+ if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
+ return;
+
+ if (tg3_rxcpu_pause(tp))
+ return;
+
+ /* tg3_load_firmware_cpu() will always succeed for the 57766 */
+ tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
+
+ tg3_rxcpu_resume(tp);
+}
+
+/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
- int err, i;
+ int err;
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
+ if (!tg3_flag(tp, FW_TSO))
return 0;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3627,10 +3779,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
cpu_scratch_size = tp->fw_len;
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
@@ -3643,30 +3792,22 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
err = tg3_load_firmware_cpu(tp, cpu_base,
cpu_scratch_base, cpu_scratch_size,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup the cpu. */
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(cpu_base + CPU_PC) == info.fw_base)
- break;
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev,
"%s fails to set CPU PC, is %08x should be %08x\n",
- __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ __func__, tr32(cpu_base + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_MODE, 0x00000000);
+
+ tg3_resume_cpu(tp, cpu_base);
return 0;
}
@@ -8039,11 +8180,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
TG3_RX_RCB_RING_BYTES(tp),
&tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->rx_rcb)
goto err_out;
-
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
return 0;
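This hunk and the two that follow fold the explicit memset() into the allocation itself. A minimal sketch of the idiom, with a placeholder function name and caller-supplied device, size and DMA handle:

#include <linux/dma-mapping.h>

/* Zeroed coherent allocation in one step; __GFP_ZERO makes the follow-up
 * memset() redundant.
 */
static void *example_alloc_dma_ring(struct device *dev, size_t bytes,
				    dma_addr_t *mapping)
{
	return dma_alloc_coherent(dev, bytes, mapping,
				  GFP_KERNEL | __GFP_ZERO);
}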
@@ -8093,12 +8232,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tp->hw_stats)
goto err_out;
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
-
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
@@ -8106,11 +8243,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
TG3_HW_STATUS_SIZE,
&tnapi->status_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->hw_status)
goto err_out;
- memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
if (tg3_flag(tp, ENABLE_RSS)) {
@@ -9781,6 +9917,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ /* Ignore any errors for the firmware download. If download
+ * fails, the device will operate with EEE disabled
+ */
+ tg3_load_57766_firmware(tp);
+ }
+
if (tg3_flag(tp, TSO_CAPABLE)) {
err = tg3_load_tso_firmware(tp);
if (err)
@@ -10570,7 +10713,7 @@ static int tg3_test_msi(struct tg3 *tp)
static int tg3_request_firmware(struct tg3 *tp)
{
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
@@ -10578,15 +10721,15 @@ static int tg3_request_firmware(struct tg3 *tp)
return -ENOENT;
}
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
* start address and _full_ length including BSS sections
* (which must be longer than the actual data, of course
*/
- tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
- if (tp->fw_len < (tp->fw->size - 12)) {
+ tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
@@ -10885,7 +11028,15 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ if (err) {
+ netdev_warn(tp->dev, "EEE capability disabled\n");
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+ } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+ netdev_warn(tp->dev, "EEE capability restored\n");
+ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ }
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -14515,6 +14666,7 @@ static int tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
(tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5762 ||
(tg3_asic_rev(tp) == ASIC_REV_5717 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
@@ -15300,7 +15452,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- tg3_flag_set(tp, TSO_BUG);
+ tg3_flag_set(tp, FW_TSO);
+ tg3_flag_set(tp, TSO_BUG);
if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
@@ -15311,7 +15464,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- tp->fw_needed) {
+ tg3_flag(tp, FW_TSO)) {
/* For firmware TSO, assume ASF is disabled.
* We'll disable TSO later if we discover ASF
* is enabled in tg3_get_eeprom_hw_cfg().
@@ -15326,6 +15479,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ tp->fw_needed = FIRMWARE_TG357766;
+
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
@@ -15598,7 +15754,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
tg3_get_eeprom_hw_cfg(tp);
- if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -15786,6 +15942,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
udelay(50);
tg3_nvram_init(tp);
+ /* If the device has an NVRAM, no need to load patch firmware */
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
+ !tg3_flag(tp, NO_NVRAM))
+ tp->fw_needed = NULL;
+
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d6..1cdc1b641c77 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2222,6 +2222,12 @@
#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
+#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
+#define TG3_57766_FW_BASE_ADDR 0x00030000
+#define TG3_57766_FW_HANDSHAKE 0x0003fccc
+#define TG3_SBROM_IN_SERVICE_LOOP 0x51
+
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
@@ -3009,17 +3015,18 @@ enum TG3_FLAGS {
TG3_FLAG_JUMBO_CAPABLE,
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
- TG3_FLAG_TSO_BUG,
TG3_FLAG_MAX_RXPEND_64,
- TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
TG3_FLAG_ASF_NEW_HANDSHAKE,
TG3_FLAG_HW_AUTONEG,
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
+ TG3_FLAG_FW_TSO,
TG3_FLAG_HW_TSO_1,
TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_TSO_BUG,
TG3_FLAG_ICH_WORKAROUND,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
@@ -3064,6 +3071,13 @@ enum TG3_FLAGS {
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
};
+struct tg3_firmware_hdr {
+ __be32 version; /* unused for fragments */
+ __be32 base_addr;
+ __be32 len;
+};
+#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr))
+
struct tg3 {
/* begin "general, frequently-used members" cacheline section */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 3227fdde521b..f2b73ffa9122 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7cce42dc2f20..d588f842d557 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad,
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
dma_alloc_coherent(&bnad->pcidev->dev,
- mem_info->len, &dma_pa,
- GFP_KERNEL);
-
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 3becdb2deb46..a5f499f53dd6 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev)
int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring) {
- netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+ (MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc)),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring)
return -ENOMEM;
- }
lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
if (!lp->rx_buffers) {
- netdev_err(dev, "unable to alloc rx data DMA buffer\n");
-
dma_free_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- lp->rx_ring, lp->rx_ring_dma);
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
return -ENOMEM;
}
@@ -209,7 +206,6 @@ static void at91ether_rx(struct net_device *dev)
netif_rx(skb);
} else {
lp->stats.rx_dropped++;
- netdev_notice(dev, "Memory squeeze, dropping packet.\n");
}
if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
@@ -303,42 +299,7 @@ static const struct of_device_id at91ether_dt_ids[] = {
{ .compatible = "cdns,emac" },
{ /* sentinel */ }
};
-
MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
-
-static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (np)
- return of_get_phy_mode(np);
-
- return -ENODEV;
-}
-
-static int at91ether_get_hwaddr_dt(struct macb *bp)
-{
- struct device_node *np = bp->pdev->dev.of_node;
-
- if (np) {
- const char *mac = of_get_mac_address(np);
- if (mac) {
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
- return 0;
- }
- }
-
- return -ENODEV;
-}
-#else
-static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-static int at91ether_get_hwaddr_dt(struct macb *bp)
-{
- return -ENODEV;
-}
#endif
/* Detect MAC & PHY and perform ethernet interface initialization */
@@ -352,6 +313,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
struct macb *lp;
int res;
u32 reg;
+ const char *mac;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
@@ -403,11 +365,13 @@ static int __init at91ether_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- res = at91ether_get_hwaddr_dt(lp);
- if (res < 0)
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
+ else
macb_get_hwaddr(lp);
- res = at91ether_get_phy_mode_dt(pdev);
+ res = of_get_phy_mode(pdev->dev.of_node);
if (res < 0) {
if (board_data && board_data->is_rmii)
lp->phy_interface = PHY_INTERFACE_MODE_RMII;
@@ -519,18 +483,7 @@ static struct platform_driver at91ether_driver = {
},
};
-static int __init at91ether_init(void)
-{
- return platform_driver_probe(&at91ether_driver, at91ether_probe);
-}
-
-static void __exit at91ether_exit(void)
-{
- platform_driver_unregister(&at91ether_driver);
-}
-
-module_init(at91ether_init)
-module_exit(at91ether_exit)
+module_platform_driver_probe(at91ether_driver, at91ether_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 79039439bfdc..7fd0e3e977af 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -485,6 +485,8 @@ static void macb_tx_interrupt(struct macb *bp)
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
+ macb_writel(bp, ISR, MACB_BIT(TCOMP));
+
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
@@ -736,6 +738,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* now.
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&bp->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -1054,6 +1057,7 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
dmacfg |= GEM_BF(FBLDO, 16);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+ dmacfg &= ~GEM_BIT(ENDIA);
gem_writel(bp, DMACFG, dmacfg);
}
}
@@ -1472,41 +1476,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem" },
{ /* sentinel */ }
};
-
MODULE_DEVICE_TABLE(of, macb_dt_ids);
-
-static int macb_get_phy_mode_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (np)
- return of_get_phy_mode(np);
-
- return -ENODEV;
-}
-
-static int macb_get_hwaddr_dt(struct macb *bp)
-{
- struct device_node *np = bp->pdev->dev.of_node;
- if (np) {
- const char *mac = of_get_mac_address(np);
- if (mac) {
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
- return 0;
- }
- }
-
- return -ENODEV;
-}
-#else
-static int macb_get_phy_mode_dt(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-static int macb_get_hwaddr_dt(struct macb *bp)
-{
- return -ENODEV;
-}
#endif
static int __init macb_probe(struct platform_device *pdev)
@@ -1519,6 +1489,7 @@ static int __init macb_probe(struct platform_device *pdev)
u32 config;
int err = -ENXIO;
struct pinctrl *pinctrl;
+ const char *mac;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1557,14 +1528,14 @@ static int __init macb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get macb_clk\n");
goto err_out_free_dev;
}
- clk_enable(bp->pclk);
+ clk_prepare_enable(bp->pclk);
bp->hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
dev_err(&pdev->dev, "failed to get hclk\n");
goto err_out_put_pclk;
}
- clk_enable(bp->hclk);
+ clk_prepare_enable(bp->hclk);
bp->regs = ioremap(regs->start, resource_size(regs));
if (!bp->regs) {
@@ -1592,11 +1563,13 @@ static int __init macb_probe(struct platform_device *pdev)
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
- err = macb_get_hwaddr_dt(bp);
- if (err < 0)
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ else
macb_get_hwaddr(bp);
- err = macb_get_phy_mode_dt(pdev);
+ err = of_get_phy_mode(pdev->dev.of_node);
if (err < 0) {
pdata = pdev->dev.platform_data;
if (pdata && pdata->is_rmii)
@@ -1654,9 +1627,9 @@ err_out_free_irq:
err_out_iounmap:
iounmap(bp->regs);
err_out_disable_clocks:
- clk_disable(bp->hclk);
+ clk_disable_unprepare(bp->hclk);
clk_put(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->pclk);
err_out_put_pclk:
clk_put(bp->pclk);
err_out_free_dev:
@@ -1683,9 +1656,9 @@ static int __exit macb_remove(struct platform_device *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
iounmap(bp->regs);
- clk_disable(bp->hclk);
+ clk_disable_unprepare(bp->hclk);
clk_put(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->pclk);
clk_put(bp->pclk);
free_netdev(dev);
platform_set_drvdata(pdev, NULL);
@@ -1703,8 +1676,8 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
netif_carrier_off(netdev);
netif_device_detach(netdev);
- clk_disable(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
return 0;
}
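The clk_enable()/clk_disable() pairs are converted to clk_prepare_enable()/clk_disable_unprepare(), as required once the driver runs on top of the common clock framework. A minimal sketch of the pairing, with placeholder names:

#include <linux/clk.h>

/* Prepare+enable on the way up, disable+unprepare on the way down; both
 * helpers may sleep, so call them from process context.
 */
static int example_clk_up(struct clk *clk)
{
	return clk_prepare_enable(clk);
}

static void example_clk_down(struct clk *clk)
{
	clk_disable_unprepare(clk);
}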
@@ -1714,8 +1687,8 @@ static int macb_resume(struct platform_device *pdev)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
- clk_enable(bp->pclk);
- clk_enable(bp->hclk);
+ clk_prepare_enable(bp->pclk);
+ clk_prepare_enable(bp->hclk);
netif_device_attach(netdev);
@@ -1737,18 +1710,7 @@ static struct platform_driver macb_driver = {
},
};
-static int __init macb_init(void)
-{
- return platform_driver_probe(&macb_driver, macb_probe);
-}
-
-static void __exit macb_exit(void)
-{
- platform_driver_unregister(&macb_driver);
-}
-
-module_init(macb_init);
-module_exit(macb_exit);
+module_platform_driver_probe(macb_driver, macb_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 570908b93578..993d70380688 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -173,6 +173,8 @@
/* Bitfields in DMACFG. */
#define GEM_FBLDO_OFFSET 0
#define GEM_FBLDO_SIZE 5
+#define GEM_ENDIA_OFFSET 7
+#define GEM_ENDIA_SIZE 1
#define GEM_RXBMS_OFFSET 8
#define GEM_RXBMS_SIZE 2
#define GEM_TXPBMS_OFFSET 10
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 482976925154..55fe8c9f0484 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -835,7 +835,7 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
struct sk_buff *skb;
dma_addr_t mapping;
- skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+ skb = dev_alloc_skb(q->rx_buffer_size);
if (!skb)
break;
@@ -1046,11 +1046,10 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
const struct freelQ_ce *ce = &fl->centries[fl->cidx];
if (len < copybreak) {
- skb = alloc_skb(len + 2, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(NULL, len);
if (!skb)
goto use_orig_buf;
- skb_reserve(skb, 2); /* align IP header */
skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev,
dma_unmap_addr(ce, dma_addr),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6db997c78a5f..681804b30a3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -54,6 +54,10 @@
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0
+#define FW_VERSION_MAJOR_T5 0
+#define FW_VERSION_MINOR_T5 0
+#define FW_VERSION_MICRO_T5 0
+
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
@@ -66,7 +70,9 @@ enum {
enum {
MEM_EDC0,
MEM_EDC1,
- MEM_MC
+ MEM_MC,
+ MEM_MC0 = MEM_MC,
+ MEM_MC1
};
enum {
@@ -74,8 +80,10 @@ enum {
MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
+ MEMWIN1_BASE_T5 = 0x52000,
MEMWIN2_APERTURE = 65536,
MEMWIN2_BASE = 0x30000,
+ MEMWIN2_BASE_T5 = 0x54000,
};
enum dev_master {
@@ -431,6 +439,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
+ u64 udb;
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -504,13 +513,44 @@ struct sge {
struct l2t_data;
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A3,
+
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_FIRST_REV = T5_A1,
+ T5_LAST_REV = T5_A1,
+};
+
+#ifdef CONFIG_PCI_IOV
+
+/* T4 supports SR-IOV on PF0-3 and T5 on PF0-7. However, the Serial
+ * Configuration used to initialize T5 only enables SR-IOV on PF0-3
+ * in order to keep things simple.
+ */
+#define NUM_OF_PF_WITH_SRIOV 4
+
+#endif
+
struct adapter {
void __iomem *regs;
+ void __iomem *bar2;
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
unsigned int fn;
unsigned int flags;
+ enum chip_type chip;
int msg_enable;
@@ -673,6 +713,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_t5(enum chip_type chip)
+{
+ return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
+}
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
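The chip_type values above pack the chip version into the upper nibble and the silicon revision into the lower nibble, which is exactly what is_t4() and is_t5() range-check. A small standalone check of that encoding, reusing the macros as defined in the hunk:

#include <assert.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

int main(void)
{
	int t4_a2 = CHELSIO_CHIP_CODE(0x4, 1);	/* T4_A2 */
	int t5_a1 = CHELSIO_CHIP_CODE(0x5, 0);	/* T5_A1 */

	assert(t4_a2 == 0x41 && t5_a1 == 0x50);
	assert(CHELSIO_CHIP_VERSION(t5_a1) == 0x5);
	assert(CHELSIO_CHIP_RELEASE(t4_a2) == 1);
	return 0;
}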
@@ -858,7 +908,8 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+ u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e707e31abd81..e76cf035100b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -68,8 +68,8 @@
#include "t4fw_api.h"
#include "l2t.h"
-#define DRV_VERSION "1.3.0-ko"
-#define DRV_DESC "Chelsio T4 Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
* Max interrupt hold-off timer value in us. Queues fall back to this value
@@ -229,11 +229,51 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x440a, 4),
CH_DEVICE(0x440d, 4),
CH_DEVICE(0x440e, 4),
+ CH_DEVICE(0x5001, 5),
+ CH_DEVICE(0x5002, 5),
+ CH_DEVICE(0x5003, 5),
+ CH_DEVICE(0x5004, 5),
+ CH_DEVICE(0x5005, 5),
+ CH_DEVICE(0x5006, 5),
+ CH_DEVICE(0x5007, 5),
+ CH_DEVICE(0x5008, 5),
+ CH_DEVICE(0x5009, 5),
+ CH_DEVICE(0x500A, 5),
+ CH_DEVICE(0x500B, 5),
+ CH_DEVICE(0x500C, 5),
+ CH_DEVICE(0x500D, 5),
+ CH_DEVICE(0x500E, 5),
+ CH_DEVICE(0x500F, 5),
+ CH_DEVICE(0x5010, 5),
+ CH_DEVICE(0x5011, 5),
+ CH_DEVICE(0x5012, 5),
+ CH_DEVICE(0x5013, 5),
+ CH_DEVICE(0x5401, 5),
+ CH_DEVICE(0x5402, 5),
+ CH_DEVICE(0x5403, 5),
+ CH_DEVICE(0x5404, 5),
+ CH_DEVICE(0x5405, 5),
+ CH_DEVICE(0x5406, 5),
+ CH_DEVICE(0x5407, 5),
+ CH_DEVICE(0x5408, 5),
+ CH_DEVICE(0x5409, 5),
+ CH_DEVICE(0x540A, 5),
+ CH_DEVICE(0x540B, 5),
+ CH_DEVICE(0x540C, 5),
+ CH_DEVICE(0x540D, 5),
+ CH_DEVICE(0x540E, 5),
+ CH_DEVICE(0x540F, 5),
+ CH_DEVICE(0x5410, 5),
+ CH_DEVICE(0x5411, 5),
+ CH_DEVICE(0x5412, 5),
+ CH_DEVICE(0x5413, 5),
{ 0, }
};
#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
+#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -241,6 +281,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE(FW5_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
@@ -319,7 +360,10 @@ static bool vf_acls;
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
-static unsigned int num_vf[4];
+/* Configure the number of PCI-E Virtual Functions to be instantiated
+ * on SR-IOV capable Physical Functions.
+ */
+static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
@@ -1002,21 +1046,36 @@ freeout: t4_free_sge_resources(adap);
static int upgrade_fw(struct adapter *adap)
{
int ret;
- u32 vers;
+ u32 vers, exp_major;
const struct fw_hdr *hdr;
const struct firmware *fw;
struct device *dev = adap->pdev_dev;
+ char *fw_file_name;
- ret = request_firmware(&fw, FW_FNAME, dev);
+ switch (CHELSIO_CHIP_VERSION(adap->chip)) {
+ case CHELSIO_T4:
+ fw_file_name = FW_FNAME;
+ exp_major = FW_VERSION_MAJOR;
+ break;
+ case CHELSIO_T5:
+ fw_file_name = FW5_FNAME;
+ exp_major = FW_VERSION_MAJOR_T5;
+ break;
+ default:
+ dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
+ return -EINVAL;
+ }
+
+ ret = request_firmware(&fw, fw_file_name, dev);
if (ret < 0) {
- dev_err(dev, "unable to load firmware image " FW_FNAME
- ", error %d\n", ret);
+ dev_err(dev, "unable to load firmware image %s, error %d\n",
+ fw_file_name, ret);
return ret;
}
hdr = (const struct fw_hdr *)fw->data;
vers = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
ret = -EINVAL; /* wrong major version, won't do */
goto out;
}
@@ -1024,18 +1083,15 @@ static int upgrade_fw(struct adapter *adap)
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
vers > adap->params.fw_vers) {
dev_info(dev, "upgrading firmware ...\n");
ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
/*force=*/false);
if (!ret)
- dev_info(dev, "firmware successfully upgraded to "
- FW_FNAME " (%d.%d.%d.%d)\n",
- FW_HDR_FW_VER_MAJOR_GET(vers),
- FW_HDR_FW_VER_MINOR_GET(vers),
- FW_HDR_FW_VER_MICRO_GET(vers),
- FW_HDR_FW_VER_BUILD_GET(vers));
+ dev_info(dev,
+ "firmware upgraded to version %pI4 from %s\n",
+ &hdr->fw_ver, fw_file_name);
else
dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
} else {
@@ -1308,6 +1364,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
+ "WriteCoalSuccess ",
+ "WriteCoalFail ",
};
static int get_sset_count(struct net_device *dev, int sset)
@@ -1321,10 +1379,15 @@ static int get_sset_count(struct net_device *dev, int sset)
}
#define T4_REGMAP_SIZE (160 * 1024)
+#define T5_REGMAP_SIZE (332 * 1024)
static int get_regs_len(struct net_device *dev)
{
- return T4_REGMAP_SIZE;
+ struct adapter *adap = netdev2adap(dev);
+ if (is_t4(adap->chip))
+ return T4_REGMAP_SIZE;
+ else
+ return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
@@ -1398,11 +1461,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ u32 val1, val2;
t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+ data += sizeof(struct queue_port_stats) / sizeof(u64);
+ if (!is_t4(adapter->chip)) {
+ t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
+ val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
+ val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+ *data = val1 - val2;
+ data++;
+ *data = val2;
+ data++;
+ } else {
+ memset(data, 0, 2 * sizeof(u64));
+ *data += 2;
+ }
}
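For T5, the two new ethtool counters added above are derived from the SGE statistics registers: writes that were successfully coalesced are the total minus the match count, and the failures are the match count itself. A small sketch of that derivation with the register reads abstracted into parameters (assumed semantics, following the code above):

/* fill the two new T5 counters from SGE_STAT_TOTAL and SGE_STAT_MATCH values */
static void fill_write_coal_stats(u64 *data, u32 stat_total, u32 stat_match)
{
	data[0] = stat_total - stat_match;	/* WriteCoalSuccess */
	data[1] = stat_match;			/* WriteCoalFail */
}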
/*
@@ -1413,7 +1490,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
- return 4 | (ap->params.rev << 10) | (1 << 16);
+ return CHELSIO_CHIP_VERSION(ap->chip) |
+ (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
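With that encoding, mk_adap_vers() now reports the real chip family and revision instead of a hard-coded 4. As a worked example, a T5 A1 part (chip code 0x50) yields 5 | (0 << 10) | (1 << 16) = 0x10005; a minimal sketch of the computation:

/* mirrors the reworked mk_adap_vers() for an assumed chip code such as 0x50 */
static unsigned int example_adap_vers(unsigned int chip_code)
{
	unsigned int version = chip_code >> 4;	/* CHELSIO_CHIP_VERSION */
	unsigned int release = chip_code & 0xf;	/* CHELSIO_CHIP_RELEASE */

	return version | (release << 10) | (1 << 16);	/* 0x10005 for 0x50 */
}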
@@ -1428,7 +1506,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
- static const unsigned int reg_ranges[] = {
+ static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
0x1180, 0x11b4,
0x11fc, 0x123c,
@@ -1648,13 +1726,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x27e00, 0x27e04
};
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x1148,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a9c,
+ 0x5b9c, 0x5bfc,
+ 0x6000, 0x6040,
+ 0x6058, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x90f8,
+ 0x9400, 0x9470,
+ 0x9600, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x11088,
+ 0x1109c, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c60,
+ 0x19c94, 0x19e10,
+ 0x19e50, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30144,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x31600,
+ 0x31608, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e04, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32208, 0x3223c,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b70,
+ 0x33000, 0x33048,
+ 0x33060, 0x3309c,
+ 0x330f0, 0x33148,
+ 0x33160, 0x3319c,
+ 0x331f0, 0x332e4,
+ 0x332f8, 0x333e4,
+ 0x333f8, 0x33448,
+ 0x33460, 0x3349c,
+ 0x334f0, 0x33548,
+ 0x33560, 0x3359c,
+ 0x335f0, 0x336e4,
+ 0x336f8, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33948,
+ 0x33960, 0x3399c,
+ 0x339f0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34144,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x35600,
+ 0x35608, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e04, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36208, 0x3623c,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b70,
+ 0x37000, 0x37048,
+ 0x37060, 0x3709c,
+ 0x370f0, 0x37148,
+ 0x37160, 0x3719c,
+ 0x371f0, 0x372e4,
+ 0x372f8, 0x373e4,
+ 0x373f8, 0x37448,
+ 0x37460, 0x3749c,
+ 0x374f0, 0x37548,
+ 0x37560, 0x3759c,
+ 0x375f0, 0x376e4,
+ 0x376f8, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37948,
+ 0x37960, 0x3799c,
+ 0x379f0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38100, 0x38144,
+ 0x38190, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a04,
+ 0x38a0c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38c24,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x39600,
+ 0x39608, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e04, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a208, 0x3a23c,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab70,
+ 0x3b000, 0x3b048,
+ 0x3b060, 0x3b09c,
+ 0x3b0f0, 0x3b148,
+ 0x3b160, 0x3b19c,
+ 0x3b1f0, 0x3b2e4,
+ 0x3b2f8, 0x3b3e4,
+ 0x3b3f8, 0x3b448,
+ 0x3b460, 0x3b49c,
+ 0x3b4f0, 0x3b548,
+ 0x3b560, 0x3b59c,
+ 0x3b5f0, 0x3b6e4,
+ 0x3b6f8, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b948,
+ 0x3b960, 0x3b99c,
+ 0x3b9f0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca04,
+ 0x3ca0c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3cc24,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d600,
+ 0x3d608, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de04, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e208, 0x3e23c,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb70,
+ 0x3f000, 0x3f048,
+ 0x3f060, 0x3f09c,
+ 0x3f0f0, 0x3f148,
+ 0x3f160, 0x3f19c,
+ 0x3f1f0, 0x3f2e4,
+ 0x3f2f8, 0x3f3e4,
+ 0x3f3f8, 0x3f448,
+ 0x3f460, 0x3f49c,
+ 0x3f4f0, 0x3f548,
+ 0x3f560, 0x3f59c,
+ 0x3f5f0, 0x3f6e4,
+ 0x3f6f8, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f948,
+ 0x3f960, 0x3f99c,
+ 0x3f9f0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40068,
+ 0x40080, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40298,
+ 0x402ac, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41300, 0x413c4,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44078,
+ 0x440c0, 0x44278,
+ 0x442c0, 0x44478,
+ 0x444c0, 0x44678,
+ 0x446c0, 0x44878,
+ 0x448c0, 0x449fc,
+ 0x45000, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48068,
+ 0x48080, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48298,
+ 0x482ac, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49300, 0x493c4,
+ 0x49400, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c078,
+ 0x4c0c0, 0x4c278,
+ 0x4c2c0, 0x4c478,
+ 0x4c4c0, 0x4c678,
+ 0x4c6c0, 0x4c878,
+ 0x4c8c0, 0x4c9fc,
+ 0x4d000, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
int i;
struct adapter *ap = netdev2adap(dev);
+ static const unsigned int *reg_ranges;
+ int arr_size = 0, buf_size = 0;
+
+ if (is_t4(ap->chip)) {
+ reg_ranges = &t4_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t4_reg_ranges);
+ buf_size = T4_REGMAP_SIZE;
+ } else {
+ reg_ranges = &t5_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t5_reg_ranges);
+ buf_size = T5_REGMAP_SIZE;
+ }
regs->version = mk_adap_vers(ap);
- memset(buf, 0, T4_REGMAP_SIZE);
- for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ memset(buf, 0, buf_size);
+ for (i = 0; i < arr_size; i += 2)
reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
@@ -2363,8 +2880,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
int ret, ofst;
__be32 data[16];
- if (mem == MEM_MC)
- ret = t4_mc_read(adap, pos, data, NULL);
+ if ((mem == MEM_MC) || (mem == MEM_MC1))
+ ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
else
ret = t4_edc_read(adap, mem, pos, data, NULL);
if (ret)
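Because MEM_MC0 aliases MEM_MC (value 2) and MEM_MC1 is 3, the expression mem % MEM_MC maps the two external memory targets onto controller indices 0 and 1 for the new t4_mc_read() signature. A tiny check of that mapping, assuming the enum values from the cxgb4.h hunk above:

enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };

/* MEM_MC0 (2) % MEM_MC (2) == 0, MEM_MC1 (3) % MEM_MC (2) == 1 */
static int mc_index(int mem)
{
	return mem % MEM_MC;
}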
@@ -2405,18 +2922,37 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
static int setup_debugfs(struct adapter *adap)
{
int i;
+ u32 size;
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE)
- add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
- if (i & EDRAM1_ENABLE)
- add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
- if (i & EXT_MEM_ENABLE)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (i & EDRAM0_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM0_BAR);
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
+ }
+ if (i & EDRAM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM1_BAR);
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
+ }
+ if (is_t4(adap->chip)) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(size));
+ } else {
+ if (i & EXT_MEM_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ add_debugfs_mem(adap, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_GET(size));
+ }
+ if (i & EXT_MEM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
+ add_debugfs_mem(adap, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_GET(size));
+ }
+ }
if (adap->l2t)
debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
&t4_l2t_fops);
@@ -2747,10 +3283,18 @@ EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
struct adapter *adap = netdev2adap(dev);
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+ return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
@@ -2853,6 +3397,25 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
+void cxgb4_disable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
+ F_NOCOALESCE);
+}
+EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
+
+void cxgb4_enable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+}
+EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
+
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
@@ -2888,14 +3451,23 @@ static struct notifier_block cxgb4_netevent_nb = {
static void drain_db_fifo(struct adapter *adap, int usecs)
{
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
do {
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+
+ if (lp_count == 0 && hp_count == 0)
+ break;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(usecs));
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
- break;
} while (1);
}
@@ -3004,24 +3576,62 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
+ if (is_t4(adap->chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+ drain_db_fifo(adap, 1);
+ recover_all_queues(adap);
+ enable_dbs(adap);
+ } else {
+ u32 dropped_db = t4_read_reg(adap, 0x010ac);
+ u16 qid = (dropped_db >> 15) & 0x1ffff;
+ u16 pidx_inc = dropped_db & 0x1fff;
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+ u32 udb;
+
+ dev_warn(adap->pdev_dev,
+ "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
+ dropped_db, qid,
+ (dropped_db >> 14) & 1,
+ (dropped_db >> 13) & 1,
+ pidx_inc);
+
+ drain_db_fifo(adap, 1);
+
+ s_qpp = QUEUESPERPAGEPF1 * adap->fn;
+ udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ udb = qid << qpshift;
+ udb &= PAGE_MASK;
+ page = udb / PAGE_SIZE;
+ udb += (qid - (page * udb_density)) * 128;
+
+ writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
+
+ /* Re-enable BAR2 WC */
+ t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
+ }
+
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
- disable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, 1);
- recover_all_queues(adap);
- enable_dbs(adap);
}
void t4_db_full(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
- queue_work(workq, &adap->db_full_task);
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+ queue_work(workq, &adap->db_full_task);
+ }
}
void t4_db_dropped(struct adapter *adap)
{
- queue_work(workq, &adap->db_drop_task);
+ if (is_t4(adap->chip))
+ queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
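The T5 branch of process_db_drop() above recomputes the dropped queue's user doorbell offset inside BAR2 from the per-PF queues-per-page setting and then replays the lost PIDX increment. A hedged sketch of just the address arithmetic, assuming page_size and udb_density are powers of two as in the driver:

/* sketch of the BAR2 user-doorbell offset math used in the recovery path;
 * udb_density is the number of egress queues mapped into one BAR2 page
 */
static unsigned long example_udb_offset(unsigned int qid,
					unsigned long udb_density,
					unsigned long page_size)
{
	unsigned long qpshift = __builtin_ctzl(page_size) - __builtin_ctzl(udb_density);
	unsigned long udb = ((unsigned long)qid << qpshift) & ~(page_size - 1);
	unsigned long page = udb / page_size;

	/* each queue owns a 128-byte doorbell slot within its page */
	return udb + (qid - page * udb_density) * 128;
}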
@@ -3566,17 +4176,27 @@ void t4_fatal_err(struct adapter *adap)
static void setup_memwin(struct adapter *adap)
{
- u32 bar0;
+ u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
+ if (is_t4(adap->chip)) {
+ mem_win0_base = bar0 + MEMWIN0_BASE;
+ mem_win1_base = bar0 + MEMWIN1_BASE;
+ mem_win2_base = bar0 + MEMWIN2_BASE;
+ } else {
+ /* For T5, only the relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+ mem_win1_base = MEMWIN1_BASE_T5;
+ mem_win2_base = MEMWIN2_BASE_T5;
+ }
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
- (bar0 + MEMWIN0_BASE) | BIR(0) |
+ mem_win0_base | BIR(0) |
WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
- (bar0 + MEMWIN1_BASE) | BIR(0) |
+ mem_win1_base | BIR(0) |
WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
- (bar0 + MEMWIN2_BASE) | BIR(0) |
+ mem_win2_base | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
@@ -3745,6 +4365,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
int ret, using_flash;
+ char *fw_config_file, fw_config_file_path[256];
/*
* Reset device if necessary.
@@ -3761,7 +4382,21 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
- ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ fw_config_file = FW_CFNAME;
+ break;
+ case CHELSIO_T5:
+ fw_config_file = FW5_CFNAME;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ adapter->pdev->device);
+ ret = -EINVAL;
+ goto bye;
+ }
+
+ ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
using_flash = 1;
mtype = FW_MEMTYPE_CF_FLASH;
@@ -3877,6 +4512,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
+ sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
@@ -3887,7 +4523,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
"Configuration File %s, version %#x, computed checksum %#x\n",
(using_flash
? "in device FLASH"
- : "/lib/firmware/" FW_CFNAME),
+ : fw_config_file_path),
finiver, cfcsum);
return 0;
@@ -4814,7 +5450,8 @@ static void print_port_info(const struct net_device *dev)
sprintf(bufp, "BASE-%s", base[pi->port_type]);
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id, adap->params.rev, buf,
+ adap->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
@@ -4854,10 +5491,11 @@ static void free_some_resources(struct adapter *adapter)
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int func, i, err;
+ int func, i, err, s_qpp, qpp, num_seg;
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
@@ -4934,7 +5572,34 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_prep_adapter(adapter);
if (err)
- goto out_unmap_bar;
+ goto out_unmap_bar0;
+
+ if (!is_t4(adapter->chip)) {
+ s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
+ qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ num_seg = PAGE_SIZE / SEGMENT_SIZE;
+
+ /* Each segment is 128 bytes. Write coalescing is enabled only
+ * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
+ * queue does not exceed the number of segments that fit in one page.
+ */
+ if (qpp > num_seg) {
+ dev_err(&pdev->dev,
+ "Incorrect number of egress queues per page\n");
+ err = -EINVAL;
+ goto out_unmap_bar0;
+ }
+ adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!adapter->bar2) {
+ dev_err(&pdev->dev, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+ }
+
setup_memwin(adapter);
err = adap_init0(adapter);
setup_memwin_rdma(adapter);
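The BAR2 mapping in init_one() is only set up when the egress queues-per-page value leaves room for the 128-byte write-coalescing segments of one page; with 4 KiB pages that means at most 4096 / 128 = 32 queues per page. A minimal arithmetic check under that page-size assumption:

#define EXAMPLE_PAGE_SIZE	4096
#define SEGMENT_SIZE		128	/* write-coalescing segment size, as in the patch */

/* returns non-zero when write coalescing can be enabled for this PF */
static int write_coalescing_possible(int queues_per_page)
{
	int num_seg = EXAMPLE_PAGE_SIZE / SEGMENT_SIZE;	/* 32 */

	return queues_per_page <= num_seg;
}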
@@ -5063,6 +5728,9 @@ sriov:
out_free_dev:
free_some_resources(adapter);
out_unmap_bar:
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
+ out_unmap_bar0:
iounmap(adapter->regs);
out_free_adapter:
kfree(adapter);
@@ -5113,6 +5781,8 @@ static void remove_one(struct pci_dev *pdev)
free_some_resources(adapter);
iounmap(adapter->regs);
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e2bbc7f3e2de..4faf4d067ee7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -269,4 +269,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
+void cxgb4_disable_db_coalescing(struct net_device *dev);
+void cxgb4_enable_db_coalescing(struct net_device *dev);
+
#endif /* !__CXGB4_OFLD_H */
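The two new exports let an upper-layer driver switch doorbell coalescing off around an operation that needs strictly ordered doorbell writes and then restore it. A hedged usage sketch; the ULD helper and issue_ordered_writes() are hypothetical, only the two exported functions come from the patch:

#include "cxgb4_uld.h"

/* hypothetical stand-in for whatever ordered write sequence the ULD performs */
static int issue_ordered_writes(struct net_device *dev)
{
	return 0;
}

static int uld_do_ordered_writes(struct net_device *dev)
{
	int ret;

	cxgb4_disable_db_coalescing(dev);
	ret = issue_ordered_writes(dev);
	cxgb4_enable_db_coalescing(dev);

	return ret;
}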
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fe9a2ea3588b..8b47b253e204 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -506,10 +506,14 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
+ u32 val;
if (q->pend_cred >= 8) {
+ val = PIDX(q->pend_cred / 8);
+ if (!is_t4(adap->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
- QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ QID(q->cntxt_id) | val);
q->pend_cred &= 7;
}
}
@@ -812,6 +816,22 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*end = 0;
}
+/* Copy a 64-byte coalesced work request to the memory-mapped BAR2
+ * region (user space writes). For a coalesced WR the SGE fetches the
+ * data from this FIFO instead of from host memory.
+ */
+static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
+{
+ int count = 8;
+
+ while (count) {
+ writeq(*src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+}
+
/**
* ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
@@ -822,11 +842,25 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
+ unsigned int *wr, index;
+
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(n));
+ if (is_t4(adap->chip)) {
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
+ } else {
+ if (n == 1) {
+ index = q->pidx ? (q->pidx - 1) : (q->size - 1);
+ wr = (unsigned int *)&q->desc[index];
+ cxgb_pio_copy((u64 __iomem *)
+ (adap->bar2 + q->udb + 64),
+ (u64 *)wr);
+ } else
+ writel(n, adap->bar2 + q->udb + 8);
+ wmb();
+ }
}
q->db_pidx = q->pidx;
spin_unlock(&q->db_lock);
@@ -1555,7 +1589,6 @@ static noinline int handle_trace_pkt(struct adapter *adap,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- struct cpl_trace_pkt *p;
skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);