using these calls except with such hotplug-deficient drivers.
struct platform_device *platform_device_alloc(
- char *name, unsigned id);
+ const char *name, int id);
You can use platform_device_alloc() to dynamically allocate a device, which
you then initialize with resources and register with platform_device_register().
A better solution is usually:
struct platform_device *platform_device_register_simple(
- char *name, unsigned id,
- struct resource *res, unsigned nres);
+ const char *name, int id,
+ struct resource *res, unsigned int nres);
You can use platform_device_register_simple() as a one-step call to allocate
and register a device.
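
As a rough illustration, here is a minimal board-setup sketch built around
platform_device_register_simple(); the "mydev" name, the register window
addresses, and the init function are invented for the example and are not
part of the patch.

    #include <linux/init.h>
    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static struct resource mydev_resources[] = {
            {
                    .start  = 0x10000000,   /* hypothetical register window */
                    .end    = 0x10000fff,
                    .flags  = IORESOURCE_MEM,
            },
    };

    static int __init mydev_board_init(void)
    {
            struct platform_device *pdev;

            /* allocate, fill in, and register the device in one step */
            pdev = platform_device_register_simple("mydev", -1,
                                                   mydev_resources,
                                                   ARRAY_SIZE(mydev_resources));
            return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
    }

platform_device_register_simple() returns an ERR_PTR() value on failure, which
is why the sketch checks the result with IS_ERR() rather than against NULL.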
--- /dev/null
+NOTE:
+This is a Japanese translated version of "Documentation/stable_kernel_rules.txt".
+It is maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com>
+and the JF Project team <www.linux.or.jp/JF>.
+If you find a difference from the original file or a problem in the
+translation, please contact the maintainer of this file or the JF project.
+
+Please also note that the purpose of this file is to be easier to read
+for readers whose native language is not English, and it is not intended
+as a fork. So, if you have any comments or updates for this file, please
+try to update the original (English) file first.
+
+==================================
+This is a Japanese translation of
+linux-2.6.24/Documentation/stable_kernel_rules.txt.
+
+Translation group: JF Project < http://www.linux.or.jp/JF/ >
+Translation date:  2007/12/30
+Translator:        Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com>
+Proofreaders:      Nobumitsu Takei <takei at webmasters dot gr dot jp>
+                   Seiji Kaneko <skaneko at a2 dot mbn dot or dot jp>
+                   Masanori Kobayasi <zap03216 at nifty dot ne dot jp>
+                   Kenji Noguchi <tokyo246 at gmail dot com>
+                   Shintaro Jingu <jin at libjingu dot jp>
+==================================
+
+Everything you ever wanted to know about Linux 2.6 -stable releases
+
+Rules on what kind of patches are accepted, and which ones are not, into
+the "-stable" tree:
+
+ - It must be obviously correct and tested.
+ - It cannot be bigger than 100 lines, with context (the lines around the
+   change) included.
+ - It must fix only one thing.
+ - It must fix a real bug that bothers people (not a "this could be a
+   problem..." type of thing).
+ - It must fix a serious issue: a build error (excluding things marked
+   CONFIG_BROKEN), an oops, a hang, data corruption, a real security
+   issue, or some other "oh, that's not good" problem. In short,
+   something critical.
+ - No "theoretical race condition" issues, unless an explanation of how
+   the race can be exploited is also provided.
+ - It cannot contain any "trivial" fixes (spelling fixes, whitespace
+   cleanups, and so on).
+ - It must be accepted by the relevant subsystem maintainer.
+ - It must follow the rules in Documentation/SubmittingPatches.
+
+Procedure for submitting patches to the -stable tree:
+
+ - After confirming that it follows the above rules, send the patch to
+   stable@kernel.org.
+ - The sender will receive an ACK when the patch is accepted into the
+   queue, or a NAK if it is rejected. This response may take a few days,
+   depending on the developers' schedules.
+ - If accepted, the patch is added to the -stable queue for review by
+   other developers.
+ - Security patches should not be sent to this alias (stable@kernel.org);
+   instead, send them to the security@kernel.org address.
+
+Review cycle:
+
+ - When the -stable maintainers decide on a review cycle, the patches are
+   sent to the review committee and to the maintainer of the area the
+   patch affects (unless the submitter is the maintainer of that area),
+   and are CC'd to the linux-kernel mailing list.
+ - The review committee has 48 hours to ACK or NAK the patch.
+ - If the patch is rejected by a member of the committee, or if
+   linux-kernel members object to the patch, raising issues that the
+   maintainers and committee members had not noticed, the patch is
+   dropped from the queue.
+ - At the end of the review cycle, the ACKed patches are added to the
+   latest -stable release, and a new -stable release is then made.
+ - Security patches are not passed through the normal review cycle;
+   instead, they are accepted into the -stable tree directly by the
+   kernel security team. Contact the kernel security team for details of
+   this procedure.
+
+Review committee:
+
+ - The committee is made up of many volunteer kernel developers who are
+   active in this task, and a few non-volunteer ones.
+
it has been replaced by a better system and you
should be using that.
-3C359 NETWORK DRIVER
-P: Mike Phillips
-M: mikep@linuxtr.net
-L: netdev@vger.kernel.org
-W: http://www.linuxtr.net
-S: Maintained
-
3C505 NETWORK DRIVER
P: Philip Blundell
M: philb@gnu.org
S: Maintained
BONDING DRIVER
-P: Chad Tindel
-M: ctindel@users.sourceforge.net
P: Jay Vosburgh
M: fubar@us.ibm.com
L: bonding-devel@lists.sourceforge.net
W: http://oss.oracle.com/projects/ocfs2/
S: Supported
-OLYMPIC NETWORK DRIVER
-P: Peter De Shrijver
-M: p2@ace.ulyssis.student.kuleuven.ac.be
-P: Mike Phillips
-M: mikep@linuxtr.net
-L: netdev@vger.kernel.org
-W: http://www.linuxtr.net
-S: Maintained
-
OMNIKEY CARDMAN 4000 DRIVER
P: Harald Welte
M: laforge@gnumonks.org
W: http://sourceforge.net/projects/tlan/
S: Maintained
-TOKEN-RING NETWORK DRIVER
-P: Mike Phillips
-M: mikep@linuxtr.net
-L: netdev@vger.kernel.org
-W: http://www.linuxtr.net
-S: Maintained
-
TOSHIBA ACPI EXTRAS DRIVER
P: John Belmonte
M: toshiba_acpi@memebeam.org
--- /dev/null
+#
+# General architecture dependent options
+#
+
+config OPROFILE
+ tristate "OProfile system profiling (EXPERIMENTAL)"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
+ help
+ OProfile is a profiling system capable of profiling the
+ whole system, including the kernel, kernel modules, libraries,
+ and applications.
+
+ If unsure, say N.
+
+config HAVE_OPROFILE
+ def_bool n
+
+config KPROBES
+ bool "Kprobes"
+ depends on KALLSYMS && MODULES
+ depends on HAVE_KPROBES
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+
+config HAVE_KPROBES
+ def_bool n
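
For reference, the register_kprobe() usage described by the KPROBES help text
above looks roughly like the following module sketch; the probed symbol
("do_fork") and the handler are illustrative assumptions, not part of this
patch.

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/kprobes.h>

    /* illustrative handler: runs just before the probed instruction */
    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
            printk(KERN_INFO "kprobe hit at %p\n", p->addr);
            return 0;
    }

    static struct kprobe kp = {
            .symbol_name    = "do_fork",    /* hypothetical probe target */
            .pre_handler    = handler_pre,
    };

    static int __init kprobe_example_init(void)
    {
            return register_kprobe(&kp);
    }

    static void __exit kprobe_example_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(kprobe_example_init);
    module_exit(kprobe_example_exit);
    MODULE_LICENSE("GPL");

Unregistering the probe on module exit keeps the probepoint from outliving the
handler code.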
config ALPHA
bool
default y
+ select HAVE_OPROFILE
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/alpha/Kconfig.debug"
# DUMMY_CONSOLE may be defined in drivers/video/console/Kconfig
default y
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
+ select HAVE_OPROFILE
+ select HAVE_KPROBES if (!XIP_KERNEL)
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
config ARCH_MTD_XIP
bool
+if OPROFILE
+
+config OPROFILE_ARMV6
+ def_bool y
+ depends on CPU_V6 && !SMP
+ select OPROFILE_ARM11_CORE
+
+config OPROFILE_MPCORE
+ def_bool y
+ depends on CPU_V6 && SMP
+ select OPROFILE_ARM11_CORE
+
+config OPROFILE_ARM11_CORE
+ bool
+
+endif
+
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
source "fs/Kconfig"
-source "arch/arm/Kconfig.instrumentation"
-
source "arch/arm/Kconfig.debug"
source "security/Kconfig"
+++ /dev/null
-menuconfig INSTRUMENTATION
- bool "Instrumentation Support"
- default y
- ---help---
- Say Y here to get to see options related to performance measurement,
- system-wide debugging, and testing. This option alone does not add any
- kernel code.
-
- If you say N, all options in this submenu will be skipped and
- disabled. If you're trying to debug the kernel itself, go see the
- Kernel Hacking menu.
-
-if INSTRUMENTATION
-
-config PROFILING
- bool "Profiling support (EXPERIMENTAL)"
- help
- Say Y here to enable the extended profiling support mechanisms used
- by profilers such as OProfile.
-
-config OPROFILE
- tristate "OProfile system profiling (EXPERIMENTAL)"
- depends on PROFILING && !UML
- help
- OProfile is a profiling system capable of profiling the
- whole system, include the kernel, kernel modules, libraries,
- and applications.
-
- If unsure, say N.
-
-config OPROFILE_ARMV6
- bool
- depends on OPROFILE && CPU_V6 && !SMP
- default y
- select OPROFILE_ARM11_CORE
-
-config OPROFILE_MPCORE
- bool
- depends on OPROFILE && CPU_V6 && SMP
- default y
- select OPROFILE_ARM11_CORE
-
-config OPROFILE_ARM11_CORE
- bool
-
-config KPROBES
- bool "Kprobes"
- depends on KALLSYMS && MODULES && !UML && !XIP_KERNEL
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
-config MARKERS
- bool "Activate markers"
- help
- Place an empty function call at each marker site. Can be
- dynamically changed for a probe function.
-
-endif # INSTRUMENTATION
# With EMBEDDED=n, we get lots of stuff automatically selected
# that we usually don't need on AVR32.
select EMBEDDED
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
config ARCH_HAS_ILOG2_U64
def_bool n
-config ARCH_SUPPORTS_OPROFILE
- def_bool y
-
config GENERIC_HWEIGHT
def_bool y
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/avr32/Kconfig.debug"
source "security/Kconfig"
config BLACKFIN
bool
default y
+ select HAVE_OPROFILE
config ZONE_DMA
bool
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/blackfin/Kconfig.debug"
source "security/Kconfig"
source "drivers/usb/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/cris/Kconfig.debug"
source "security/Kconfig"
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/frv/Kconfig.debug"
source "security/Kconfig"
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/h8300/Kconfig.debug"
source "security/Kconfig"
select ACPI if (!IA64_HP_SIM)
select PM if (!IA64_HP_SIM)
select ARCH_SUPPORTS_MSI
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
source "arch/ia64/hp/sim/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/ia64/Kconfig.debug"
source "security/Kconfig"
config M32R
bool
default y
+ select HAVE_OPROFILE
config SBUS
bool
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/m32r/Kconfig.debug"
source "security/Kconfig"
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/m68k/Kconfig.debug"
source "security/Kconfig"
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/m68knommu/Kconfig.debug"
source "security/Kconfig"
config MIPS
bool
default y
+ select HAVE_OPROFILE
# Horrible source of confusion. Die, die, die ...
select EMBEDDED
select RTC_LIB
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/mips/Kconfig.debug"
source "security/Kconfig"
config PARISC
def_bool y
+ select HAVE_OPROFILE
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/parisc/Kconfig.debug"
source "security/Kconfig"
config PPC
bool
default y
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
config EARLY_PRINTK
bool
source "lib/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/powerpc/Kconfig.debug"
source "security/Kconfig"
config PPC
bool
default y
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
config PPC32
bool
source "lib/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/ppc/Kconfig.debug"
source "security/Kconfig"
config S390
def_bool y
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
source "init/Kconfig"
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/s390/Kconfig.debug"
source "security/Kconfig"
config SUPERH
def_bool y
select EMBEDDED
+ select HAVE_OPROFILE
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/sh/Kconfig.debug"
source "security/Kconfig"
config SPARC
bool
default y
+ select HAVE_OPROFILE
# Identify this as a Sparc32 build
config SPARC32
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/sparc/Kconfig.debug"
source "security/Kconfig"
config SPARC
bool
default y
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
config SPARC64
bool
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/sparc64/Kconfig.debug"
source "security/Kconfig"
bool
default n
-source "kernel/Kconfig.instrumentation"
-
source "arch/um/Kconfig.debug"
source "drivers/usb/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/v850/Kconfig.debug"
source "security/Kconfig"
### Arch settings
config X86
def_bool y
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
config GENERIC_LOCKBREAK
def_bool n
config HAVE_SETUP_PER_CPU_AREA
def_bool X86_64
-config ARCH_SUPPORTS_OPROFILE
- bool
- default y
-
select HAVE_KVM
config ARCH_HIBERNATION_POSSIBLE
source "fs/Kconfig"
-source "kernel/Kconfig.instrumentation"
-
source "arch/x86/Kconfig.debug"
source "security/Kconfig"
provide one yourself.
endmenu
-source "kernel/Kconfig.instrumentation"
-
source "arch/xtensa/Kconfig.debug"
source "security/Kconfig"
int device_create_file(struct device *dev, struct device_attribute *attr)
{
int error = 0;
- if (get_device(dev)) {
+ if (dev)
error = sysfs_create_file(&dev->kobj, &attr->attr);
- put_device(dev);
- }
return error;
}
*/
void device_remove_file(struct device *dev, struct device_attribute *attr)
{
- if (get_device(dev)) {
+ if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
- put_device(dev);
- }
}
/**
}
EXPORT_SYMBOL_GPL(device_create);
-/**
- * find_device - finds a device that was created with device_create()
- * @class: pointer to the struct class that this device was registered with
- * @devt: the dev_t of the device that was previously registered
- */
-static struct device *find_device(struct class *class, dev_t devt)
+static int __match_devt(struct device *dev, void *data)
{
- struct device *dev = NULL;
- struct device *dev_tmp;
+ dev_t *devt = data;
- down(&class->sem);
- list_for_each_entry(dev_tmp, &class->devices, node) {
- if (dev_tmp->devt == devt) {
- dev = dev_tmp;
- break;
- }
- }
- up(&class->sem);
- return dev;
+ return dev->devt == *devt;
}
/**
{
struct device *dev;
- dev = find_device(class, devt);
- if (dev)
+ dev = class_find_device(class, &devt, __match_devt);
+ if (dev) {
+ put_device(dev);
device_unregister(dev);
+ }
}
EXPORT_SYMBOL_GPL(device_destroy);
{
struct device *dev;
- dev = find_device(class, devt);
- if (dev)
+ dev = class_find_device(class, &devt, __match_devt);
+ if (dev) {
device_pm_schedule_removal(dev);
+ put_device(dev);
+ }
}
EXPORT_SYMBOL_GPL(destroy_suspended_device);
#endif /* CONFIG_PM_SLEEP */
struct driver_attribute *attr)
{
int error;
- if (get_driver(drv)) {
+ if (drv)
error = sysfs_create_file(&drv->p->kobj, &attr->attr);
- put_driver(drv);
- } else
+ else
error = -EINVAL;
return error;
}
void driver_remove_file(struct device_driver *drv,
struct driver_attribute *attr)
{
- if (get_driver(drv)) {
+ if (drv)
sysfs_remove_file(&drv->p->kobj, &attr->attr);
- put_driver(drv);
- }
}
EXPORT_SYMBOL_GPL(driver_remove_file);
list_move_tail(&dev->power.entry, &dpm_destroy);
mutex_unlock(&dpm_list_mtx);
}
+EXPORT_SYMBOL_GPL(device_pm_schedule_removal);
/**
* pm_sleep_lock - mutual exclusion for registration and suspend
extern void device_pm_add(struct device *);
extern void device_pm_remove(struct device *);
-extern void device_pm_schedule_removal(struct device *);
extern int pm_sleep_lock(void);
extern void pm_sleep_unlock(void);
vma->vm_ops = &drm_vm_dma_ops;
vma->vm_flags |= VM_RESERVED; /* Don't swap */
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_RESERVED; /* Don't swap */
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
vdata->refcnt = ATOMIC_INIT(1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
* nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
*
* Written by: Ulf Jakobsson,
- * Jan �erfeldt,
+ * Jan Åkerfeldt,
* Stefan Thomasson,
*
* Maintained by: Paul Hardwick (p.hardwick@option.com)
* --------------------------------------------------------------------------
*/
-/*
- * CHANGELOG
- * Version 2.1d
- * 11-November-2007 Jiri Slaby, Frank Seidel
- * - Big rework of multicard support by Jiri
- * - Major cleanups (semaphore to mutex, endianess, no major reservation)
- * - Optimizations
- *
- * Version 2.1c
- * 30-October-2007 Frank Seidel
- * - Completed multicard support
- * - Minor cleanups
- *
- * Version 2.1b
- * 07-August-2007 Frank Seidel
- * - Minor cleanups
- * - theoretical multicard support
- *
- * Version 2.1
- * 03-July-2006 Paul Hardwick
- *
- * - Stability Improvements. Incorporated spinlock wraps patch.
- * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room)
- * - using __devexit macro for tty
- *
- *
- * Version 2.0
- * 08-feb-2006 15:34:10:Ulf
- *
- * -Fixed issue when not waking up line disipine layer, could probably result
- * in better uplink performance for 2.4.
- *
- * -Fixed issue with big endian during initalization, now proper toggle flags
- * are handled between preloader and maincode.
- *
- * -Fixed flow control issue.
- *
- * -Added support for setting DTR.
- *
- * -For 2.4 kernels, removing temporary buffer that's not needed.
- *
- * -Reading CTS only for modem port (only port that supports it).
- *
- * -Return 0 in write_room instead of netative value, it's not handled in
- * upper layer.
- *
- * --------------------------------------------------------------------------
- * Version 1.0
- *
- * First version of driver, only tested with card of type F32_2.
- * Works fine with 2.4 and 2.6 kernels.
- * Driver also support big endian architecture.
- */
-
/* Enable this to have a lot of debug printouts */
#define DEBUG
/* Do we need this settable at runtime? */
static int debug = NOZOMI_DEBUG_LEVEL;
-#define D(lvl, args...) do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
- while (0)
+#define D(lvl, args...) do \
+ {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
+ while (0)
#define D_(lvl, args...) D(lvl, ##args)
/* These printouts are always printed */
/* Big endian */
struct toggles {
- unsigned enabled:5; /*
+ unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
- unsigned diag_dl:1;
- unsigned mdm_dl:1;
- unsigned mdm_ul:1;
+ unsigned int diag_dl:1;
+ unsigned int mdm_dl:1;
+ unsigned int mdm_ul:1;
} __attribute__ ((packed));
/* Configuration table to read at startup of card */
/* This stores all control downlink flags */
struct ctrl_dl {
u8 port;
- unsigned reserved:4;
- unsigned CTS:1;
- unsigned RI:1;
- unsigned DCD:1;
- unsigned DSR:1;
+ unsigned int reserved:4;
+ unsigned int CTS:1;
+ unsigned int RI:1;
+ unsigned int DCD:1;
+ unsigned int DSR:1;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
u8 port;
- unsigned reserved:6;
- unsigned RTS:1;
- unsigned DTR:1;
+ unsigned int reserved:6;
+ unsigned int RTS:1;
+ unsigned int DTR:1;
} __attribute__ ((packed));
#else
/* This represents the toggle information */
struct toggles {
- unsigned mdm_ul:1;
- unsigned mdm_dl:1;
- unsigned diag_dl:1;
- unsigned enabled:5; /*
+ unsigned int mdm_ul:1;
+ unsigned int mdm_dl:1;
+ unsigned int diag_dl:1;
+ unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
/* This stores all control downlink flags */
struct ctrl_dl {
- unsigned DSR:1;
- unsigned DCD:1;
- unsigned RI:1;
- unsigned CTS:1;
- unsigned reserverd:4;
+ unsigned int DSR:1;
+ unsigned int DCD:1;
+ unsigned int RI:1;
+ unsigned int CTS:1;
+ unsigned int reserverd:4;
u8 port;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
- unsigned DTR:1;
- unsigned RTS:1;
- unsigned reserved:6;
+ unsigned int DTR:1;
+ unsigned int RTS:1;
+ unsigned int reserved:6;
u8 port;
} __attribute__ ((packed));
#endif
} __attribute__ ((packed));
/* Global variables */
-static struct pci_device_id nozomi_pci_tbl[] = {
+static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
{PCI_DEVICE(VENDOR1, DEVICE1)},
{},
};
* -Optimize
* -Rewrite cleaner
*/
-static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
+static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
u32 size_bytes)
{
u32 i = 0;
u32 *ptr = (__force u32 *) mem_addr_start;
- u16 *buf16;
+ const u16 *buf16;
if (unlikely(!ptr || !buf))
return 0;
/* shortcut for extremely often used cases */
switch (size_bytes) {
case 2: /* 2 bytes */
- buf16 = (u16 *) buf;
+ buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
return 2;
break;
while (i < size_bytes) {
if (size_bytes - i == 2) {
/* 2 bytes */
- buf16 = (u16 *) buf;
+ buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
i += 2;
} else {
dc->config_table.ul_ctrl_len);
}
#else
-static __inline__ void dump_table(const struct nozomi *dc) { }
+static inline void dump_table(const struct nozomi *dc) { }
#endif
/*
/* Enable uplink interrupts */
static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
+ static const u16 mask[] = {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
/* Disable uplink interrupts */
static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
+ static const u16 mask[] =
+ {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
/* Enable downlink interrupts */
static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
+ static const u16 mask[] = {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
/* Disable downlink interrupts */
static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
+ static const u16 mask[] =
+ {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
* Return 1 - send buffer to card and ack.
* Return 0 - don't ack, don't send buffer to card.
*/
-static int send_data(enum port_type index, struct nozomi *dc)
+static int send_data(enum port_type index, const struct nozomi *dc)
{
u32 size = 0;
- struct port *port = &dc->port[index];
- u8 toggle = port->toggle_ul;
+ const struct port *port = &dc->port[index];
+ const u8 toggle = port->toggle_ul;
void __iomem *addr = port->ul_addr[toggle];
- u32 ul_size = port->ul_size[toggle];
+ const u32 ul_size = port->ul_size[toggle];
struct tty_struct *tty = port->tty;
/* Get data from tty and place in buf for now */
}
/*
- * Handle donlink data, ports that are handled are modem and diagnostics
+ * Handle downlink data, ports that are handled are modem and diagnostics
* Return 1 - ok
* Return 0 - toggle fields are out of sync
*/
static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", dc->card_type);
}
-static DEVICE_ATTR(card_type, 0444, card_type_show, NULL);
+static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%u\n", dc->open_ttys);
}
-static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL);
+static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);
static void make_sysfs_files(struct nozomi *dc)
{
{
struct port *port = tty->driver_data;
int room = 0;
- struct nozomi *dc = get_dc_by_tty(tty);
+ const struct nozomi *dc = get_dc_by_tty(tty);
if (!dc || !port)
return 0;
/* Gets io control parameters */
static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
{
- struct port *port = tty->driver_data;
- struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
- struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
+ const struct port *port = tty->driver_data;
+ const struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
+ const struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
return (ctrl_ul->RTS ? TIOCM_RTS : 0) |
(ctrl_ul->DTR ? TIOCM_DTR : 0) |
static int ntty_cflags_changed(struct port *port, unsigned long flags,
struct async_icount *cprev)
{
- struct async_icount cnow = port->tty_icount;
+ const struct async_icount cnow = port->tty_icount;
int ret;
ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
{
- struct async_icount cnow = port->tty_icount;
+ const struct async_icount cnow = port->tty_icount;
struct serial_icounter_struct icount;
icount.cts = cnow.cts;
/* just to discard single character writes */
static void ntty_put_char(struct tty_struct *tty, unsigned char c)
{
- /* FIXME !!! */
+ /*
+ * The card does not react correctly when we write single chars
+ * to the card, so we discard them.
+ */
DBG2("PUT CHAR Function: %c", c);
}
return rval;
}
-static struct tty_operations tty_ops = {
+static const struct tty_operations tty_ops = {
.ioctl = ntty_ioctl,
.open = ntty_open,
.close = ntty_close,
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kthread.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
will be called smc-ultra32.
config BFIN_MAC
- tristate "Blackfin 536/537 on-chip mac support"
- depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H)
+ tristate "Blackfin 527/536/537 on-chip mac support"
+ depends on NET_ETHERNET && (BF527 || BF537 || BF536) && (!BF537_PORT_H)
select CRC32
select MII
select PHYLIB
config BFIN_MAC_USE_L1
bool "Use L1 memory for rx/tx packets"
- depends on BFIN_MAC && BF537
+ depends on BFIN_MAC && (BF527 || BF537)
default y
help
To get maximum network performance, you should use L1 memory as rx/tx buffers.
config BFIN_MAC_RMII
bool "RMII PHY Interface (EXPERIMENTAL)"
depends on BFIN_MAC && EXPERIMENTAL
- default n
+ default y if BFIN527_EZKIT
+ default n if BFIN537_STAMP
help
Use Reduced PHY MII Interface
config IBMLANA
tristate "IBM LAN Adapter/A support"
- depends on MCA && MCA_LEGACY
+ depends on MCA
---help---
This is a Micro Channel Ethernet adapter. You need to set
CONFIG_MCA to use this driver. It is both available as an in-kernel
/* Wait until PHY reset is complete */
do {
read_phy(lp->phy_address, MII_BMCR, &bmcr);
- } while (!(bmcr && BMCR_RESET));
+ } while (!(bmcr & BMCR_RESET));
disable_mdi();
spin_unlock_irq(&lp->lock);
static void ax_reset_8390(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+ dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
if (jiffies - reset_start_time > 2*HZ/100) {
- printk(KERN_WARNING "%s: %s did not complete.\n",
+ dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
__FUNCTION__, dev->name);
break;
}
int ring_page)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n",
+ dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+ "[DMAstat:%d][irqlock:%d].\n",
dev->name, __FUNCTION__,
- ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
struct sk_buff *skb, int ring_offset)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
char *buf = skb->data;
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in ax_block_input "
+ dev_err(&ax->dev->dev,
+ "%s: DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ dev->name, __FUNCTION__,
+ ei_status.dmaing, ei_status.irqlock);
return;
}
const unsigned char *buf, const int start_page)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
unsigned long dma_start;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in %s."
+ dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
"[DMAstat:%d][irqlock:%d]\n",
dev->name, __FUNCTION__,
ei_status.dmaing, ei_status.irqlock);
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ dev_warn(&ax->dev->dev,
+ "%s: timeout waiting for Tx RDC.\n", dev->name);
ax_reset_8390(dev);
ax_NS8390_init(dev,1);
break;
ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
{
struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
unsigned long flags;
- printk(KERN_DEBUG "%s: %p, %04x, %04x %04x\n",
- __FUNCTION__, dev, phy_addr, reg, value);
+ dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
+ __FUNCTION__, dev, phy_addr, reg, value);
spin_lock_irqsave(&ei->page_lock, flags);
ax_NS8390_init(dev, 0);
if (first_init) {
- printk("AX88796: %dbit, irq %d, %lx, MAC: ",
- ei_status.word16 ? 16:8, dev->irq, dev->base_addr);
-
- for (i = 0; i < ETHER_ADDR_LEN; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- (i < (ETHER_ADDR_LEN-1) ? ':' : ' '));
+ DECLARE_MAC_BUF(mac);
- printk("\n");
+ dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %s\n",
+ ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
+ print_mac(mac, dev->dev_addr));
}
ret = register_netdev(dev);
/*
- * File: drivers/net/bfin_mac.c
- * Based on:
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
+ * Blackfin On-Chip MAC Driver
*
- * Original author:
- * Luke Yang <luke.yang@analog.com>
+ * Copyright 2004-2007 Analog Devices Inc.
*
- * Created:
- * Description:
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
#define DRV_NAME "bfin_mac"
#define DRV_VERSION "1.1"
#define DRV_AUTHOR "Bryan Wu, Luke Yang"
-#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver"
+#define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
/* poll the STABUSY bit */
while ((bfin_read_EMAC_STAADD()) & STABUSY) {
- mdelay(10);
+ udelay(1);
if (timeout_cnt-- < 0) {
printk(KERN_ERR DRV_NAME
": wait MDC/MDIO transaction to complete timeout\n");
spin_unlock_irqrestore(&lp->lock, flags);
}
+/* MDC = 2.5 MHz */
+#define MDC_CLK 2500000
+
static int mii_probe(struct net_device *dev)
{
struct bf537mac_local *lp = netdev_priv(dev);
struct phy_device *phydev = NULL;
unsigned short sysctl;
int i;
+ u32 sclk, mdc_div;
/* Enable PHY output early */
if (!(bfin_read_VR_CTL() & PHYCLKOE))
bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
- /* MDC = 2.5 MHz */
+ sclk = get_sclk();
+ mdc_div = ((sclk / MDC_CLK) / 2) - 1;
+
sysctl = bfin_read_EMAC_SYSCTL();
- sysctl |= SET_MDCDIV(24);
+ sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
bfin_write_EMAC_SYSCTL(sysctl);
/* search for connect PHY device */
lp->phydev = phydev;
printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n",
- DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
+ "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
+ "@sclk=%dMHz)\n",
+ DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq,
+ MDC_CLK, mdc_div, sclk/1000000);
return 0;
}
*/
if (current_tx_ptr->next->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) {
- mdelay(10);
+ mdelay(1);
if (tx_list_head->status.status_word != 0
|| !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
goto adjust_head;
current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+ /* Invalidate the data cache over the skb->data range when the cache is
+ * write-back. This prevents stale cache lines from overwriting the new
+ * data written by DMA.
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
+
len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
skb_put(skb, len);
blackfin_dcache_invalidate_range((unsigned long)skb->head,
#if defined(CONFIG_BFIN_MAC_RMII)
opmode |= RMII; /* For Now only 100MBit are supported */
-#ifdef CONFIG_BF_REV_0_2
+#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
opmode |= TE;
#endif
#endif
netif_wake_queue(dev);
}
+static void bf537mac_multicast_hash(struct net_device *dev)
+{
+ u32 emac_hashhi, emac_hashlo;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ emac_hashhi = emac_hashlo = 0;
+
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* skip non-multicast addresses */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc(ETH_ALEN, addrs);
+ crc >>= 26;
+
+ if (crc & 0x20)
+ emac_hashhi |= 1 << (crc & 0x1f);
+ else
+ emac_hashlo |= 1 << (crc & 0x1f);
+ }
+
+ bfin_write_EMAC_HASHHI(emac_hashhi);
+ bfin_write_EMAC_HASHLO(emac_hashlo);
+
+ return;
+}
+
/*
* This routine will, depending on the values passed to it,
* either make it accept multicast packets, go into
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= RAF;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI) {
/* accept all multicast */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
+ } else if (dev->mc_count) {
+ /* set up multicast hash table */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= HM;
+ bfin_write_EMAC_OPMODE(sysctl);
+ bf537mac_multicast_hash(dev);
} else {
/* clear promisc or multicast mode */
sysctl = bfin_read_EMAC_OPMODE();
return retval;
phy_start(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
bf537mac_disable();
bf537mac_enable();
-
pr_debug("hardware init finished\n");
netif_start_queue(dev);
netif_carrier_on(dev);
netif_carrier_off(dev);
phy_stop(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
/* clear everything */
bf537mac_shutdown(dev);
/* register irq handler */
if (request_irq
(IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
- "BFIN537_MAC_RX", dev)) {
+ "EMAC_RX", dev)) {
printk(KERN_WARNING DRV_NAME
": Unable to attach BlackFin MAC RX interrupt\n");
return -EBUSY;
/*
- * File: drivers/net/bfin_mac.c
- * Based on:
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
+ * Blackfin On-Chip MAC Driver
*
- * Original author:
- * Luke Yang <luke.yang@analog.com>
+ * Copyright 2004-2007 Analog Devices Inc.
*
- * Created:
- * Description:
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#define BFIN_MAC_CSUM_OFFLOAD
dev_set_allmulti(slave_dev, 1);
}
+ netif_tx_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
}
+ netif_tx_unlock_bh(bond_dev);
}
if (bond->params.mode == BOND_MODE_8023AD) {
}
/* flush master's mc_list from slave */
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_tx_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
}
/* flush master's mc_list from slave */
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_tx_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
}
if (do_failover) {
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
}
re_arm:
slave->link = BOND_LINK_UP;
- rtnl_lock();
-
write_lock_bh(&bond->curr_slave_lock);
if ((!bond->curr_active_slave) &&
}
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
}
} else {
read_lock(&bond->curr_slave_lock);
bond->dev->name,
slave->dev->name);
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
bond->current_arp_slave = slave;
if (slave) {
bond->primary_slave->dev->name);
/* primary is up so switch to it */
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, bond->primary_slave);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
slave = bond->primary_slave;
slave->jiffies = jiffies;
} else {
{
struct bonding *bond = bond_dev->priv;
struct net_device_stats *stats = &(bond->stats), *sstats;
+ struct net_device_stats local_stats;
struct slave *slave;
int i;
- memset(stats, 0, sizeof(struct net_device_stats));
+ memset(&local_stats, 0, sizeof(struct net_device_stats));
read_lock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
sstats = slave->dev->get_stats(slave->dev);
- stats->rx_packets += sstats->rx_packets;
- stats->rx_bytes += sstats->rx_bytes;
- stats->rx_errors += sstats->rx_errors;
- stats->rx_dropped += sstats->rx_dropped;
+ local_stats.rx_packets += sstats->rx_packets;
+ local_stats.rx_bytes += sstats->rx_bytes;
+ local_stats.rx_errors += sstats->rx_errors;
+ local_stats.rx_dropped += sstats->rx_dropped;
- stats->tx_packets += sstats->tx_packets;
- stats->tx_bytes += sstats->tx_bytes;
- stats->tx_errors += sstats->tx_errors;
- stats->tx_dropped += sstats->tx_dropped;
+ local_stats.tx_packets += sstats->tx_packets;
+ local_stats.tx_bytes += sstats->tx_bytes;
+ local_stats.tx_errors += sstats->tx_errors;
+ local_stats.tx_dropped += sstats->tx_dropped;
- stats->multicast += sstats->multicast;
- stats->collisions += sstats->collisions;
+ local_stats.multicast += sstats->multicast;
+ local_stats.collisions += sstats->collisions;
- stats->rx_length_errors += sstats->rx_length_errors;
- stats->rx_over_errors += sstats->rx_over_errors;
- stats->rx_crc_errors += sstats->rx_crc_errors;
- stats->rx_frame_errors += sstats->rx_frame_errors;
- stats->rx_fifo_errors += sstats->rx_fifo_errors;
- stats->rx_missed_errors += sstats->rx_missed_errors;
+ local_stats.rx_length_errors += sstats->rx_length_errors;
+ local_stats.rx_over_errors += sstats->rx_over_errors;
+ local_stats.rx_crc_errors += sstats->rx_crc_errors;
+ local_stats.rx_frame_errors += sstats->rx_frame_errors;
+ local_stats.rx_fifo_errors += sstats->rx_fifo_errors;
+ local_stats.rx_missed_errors += sstats->rx_missed_errors;
- stats->tx_aborted_errors += sstats->tx_aborted_errors;
- stats->tx_carrier_errors += sstats->tx_carrier_errors;
- stats->tx_fifo_errors += sstats->tx_fifo_errors;
- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
- stats->tx_window_errors += sstats->tx_window_errors;
+ local_stats.tx_aborted_errors += sstats->tx_aborted_errors;
+ local_stats.tx_carrier_errors += sstats->tx_carrier_errors;
+ local_stats.tx_fifo_errors += sstats->tx_fifo_errors;
+ local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors;
+ local_stats.tx_window_errors += sstats->tx_window_errors;
}
+ memcpy(stats, &local_stats, sizeof(struct net_device_stats));
+
read_unlock_bh(&bond->lock);
return stats;
struct bonding *bond = bond_dev->priv;
struct dev_mc_list *dmi;
- write_lock_bh(&bond->lock);
-
/*
* Do promisc before checking multicast_mode
*/
bond_set_allmulti(bond, -1);
}
+ read_lock(&bond->lock);
+
bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */
bond_mc_list_destroy(bond);
bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
- write_unlock_bh(&bond->lock);
+ read_unlock(&bond->lock);
}
/*
struct net_device *bond_dev = bond->dev;
bond_work_cancel_all(bond);
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_destroy(bond);
+ netif_tx_unlock_bh(bond_dev);
/* Release the bonded slaves */
bond_release_all(bond_dev);
bond_deinit(bond_dev);
int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl)
{
int mode = -1, i, rv;
- char modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
+ char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
- rv = sscanf(buf, "%d", &mode);
- if (!rv) {
+ for (p = (char *)buf; *p; p++)
+ if (!(isdigit(*p) || isspace(*p)))
+ break;
+
+ if (*p)
rv = sscanf(buf, "%20s", modestr);
- if (!rv)
- return -1;
- }
+ else
+ rv = sscanf(buf, "%d", &mode);
+
+ if (!rv)
+ return -1;
for (i = 0; tbl[i].modename; i++) {
if (mode == tbl[i].mode)
down_write(&bonding_rwsem);
/* Check to see if the bond already exists. */
- list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
- if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
- printk(KERN_ERR DRV_NAME
+ if (name) {
+ list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
+ if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
+ printk(KERN_ERR DRV_NAME
": cannot add bond %s; it already exists\n",
- name);
- res = -EPERM;
- goto out_rtnl;
- }
+ name);
+ res = -EPERM;
+ goto out_rtnl;
+ }
+ }
bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
ether_setup);
#include "bond_3ad.h"
#include "bond_alb.h"
-#define DRV_VERSION "3.2.3"
-#define DRV_RELDATE "December 6, 2007"
+#define DRV_VERSION "3.2.4"
+#define DRV_RELDATE "January 28, 2008"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
-void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
{
#define K * 1024
* defaults for the assorted SGE parameters, which admins can change until
* they are used to initialize the SGE.
*/
-void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
int i;
V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
-static void __devinit init_mtus(unsigned short mtus[])
+static void init_mtus(unsigned short mtus[])
{
/*
* See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
/*
* Initial congestion control parameters.
*/
-static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
a[9] = 2;
* Determines a card's PCI mode and associated parameters, such as speed
* and width.
*/
-static void __devinit get_pci_mode(struct adapter *adapter,
- struct pci_params *p)
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
static unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode, pcie_cap;
* capabilities and default speed/duplex/flow-control/autonegotiation
* settings.
*/
-static void __devinit init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = lc->speed = SPEED_INVALID;
* Calculates the size of an MC7 memory in bytes from the value of its
* configuration register.
*/
-static unsigned int __devinit mc7_calc_size(u32 cfg)
+static unsigned int mc7_calc_size(u32 cfg)
{
unsigned int width = G_WIDTH(cfg);
unsigned int banks = !!(cfg & F_BKS) + 1;
return MBs << 20;
}
-static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
- unsigned int base_addr, const char *name)
+static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+ unsigned int base_addr, const char *name)
{
u32 cfg;
return 0;
}
-static int __devinit init_parity(struct adapter *adap)
+static int init_parity(struct adapter *adap)
{
int i, err, addr;
* for some adapter tunables, take PHYs out of reset, and initialize the MDIO
* interface.
*/
-int __devinit t3_prep_adapter(struct adapter *adapter,
- const struct adapter_info *ai, int reset)
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ int reset)
{
int ret;
unsigned int i, j = 0;
* enabled. 82557 pads with 7Eh, while the later controllers pad
* with 00h.
*
- * IV. Recieve
+ * IV. Receive
*
* The Receive Frame Area (RFA) comprises a ring of Receive Frame
* Descriptors (RFD) + data buffer, thus forming the simplified mode
* and Rx indication and re-allocation happen in the same context,
* therefore no locking is required. A software-generated interrupt
* is generated from the watchdog to recover from a failed allocation
- * senario where all Rx resources have been indicated and none re-
+ * scenario where all Rx resources have been indicated and none re-
* placed.
*
* V. Miscellaneous
/* Quadwords to DMA into FIFO before starting frame transmit */
nic->tx_threshold = 0xE0;
- /* no interrupt for every tx completion, delay = 256us if not 557*/
+ /* no interrupt for every tx completion, delay = 256us if not 557 */
nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
&s->complete;
/* Device's stats reporting may take several microseconds to
- * complete, so where always waiting for results of the
+ * complete, so we're always waiting for results of the
* previous command. */
if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
if(restart_required) {
// ack the rnr?
- writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
e100_start_receiver(nic, nic->rx_to_clean);
if(work_done)
(*work_done)++;
struct nic *nic = netdev_priv(netdev);
unregister_netdev(netdev);
e100_free(nic);
- iounmap(nic->csr);
+ pci_iounmap(pdev, nic->csr);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
/**
* e100_io_error_detected - called when PCI error is detected.
* @pdev: Pointer to PCI device
- * @state: The current pci conneection state
+ * @state: The current pci connection state
*/
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
- /* Similar to calling e100_down(), but avoids adpater I/O. */
+ /* Similar to calling e100_down(), but avoids adapter I/O. */
netdev->stop(netdev);
- /* Detach; put netif into state similar to hotplug unplug. */
+ /* Detach; put netif into a state similar to hotplug unplug. */
napi_enable(&nic->napi);
netif_device_detach(netdev);
pci_disable_device(pdev);
/**
* Dump the eeprom for users having checksum issues
**/
-void e1000_dump_eeprom(struct e1000_adapter *adapter)
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ethtool_eeprom eeprom;
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
return err;
}
-bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
- int reg, int offset, u32 mask, u32 write)
+static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
+ int reg, int offset, u32 mask, u32 write)
{
int i;
u32 read;
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY | WAKE_ARP;
/* apply any specific unsupported masks here */
if (adapter->flags & FLAG_NO_WAKE_UCAST) {
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+ if (adapter->wol & E1000_WUFC_ARP)
+ wol->wolopts |= WAKE_ARP;
}
static int e1000_set_wol(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & WAKE_MAGICSECURE)
return -EOPNOTSUPP;
if (!(adapter->flags & FLAG_HAS_WOL))
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+ if (wol->wolopts & WAKE_ARP)
+ adapter->wol |= E1000_WUFC_ARP;
return 0;
}
int irq_flags = IRQF_SHARED;
int err;
- err = pci_enable_msi(adapter->pdev);
- if (err) {
- ndev_warn(netdev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
- } else {
+ if (!pci_enable_msi(adapter->pdev)) {
adapter->flags |= FLAG_MSI_ENABLED;
handler = e1000_intr_msi;
irq_flags = 0;
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
+ ndev_err(netdev,
+ "Unable to allocate %s interrupt (return: %d)\n",
+ adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
+ err);
if (adapter->flags & FLAG_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
- ndev_err(netdev,
- "Unable to allocate interrupt Error: %d\n", err);
}
return err;
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
+extern u64 ehea_driver_flags;
+extern struct work_struct ehea_rereg_mr_task;
+
#endif /* __EHEA_H__ */
return ret;
if (netif_carrier_ok(dev)) {
- switch(port->port_speed) {
+ switch (port->port_speed) {
case EHEA_SPEED_10M: cmd->speed = SPEED_10; break;
case EHEA_SPEED_100M: cmd->speed = SPEED_100; break;
case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break;
goto doit;
}
- switch(cmd->speed) {
+ switch (cmd->speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL)
sp = H_SPEED_10M_F;
#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__
-#define QPX_SQA_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48,63)
+#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
* (C) Copyright IBM Corp. 2006
*
* Authors:
- * Christoph Raisch <raisch@de.ibm.com>
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Thomas Klein <tklein@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs = 0;
-static int use_lro = 0;
+static int use_mcs;
+static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
-static int prop_carrier_state = 0;
+static int prop_carrier_state;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
"Default = 0");
-static int port_name_cnt = 0;
+static int port_name_cnt;
static LIST_HEAD(adapter_list);
-u64 ehea_driver_flags = 0;
+u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
struct semaphore dlpar_mem_lock;
.remove = ehea_remove,
};
-void ehea_dump(void *adr, int len, char *msg) {
+void ehea_dump(void *adr, int len, char *msg)
+{
int x;
unsigned char *deb = adr;
for (x = 0; x < len; x += 16) {
printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
- deb, x, *((u64*)&deb[0]), *((u64*)&deb[8]));
+ deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
deb += 16;
}
}
last_wqe_index = wqe_index;
rmb();
if (!ehea_check_cqe(cqe, &rq)) {
- if (rq == 1) { /* LL RQ1 */
+ if (rq == 1) {
+ /* LL RQ1 */
skb = get_skb_by_index_ll(skb_arr_rq1,
skb_arr_rq1_len,
wqe_index);
if (!skb)
break;
}
- skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
+ skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
ehea_fill_skb(dev, skb, cqe);
- } else if (rq == 2) { /* RQ2 */
+ } else if (rq == 2) {
+ /* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
skb_arr_rq2_len, cqe);
if (unlikely(!skb)) {
}
ehea_fill_skb(dev, skb, cqe);
processed_rq2++;
- } else { /* RQ3 */
+ } else {
+ /* RQ3 */
skb = get_skb_by_index(skb_arr_rq3,
skb_arr_rq3_len, cqe);
if (unlikely(!skb)) {
unsigned long flags;
cqe = ehea_poll_cq(send_cq);
- while(cqe && (quota > 0)) {
+ while (cqe && (quota > 0)) {
ehea_inc_cq(send_cq);
cqe_counter++;
static int ehea_poll(struct napi_struct *napi, int budget)
{
- struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi);
+ struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
+ napi);
struct net_device *dev = pr->port->netdev;
struct ehea_cqe *cqe;
struct ehea_cqe *cqe_skb = NULL;
u64 hret;
struct hcp_ehea_port_cb0 *cb0;
- cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */
- if (!cb0) { /* ehea_neq_tasklet() */
+ /* may be called via ehea_neq_tasklet() */
+ cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
goto out;
/* MAC address */
port->mac_addr = cb0->port_mac_addr << 16;
- if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
+ if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
ret = -EADDRNOTAVAIL;
goto out_free;
}
static void ehea_neq_tasklet(unsigned long data)
{
- struct ehea_adapter *adapter = (struct ehea_adapter*)data;
+ struct ehea_adapter *adapter = (struct ehea_adapter *)data;
struct ehea_eqe *eqe;
u64 event_mask;
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
- int arr_size = sizeof(void*) * max_q_entries;
+ int arr_size = sizeof(void *) * max_q_entries;
q_skba->arr = vmalloc(arr_size);
if (!q_skba->arr)
nfrags = skb_shinfo(skb)->nr_frags;
sg1entry = &swqe->u.immdata_desc.sg_entry;
- sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
+ sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
swqe->descriptors = 0;
sg1entry_contains_frag_data = 0;
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
ehea_error("%sregistering bc address failed (tagged)",
- hcallid == H_REG_BCMC ? "" : "de");
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
}
}
-static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
+static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
struct ehea_mc_list *ehea_mcl_entry;
u64 hret;
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list;
- i < dev->mc_count;
- i++, k_mcl_entry = k_mcl_entry->next) {
+ for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+ k_mcl_entry = k_mcl_entry->next)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
- }
+
}
out:
return;
if ((skb->protocol == htons(ETH_P_IP)) &&
(ip_hdr(skb)->protocol == IPPROTO_TCP)) {
- tcp = (struct tcphdr*)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4));
+ tcp = (struct tcphdr *)(skb_network_header(skb) +
+ (ip_hdr(skb)->ihl * 4));
tmp = (tcp->source + (tcp->dest << 16)) % 31;
tmp += ip_hdr(skb)->daddr % 31;
return tmp % num_qps;
- }
- else
+ } else
return 0;
}
u64 hret;
u16 dummy16 = 0;
u64 dummy64 = 0;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
int ret = 0;
int i;
- for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
ret |= ehea_clean_portres(port, &port->port_res[i]);
ret |= ehea_destroy_eq(port->qp_eq);
goto out_clean_pr;
}
- for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
ehea_error("activate_qp failed");
}
}
- for(i = 0; i < port->num_def_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_fill_port_res(&port->port_res[i]);
if (ret) {
ehea_error("out_free_irqs");
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
int ret = -EIO;
int dret;
int i;
return ret;
}
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr)
+void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
struct ehea_qp qp = *orig_qp;
struct ehea_qp_init_attr *init_attr = &qp.init_attr;
int ret = 0;
int i;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
u64 hret;
u64 dummy64 = 0;
u16 dummy16 = 0;
of_node_put(port->ofdev.node);
}
-static int ehea_driver_sysfs_add(struct device *dev,
- struct device_driver *driver)
-{
- int ret;
-
- ret = sysfs_create_link(&driver->kobj, &dev->kobj,
- kobject_name(&dev->kobj));
- if (ret == 0) {
- ret = sysfs_create_link(&dev->kobj, &driver->kobj,
- "driver");
- if (ret)
- sysfs_remove_link(&driver->kobj,
- kobject_name(&dev->kobj));
- }
- return ret;
-}
-
-static void ehea_driver_sysfs_remove(struct device *dev,
- struct device_driver *driver)
-{
- struct device_driver *drv = driver;
-
- if (drv) {
- sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
- sysfs_remove_link(&dev->kobj, "driver");
- }
-}
-
static struct device *ehea_register_port(struct ehea_port *port,
struct device_node *dn)
{
goto out_unreg_of_dev;
}
- ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
- if (ret) {
- ehea_error("failed to register sysfs driver link");
- goto out_rem_dev_file;
- }
-
return &port->ofdev.dev;
-out_rem_dev_file:
- device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
out_unreg_of_dev:
of_device_unregister(&port->ofdev);
out:
static void ehea_unregister_port(struct ehea_port *port)
{
- ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
of_device_unregister(&port->ofdev);
}
of_node_put(eth_dn);
if (port) {
- for (i=0; i < EHEA_MAX_PORTS; i++)
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
if (!adapter->port[i]) {
adapter->port[i] = port;
break;
ehea_shutdown_single_port(port);
- for (i=0; i < EHEA_MAX_PORTS; i++)
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
if (adapter->port[i] == port) {
adapter->port[i] = NULL;
break;
}
static struct notifier_block ehea_reboot_nb = {
- .notifier_call = ehea_reboot_notifier,
+ .notifier_call = ehea_reboot_notifier,
};
static int check_module_parm(void)
* (C) Copyright IBM Corp. 2006
*
* Authors:
- * Christoph Raisch <raisch@de.ibm.com>
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Thomas Klein <tklein@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
}
/* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP 1
-#define H_ALL_RES_TYPE_CQ 2
-#define H_ALL_RES_TYPE_EQ 3
-#define H_ALL_RES_TYPE_MR 5
-#define H_ALL_RES_TYPE_MW 6
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
static long ehea_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
- adapter_handle, /* R4 */
- qp_category, /* R5 */
- qp_handle, /* R6 */
- sel_mask, /* R7 */
+ adapter_handle, /* R4 */
+ qp_category, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
virt_to_abs(cb_addr), /* R8 */
0, 0);
}
/* input param R5 */
-#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
-#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
-#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
-#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
-#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
+#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
+#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
+#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
+#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
/* input param R9 */
-#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32,63)
+#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
/* input param R10 */
-#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
-#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
-#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
-#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
+#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
+#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
+#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
+#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
/* Max Send Scatter Gather Elements */
-#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
-#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
+#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
+#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
/* Max Receive SG Elements RQ1 */
-#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
-#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
+#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
+#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
/* input param R11 */
-#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
/* max swqe immediate data length */
-#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
/* input param R12 */
-#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
/* Threshold RQ2 */
-#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
/* Threshold RQ3 */
/* output param R6 */
-#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
-#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
+#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
/* output param, R7 */
-#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
-#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
-#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
+#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
+#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
/* output param R8,R9 */
-#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
/* output param R11,R12 */
-#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr, const u32 pd,
}
/* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP 1
-#define H_ALL_RES_TYPE_CQ 2
-#define H_ALL_RES_TYPE_EQ 3
-#define H_ALL_RES_TYPE_MR 5
-#define H_ALL_RES_TYPE_MW 6
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
/* input param R5 */
-#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
+#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
/* input param R6 */
-#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
/* output param R6 */
-#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
/* output param R7 */
-#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
/* output param R8 */
-#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
/* output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
hret = ehea_plpar_hcall9(H_REGISTER_SMR,
outs,
- adapter_handle , /* R4 */
- orig_mr_handle, /* R5 */
- vaddr_in, /* R6 */
- (((u64)access_ctrl) << 32ULL), /* R7 */
- pd, /* R8 */
- 0, 0, 0, 0); /* R9-R12 */
+ adapter_handle , /* R4 */
+ orig_mr_handle, /* R5 */
+ vaddr_in, /* R6 */
+ (((u64)access_ctrl) << 32ULL), /* R7 */
+ pd, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
mr->handle = outs[0];
mr->lkey = (u32)outs[2];
u64 outs[PLPAR_HCALL9_BUFSIZE];
return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
- outs,
+ outs,
adapter_handle, /* R4 */
H_DISABLE_GET_EHEA_WQE_P, /* R5 */
qp_handle, /* R6 */
- 0, 0, 0, 0, 0, 0); /* R7-R12 */
+ 0, 0, 0, 0, 0, 0); /* R7-R12 */
}
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
{
return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle, /* R4 */
- res_handle, /* R5 */
+ res_handle, /* R5 */
force_bit,
- 0, 0, 0, 0); /* R7-R10 */
+ 0, 0, 0, 0); /* R7-R10 */
}
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u32 pd, u64 *mr_handle, u32 *lkey)
{
u64 hret;
- u64 outs[PLPAR_HCALL9_BUFSIZE];
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
outs,
adapter_handle, /* R4 */
5, /* R5 */
- vaddr, /* R6 */
+ vaddr, /* R6 */
length, /* R7 */
(((u64) access_ctrl) << 32ULL), /* R8 */
pd, /* R9 */
void *rblock)
{
return ehea_plpar_hcall_norets(H_ERROR_DATA,
- adapter_handle, /* R4 */
- ressource_handle, /* R5 */
- virt_to_abs(rblock), /* R6 */
- 0, 0, 0, 0); /* R7-R12 */
+ adapter_handle, /* R4 */
+ ressource_handle, /* R5 */
+ virt_to_abs(rblock), /* R6 */
+ 0, 0, 0, 0); /* R7-R12 */
}
static inline void hcp_epas_dtor(struct h_epas *epas)
{
if (epas->kernel.addr)
- iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
+ iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
epas->user.addr = 0;
epas->kernel.addr = 0;
const u64 qp_handle,
const u64 sel_mask,
void *cb_addr,
- u64 * inv_attr_id,
- u64 * proc_mask, u16 * out_swr, u16 * out_rwr);
+ u64 *inv_attr_id,
+ u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
- struct ehea_eq_attr *eq_attr, u64 * eq_handle);
+ struct ehea_eq_attr *eq_attr, u64 *eq_handle);
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
struct ehea_cq_attr *cq_attr,
- u64 * cq_handle, struct h_epas *epas);
+ u64 *cq_handle, struct h_epas *epas);
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr,
const u32 pd,
- u64 * qp_handle, struct h_epas *h_epas);
+ u64 *qp_handle, struct h_epas *h_epas);
-#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48,55)
-#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62,63)
+#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
+#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
u64 ehea_h_register_rpage(const u64 adapter_handle,
const u8 pagesize,
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u64 length, const u32 access_ctrl,
- const u32 pd, u64 * mr_handle, u32 * lkey);
+ const u32 pd, u64 *mr_handle, u32 *lkey);
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u8 pagesize, const u8 queue_type,
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
/* output param R5 */
-#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40,47)
-#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48,63)
+#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
+#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
struct ehea_busmap ehea_bmap = { 0, 0, NULL };
-extern u64 ehea_driver_flags;
-extern struct work_struct ehea_rereg_mr_task;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
}
queue->queue_length = nr_of_pages * pagesize;
- queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+ queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
ehea_error("no mem for queue_pages");
return -ENOMEM;
*/
i = 0;
while (i < nr_of_pages) {
- u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+ u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
if (!kpage)
goto out_nomem;
for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
- (queue->queue_pages)[i] = (struct ehea_page*)kpage;
+ (queue->queue_pages)[i] = (struct ehea_page *)kpage;
kpage += pagesize;
i++;
}
return 0;
hcp_epas_dtor(&cq->epas);
-
- if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(cq->adapter, cq->fw_handle);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
}
if (i == (eq->attr.nr_pages - 1)) {
/* last page */
vpage = hw_qpageit_get_inc(&eq->hw_queue);
- if ((hret != H_SUCCESS) || (vpage)) {
+ if ((hret != H_SUCCESS) || (vpage))
goto out_kill_hwq;
- }
+
} else {
- if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+ if ((hret != H_PAGE_REGISTERED) || (!vpage))
goto out_kill_hwq;
- }
+
}
}
unsigned long flags;
spin_lock_irqsave(&eq->spinlock, flags);
- eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+ eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
spin_unlock_irqrestore(&eq->spinlock, flags);
return eqe;
hcp_epas_dtor(&eq->epas);
- if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(eq->adapter, eq->fw_handle);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
}
hcp_epas_dtor(&qp->epas);
- if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(qp->adapter, qp->fw_handle);
hret = ehea_destroy_qp_res(qp, FORCE_FREE);
}
return 0;
}
-int ehea_create_busmap( void )
+int ehea_create_busmap(void)
{
u64 vaddr = EHEA_BUSMAP_START;
unsigned long high_section_index = 0;
return 0;
}
-void ehea_destroy_busmap( void )
+void ehea_destroy_busmap(void)
{
vfree(ehea_bmap.vaddr);
}
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
-#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
-#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
+#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
/* Some abbreviations used here:
u64 entry;
};
-#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
- u32 qe = *(u8*)retvalue;
+ u32 qe = *(u8 *)retvalue;
if ((qe >> 7) == (queue->toggle_state & 1))
hw_qeit_eq_get_inc(queue);
else
int ehea_destroy_cq(struct ehea_cq *cq);
-struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
struct ehea_qp_init_attr *init_attr);
int ehea_destroy_qp(struct ehea_qp *qp);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
-int ehea_create_busmap( void );
-void ehea_destroy_busmap( void );
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);
#endif /* __EHEA_QMR_H__ */
* Copyright (C) 2004 Andrew de Quincey (wol support)
* Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
* IRQ rate fixes, bigendian fixes, cleanups, verification)
- * Copyright (c) 2004,5,6 NVIDIA Corporation
+ * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
- NvRegMacReset = 0x3c,
+ NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_MCASTADDRA_FORCE 0x01
NvRegMulticastAddrB = 0xB4,
NvRegMulticastMaskA = 0xB8,
+#define NVREG_MCASTMASKA_NONE 0xffffffff
NvRegMulticastMaskB = 0xBC,
+#define NVREG_MCASTMASKB_NONE 0xffff
NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
NvRegTxPauseFrame = 0x170,
-#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
-#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
-#define NV_RX2_CHECKSUMOK1 (0x10000000)
-#define NV_RX2_CHECKSUMOK2 (0x14000000)
-#define NV_RX2_CHECKSUMOK3 (0x18000000)
+#define NV_RX2_CHECKSUM_IP (0x10000000)
+#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
+#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
goto next_pkt;
}
}
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
- (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- }
} else {
dev_kfree_skb(skb);
goto next_pkt;
}
}
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
- (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- }
/* got a valid packet - forward it to the network core */
skb_put(skb, len);
addr[1] = alwaysOn[1];
mask[0] = alwaysOn[0] | alwaysOff[0];
mask[1] = alwaysOn[1] | alwaysOff[1];
+ } else {
+ mask[0] = NVREG_MCASTMASKA_NONE;
+ mask[1] = NVREG_MCASTMASKB_NONE;
}
}
addr[0] |= NVREG_MCASTADDRA_FORCE;
nv_mac_reset(dev);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(0, base + NvRegPacketFilterFlags);
writel(0, base + NvRegTransmitterControl);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{0,},
};
0x0000
};
-static char *ibmlana_adapter_names[] __initdata = {
+static char *ibmlana_adapter_names[] __devinitdata = {
"IBM LAN Adapter/A",
NULL
};
-static int ibmlana_init_one(struct device *kdev)
+static int __devinit ibmlana_init_one(struct device *kdev)
{
struct mca_device *mdev = to_mca_device(kdev);
struct net_device *dev;
if (adapter->msix_entries) {
err = igb_request_msix(adapter);
if (!err) {
- struct e1000_hw *hw = &adapter->hw;
/* enable IAM, auto-mask,
* DO NOT USE EIAME or IAME in legacy mode */
wr32(E1000_IAM, IMS_ENABLE_MASK);
return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-static int __devinit macb_probe(struct platform_device *pdev)
+static int __init macb_probe(struct platform_device *pdev)
{
struct eth_platform_data *pdata;
struct resource *regs;
return err;
}
-static int __devexit macb_remove(struct platform_device *pdev)
+static int __exit macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
}
static struct platform_driver macb_driver = {
- .probe = macb_probe,
- .remove = __devexit_p(macb_remove),
+ .remove = __exit_p(macb_remove),
.driver = {
.name = "macb",
},
static int __init macb_init(void)
{
- return platform_driver_register(&macb_driver);
+ return platform_driver_probe(&macb_driver, macb_probe);
}
static void __exit macb_exit(void)
* for more details.
*/
-#define DEBUG
-
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/simint.h>
-#include "mipsnet.h" /* actual device IO mapping */
+#define MIPSNET_VERSION "2007-11-17"
+
+/*
+ * Net status/control block as seen by sw in the core.
+ */
+struct mipsnet_regs {
+ /*
+ * Device info for probing, reads as MIPSNET%d where %d is some
+ * form of version.
+ */
+ u64 devId; /*0x00 */
-#define MIPSNET_VERSION "2005-06-20"
+ /*
+ * read only busy flag.
+ * Set and cleared by the Net Device to indicate that an rx or a tx
+ * is in progress.
+ */
+ u32 busy; /*0x08 */
-#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field))
+ /*
+ * Set by the Net Device.
+ * The device will set it once data has been received.
+ * The value is the number of bytes that should be read from
+ * rxDataBuffer. The value decreases towards 0 as the data is
+ * read from rxDataBuffer.
+ */
+ u32 rxDataCount; /*0x0c */
+#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
+
+ /*
+ * Settable from the MIPS core, cleared by the Net Device.
+ * The core should set the number of bytes it wants to send,
+ * then it should write those bytes of data to txDataBuffer.
+ * The device will clear txDataCount once the data has been
+ * processed (not necessarily sent).
+ */
+ u32 txDataCount; /*0x10 */
+
+ /*
+ * Interrupt control
+ *
+ * Used to clear the interrupt generated by this dev.
+ * Write a 1 to clear the interrupt. (except bit31).
+ *
+ * Bit0 is set if it was a tx-done interrupt.
+ * Bit1 is set when new rx-data is available.
+ * Until this bit is cleared there will be no other RXs.
+ *
+ * Bit31 is used for testing, it clears after a read.
+ * Writing 1 to this bit will cause an interrupt to be generated.
+ * To clear the test interrupt, write 0 to this register.
+ */
+ u32 interruptControl; /*0x14 */
+#define MIPSNET_INTCTL_TXDONE (1u << 0)
+#define MIPSNET_INTCTL_RXDONE (1u << 1)
+#define MIPSNET_INTCTL_TESTBIT (1u << 31)
+
+ /*
+ * Readonly core-specific interrupt info for the device to signal
+ * the core. The meaning of the contents of this field might change.
+ */
+ /* XXX: the whole memIntf interrupt scheme is messy: the device
+ * should have no control whatsoever over what VPE/register set is
+ * being used.
+ * The MemIntf should only expose interrupt lines, and something in
+ * the config should be responsible for the line<->core/vpe bindings.
+ */
+ u32 interruptInfo; /*0x18 */
+
+ /*
+ * This is where the received data is read out.
+ * There is more data to read until rxDataReady is 0.
+ * Only 1 byte at this regs offset is used.
+ */
+ u32 rxDataBuffer; /*0x1c */
+
+ /*
+ * This is where the data to transmit is written.
+ * Data should be written for the amount specified in the
+ * txDataCount register.
+ * Only 1 byte at this regs offset is used.
+ */
+ u32 txDataBuffer; /*0x20 */
+};
+
+#define regaddr(dev, field) \
+ (dev->base_addr + offsetof(struct mipsnet_regs, field))
static char mipsnet_string[] = "mipsnet";
static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
int len)
{
- uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
-
- if (available_len < len)
- return -EFAULT;
-
for (; len > 0; len--, kdata++)
- *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer));
+ *kdata = inb(regaddr(dev, rxDataBuffer));
- return inl(mipsnet_reg_address(dev, rxDataCount));
+ return inl(regaddr(dev, rxDataCount));
}
-static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
+static inline void mipsnet_put_todevice(struct net_device *dev,
struct sk_buff *skb)
{
int count_to_go = skb->len;
char *buf_ptr = skb->data;
- outl(skb->len, mipsnet_reg_address(dev, txDataCount));
+ outl(skb->len, regaddr(dev, txDataCount));
for (; count_to_go; buf_ptr++, count_to_go--)
- outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
+ outb(*buf_ptr, regaddr(dev, txDataBuffer));
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- return skb->len;
+ dev_kfree_skb(skb);
}
static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
+static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
{
struct sk_buff *skb;
- size_t len = count;
- skb = alloc_skb(len + 2, GFP_KERNEL);
+ if (!len)
+ return len;
+
+ skb = dev_alloc_skb(len + NET_IP_ALIGN);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
}
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
return -EFAULT;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- return count;
+ return len;
}
static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
-
- irqreturn_t retval = IRQ_NONE;
- uint64_t interruptFlags;
-
- if (irq == dev->irq) {
- retval = IRQ_HANDLED;
-
- interruptFlags =
- inl(mipsnet_reg_address(dev, interruptControl));
-
- if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
- outl(MIPSNET_INTCTL_TXDONE,
- mipsnet_reg_address(dev, interruptControl));
- /* only one packet at a time, we are done. */
- netif_wake_queue(dev);
- } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
- mipsnet_get_fromdev(dev,
- inl(mipsnet_reg_address(dev, rxDataCount)));
- outl(MIPSNET_INTCTL_RXDONE,
- mipsnet_reg_address(dev, interruptControl));
-
- } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
- /*
- * TESTBIT is cleared on read.
- * And takes effect after a write with 0
- */
- outl(0, mipsnet_reg_address(dev, interruptControl));
- } else {
- /* Maybe shared IRQ, just ignore, no clearing. */
- retval = IRQ_NONE;
- }
-
- } else {
- printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
- dev->name, __FUNCTION__, irq);
- retval = IRQ_NONE;
+ u32 int_flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (irq != dev->irq)
+ goto out_badirq;
+
+ /* TESTBIT is cleared on read. */
+ int_flags = inl(regaddr(dev, interruptControl));
+ if (int_flags & MIPSNET_INTCTL_TESTBIT) {
+ /* TESTBIT takes effect after a write with 0. */
+ outl(0, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
+ /* Only one packet at a time, we are done. */
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev);
+ outl(MIPSNET_INTCTL_TXDONE,
+ regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
+ mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
+ outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
}
- return retval;
+ return ret;
+
+out_badirq:
+ printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
+ dev->name, __FUNCTION__, irq);
+ return ret;
}
static int mipsnet_open(struct net_device *dev)
err = request_irq(dev->irq, &mipsnet_interrupt,
IRQF_SHARED, dev->name, (void *) dev);
-
if (err) {
- release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
return err;
}
netif_start_queue(dev);
/* test interrupt handler */
- outl(MIPSNET_INTCTL_TESTBIT,
- mipsnet_reg_address(dev, interruptControl));
-
+ outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
return 0;
}
static int mipsnet_close(struct net_device *dev)
{
netif_stop_queue(dev);
-
+ free_irq(dev->irq, dev);
return 0;
}
*/
netdev->base_addr = 0x4200;
netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
- inl(mipsnet_reg_address(netdev, interruptInfo));
+ inl(regaddr(netdev, interruptInfo));
/* Get the io region now, get irq on open() */
- if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
+ if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
+ "mipsnet")) {
err = -EBUSY;
goto out_free_netdev;
}
return 0;
out_free_region:
- release_region(netdev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
out_free_netdev:
free_netdev(netdev);
struct net_device *dev = dev_get_drvdata(device);
unregister_netdev(dev);
- release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
free_netdev(dev);
dev_set_drvdata(device, NULL);
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __MIPSNET_H
-#define __MIPSNET_H
-
-/*
- * Id of this Net device, as seen by the core.
- */
-#define MIPS_NET_DEV_ID ((uint64_t) \
- ((uint64_t) 'M' << 0)| \
- ((uint64_t) 'I' << 8)| \
- ((uint64_t) 'P' << 16)| \
- ((uint64_t) 'S' << 24)| \
- ((uint64_t) 'N' << 32)| \
- ((uint64_t) 'E' << 40)| \
- ((uint64_t) 'T' << 48)| \
- ((uint64_t) '0' << 56))
-
-/*
- * Net status/control block as seen by sw in the core.
- * (Why not use bit fields? can't be bothered with cross-platform struct
- * packing.)
- */
-struct net_control_block {
- /*
- * dev info for probing
- * reads as MIPSNET%d where %d is some form of version
- */
- uint64_t devId; /* 0x00 */
-
- /*
- * read only busy flag.
- * Set and cleared by the Net Device to indicate that an rx or a tx
- * is in progress.
- */
- uint32_t busy; /* 0x08 */
-
- /*
- * Set by the Net Device.
- * The device will set it once data has been received.
- * The value is the number of bytes that should be read from
- * rxDataBuffer. The value will decrease till 0 until all the data
- * from rxDataBuffer has been read.
- */
- uint32_t rxDataCount; /* 0x0c */
-#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
-
- /*
- * Settable from the MIPS core, cleared by the Net Device. The core
- * should set the number of bytes it wants to send, then it should
- * write those bytes of data to txDataBuffer. The device will clear
- * txDataCount has been processed (not necessarily sent).
- */
- uint32_t txDataCount; /* 0x10 */
-
- /*
- * Interrupt control
- *
- * Used to clear the interrupted generated by this dev.
- * Write a 1 to clear the interrupt. (except bit31).
- *
- * Bit0 is set if it was a tx-done interrupt.
- * Bit1 is set when new rx-data is available.
- * Until this bit is cleared there will be no other RXs.
- *
- * Bit31 is used for testing, it clears after a read.
- * Writing 1 to this bit will cause an interrupt to be generated.
- * To clear the test interrupt, write 0 to this register.
- */
- uint32_t interruptControl; /*0x14 */
-#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0))
-#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1))
-#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31))
-#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \
- MIPSNET_INTCTL_RXDONE | \
- MIPSNET_INTCTL_TESTBIT)
-
- /*
- * Readonly core-specific interrupt info for the device to signal the
- * core. The meaning of the contents of this field might change.
- *
- * TODO: the whole memIntf interrupt scheme is messy: the device should
- * have no control what so ever of what VPE/register set is being
- * used. The MemIntf should only expose interrupt lines, and
- * something in the config should be responsible for the
- * line<->core/vpe bindings.
- */
- uint32_t interruptInfo; /* 0x18 */
-
- /*
- * This is where the received data is read out.
- * There is more data to read until rxDataReady is 0.
- * Only 1 byte at this regs offset is used.
- */
- uint32_t rxDataBuffer; /* 0x1c */
-
- /*
- * This is where the data to transmit is written. Data should be
- * written for the amount specified in the txDataCount register. Only
- * 1 byte at this regs offset is used.
- */
- uint32_t txDataBuffer; /* 0x20 */
-};
-
-#define MIPSNET_IO_EXTENT 0x40 /* being generous */
-
-#define field_offset(field) (offsetof(struct net_control_block, field))
-
-#endif /* __MIPSNET_H */
IIId. Synchronization
Most operations are synchronized on the np->lock irq spinlock, except the
-performance critical codepaths:
-
-The rx process only runs in the interrupt handler. Access from outside
-the interrupt handler is only permitted after disable_irq().
-
-The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
-is set, then access is permitted under spin_lock_irq(&np->lock).
-
-Thus configuration functions that want to access everything must call
- disable_irq(dev->irq);
- netif_tx_lock_bh(dev);
- spin_lock_irq(&np->lock);
-
-IV. Notes
-
-NatSemi PCI network controllers are very uncommon.
+receive and transmit paths, which are synchronized using a combination of
+hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
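For illustration, a minimal sketch of a configuration path that follows the
convention above: the private-structure layout and names (example_priv, lock,
napi, example_reconfigure) are hypothetical placeholders, not this driver's
actual definitions; only standard kernel primitives (netdev_priv,
spin_lock_irqsave, napi_disable/napi_enable) are assumed.

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	/* Hypothetical private data; a real driver defines its own layout
	 * and initializes the lock with spin_lock_init() at probe time. */
	struct example_priv {
		spinlock_t lock;
		struct napi_struct napi;
	};

	static void example_reconfigure(struct net_device *dev)
	{
		struct example_priv *np = netdev_priv(dev);
		unsigned long flags;

		napi_disable(&np->napi);	/* park the rx/tx poll path */
		spin_lock_irqsave(&np->lock, flags); /* exclude the irq handler */
		/* ... update shared state and device registers here ... */
		spin_unlock_irqrestore(&np->lock, flags);
		napi_enable(&np->napi);
	}

Disabling NAPI before taking the lock keeps the softirq poll path from
contending for np->lock while the configuration change is in progress.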
IVb. References
#define LRO_MAX_AGGR 64
+#define PE_MIN_MTU 64
+#define PE_MAX_MTU 1500
+#define PE_DEF_MTU ETH_DATA_LEN
+
#define DEFAULT_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
& ((ring)->size - 1))
#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
-#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
return -1;
}
+static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags &= ~PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
+static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags |= PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
struct pci_dev *pdev = mac->pdev;
return 0;
}
+static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ unsigned int adr0, adr1;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ adr0 = dev->dev_addr[2] << 24 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[5];
+ adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
+ adr1 &= ~0xffff;
+ adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];
+
+ pasemi_mac_intf_disable(mac);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
+ pasemi_mac_intf_enable(mac);
+
+ return 0;
+}
+
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
void **tcph, u64 *hdr_flags, void *data)
{
}
-static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
{
struct pasemi_mac_rxring *rx = rx_ring(mac);
unsigned int i;
}
for (i = 0; i < RX_RING_SIZE; i++)
- RX_DESC(rx, i) = 0;
+ RX_BUFF(rx, i) = 0;
+}
+
+static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+{
+ pasemi_mac_free_rx_buffers(mac);
dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);
/* Entry in use? */
WARN_ON(*buff);
- skb = dev_alloc_skb(BUF_SIZE);
+ skb = dev_alloc_skb(mac->bufsz);
skb_reserve(skb, LOCAL_SKB_ALIGN);
if (unlikely(!skb))
break;
dma = pci_map_single(mac->dma_pdev, skb->data,
- BUF_SIZE - LOCAL_SKB_ALIGN,
+ mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(dma))) {
info->skb = skb;
info->dma = dma;
- *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+ *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
fill++;
}
len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
- pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN,
+ pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
if (macrx & XCT_MACRX_CRC) {
return IRQ_HANDLED;
}
-static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
-{
- unsigned int flags;
-
- flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
- flags &= ~PAS_MAC_CFG_PCFG_PE;
- write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
-}
-
-static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
-{
- unsigned int flags;
-
- flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
- flags |= PAS_MAC_CFG_PCFG_PE;
- write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
-}
-
static void pasemi_adjust_link(struct net_device *dev)
{
struct pasemi_mac *mac = netdev_priv(dev);
#define MAX_RETRIES 5000
+static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int txch = tx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
+ PAS_DMA_TXCHAN_TCMDSTA_ST);
+
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
+ if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop tx channel, tcmdsta %08x\n", sta);
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
+}
+
+static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int rxch = rx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
+ PAS_DMA_RXCHAN_CCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
+ if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx channel, ccmdsta 08%x\n", sta);
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
+}
+
+static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ PAS_DMA_RXINT_RCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx interface, rcmdsta %08x\n", sta);
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+}
+
static int pasemi_mac_close(struct net_device *dev)
{
struct pasemi_mac *mac = netdev_priv(dev);
unsigned int sta;
- int retries;
int rxch, txch;
rxch = rx_ring(mac)->chan.chno;
pasemi_mac_clean_tx(tx_ring(mac));
pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
- /* Disable interface */
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
- PAS_DMA_TXCHAN_TCMDSTA_ST);
- write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
- PAS_DMA_RXINT_RCMDSTA_ST);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
- PAS_DMA_RXCHAN_CCMDSTA_ST);
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch));
- if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
- if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
- if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
-
- /* Then, disable the channel. This must be done separately from
- * stopping, since you can't disable when active.
- */
-
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
- write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+ pasemi_mac_pause_txchan(mac);
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_pause_rxchan(mac);
+ pasemi_mac_intf_disable(mac);
free_irq(mac->tx->chan.irq, mac->tx);
free_irq(mac->rx->chan.irq, mac->rx);
return pkts;
}
+static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int reg;
+ unsigned int rcmdsta;
+ int running;
+
+ if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
+ return -EINVAL;
+
+ running = netif_running(dev);
+
+ if (running) {
+ /* Need to stop the interface, clean out all already
+ * received buffers, free all unused buffers on the RX
+ * interface ring, then finally re-fill the rx ring with
+ * the new-size buffers and restart.
+ */
+
+ napi_disable(&mac->napi);
+ netif_tx_disable(dev);
+ pasemi_mac_intf_disable(mac);
+
+ rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
+ pasemi_mac_free_rx_buffers(mac);
+ }
+
+ /* Change maxf, i.e. what size frames are accepted.
+ * Need room for ethernet header and CRC word
+ */
+ reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
+ reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
+ reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
+ write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
+
+ dev->mtu = new_mtu;
+ /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ if (running) {
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
+
+ rx_ring(mac)->next_to_fill = 0;
+ pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);
+
+ napi_enable(&mac->napi);
+ netif_start_queue(dev);
+ pasemi_mac_intf_enable(mac);
+ }
+
+ return 0;
+}
+
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
dev->stop = pasemi_mac_close;
dev->hard_start_xmit = pasemi_mac_start_tx;
dev->set_multicast_list = pasemi_mac_set_rx_mode;
+ dev->set_mac_address = pasemi_mac_set_mac_addr;
+ dev->mtu = PE_DEF_MTU;
+ /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+ mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ dev->change_mtu = pasemi_mac_change_mtu;
if (err)
goto out;
struct phy_device *phydev;
struct napi_struct napi;
+ int bufsz; /* RX ring buffer size */
u8 type;
#define MAC_TYPE_GMAC 1
#define MAC_TYPE_XAUI 2
/* MAC CFG register offsets */
enum {
PAS_MAC_CFG_PCFG = 0x80,
+ PAS_MAC_CFG_MACCFG = 0x84,
+ PAS_MAC_CFG_ADR0 = 0x8c,
+ PAS_MAC_CFG_ADR1 = 0x90,
PAS_MAC_CFG_TXP = 0x98,
PAS_MAC_IPC_CHNL = 0x208,
};
#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
+
+#define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000
+#define PAS_MAC_CFG_MACCFG_TXT_S 28
+#define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000
+#define PAS_MAC_CFG_MACCFG_PRES_S 24
+#define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00
+#define PAS_MAC_CFG_MACCFG_MAXF_S 8
+#define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \
+ PAS_MAC_CFG_MACCFG_MAXF_M)
+#define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff
+#define PAS_MAC_CFG_MACCFG_MINF_S 0
+
#define PAS_MAC_CFG_TXP_FCF 0x01000000
#define PAS_MAC_CFG_TXP_FCE 0x00800000
#define PAS_MAC_CFG_TXP_FC 0x00400000
#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
-#if MMIO_FLUSH_AUDIT_COMPLETE
+#ifdef MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */
#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg))
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
- tp = dev->priv;
+ tp = netdev_priv(dev);
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device (pdev);
return i;
}
- tp = dev->priv;
+ tp = netdev_priv(dev);
assert (ioaddr != NULL);
assert (dev != NULL);
dev->base_addr = (unsigned long) ioaddr;
/* dev->priv/tp zeroed and aligned in alloc_etherdev */
- tp = dev->priv;
+ tp = netdev_priv(dev);
/* note: tp->chipset set in netdrv_init_board */
tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
assert (dev != NULL);
- np = dev->priv;
+ np = netdev_priv(dev);
assert (np != NULL);
unregister_netdev (dev);
static int mdio_read (struct net_device *dev, int phy_id, int location)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int retval = 0;
static void mdio_write (struct net_device *dev, int phy_id, int location,
int value)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd =
(0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
static int netdrv_open (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int retval;
#ifdef NETDRV_DEBUG
void *ioaddr = tp->mmio_addr;
/* Start the hardware at open or resume. */
static void netdrv_hw_start (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 i;
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void netdrv_init_ring (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int i;
DPRINTK ("ENTER\n");
static void netdrv_timer (unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int next_tick = 60 * HZ;
int mii_lpa;
}
-static void netdrv_tx_clear (struct netdrv_private *tp)
+static void netdrv_tx_clear (struct net_device *dev)
{
int i;
+ struct netdrv_private *tp = netdev_priv(dev);
atomic_set (&tp->cur_tx, 0);
atomic_set (&tp->dirty_tx, 0);
static void netdrv_tx_timeout (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int i;
u8 tmp8;
/* Stop a shared interrupt from scavenging while we are. */
spin_lock_irqsave (&tp->lock, flags);
- netdrv_tx_clear (tp);
+ netdrv_tx_clear (dev);
spin_unlock_irqrestore (&tp->lock, flags);
static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int entry;
DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x,"
" cur %4.4x.\n", dev->name, rx_status,
rx_size, cur_rx);
-#if NETDRV_DEBUG > 2
+#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
{
int i;
DPRINTK ("%s: Frame contents ", dev->name);
static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *) dev_instance;
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr;
int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
static int netdrv_close (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
spin_unlock_irqrestore (&tp->lock, flags);
- synchronize_irq ();
+ synchronize_irq (dev->irq);
free_irq (dev->irq, dev);
- netdrv_tx_clear (tp);
+ netdrv_tx_clear (dev);
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
unsigned long flags;
int rc = 0;
static void netdrv_set_rx_mode (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
int i, rx_mode;
static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
static int netdrv_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdrv_private *tp = dev->priv;
+ /*struct netdrv_private *tp = netdev_priv(dev);*/
if (!netif_running(dev))
return 0;
---help---
Currently supports the IP175C PHY.
+config REALTEK_PHY
+ tristate "Drivers for Realtek PHYs"
+ ---help---
+ Supports the Realtek 821x PHY.
+
config FIXED_PHY
bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
---help---
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_FIXED_PHY) += fixed.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
.driver = { .owner = THIS_MODULE },
};
+static struct phy_driver bcm5482_driver = {
+ .phy_id = 0x0143bcb0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5482",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm54xx_ack_interrupt,
+ .config_intr = bcm54xx_config_intr,
+ .driver = { .owner = THIS_MODULE },
+};
+
static int __init broadcom_init(void)
{
int ret;
ret = phy_driver_register(&bcm5461_driver);
if (ret)
goto out_5461;
+ ret = phy_driver_register(&bcm5482_driver);
+ if (ret)
+ goto out_5482;
return ret;
+out_5482:
+ phy_driver_unregister(&bcm5461_driver);
out_5461:
phy_driver_unregister(&bcm5421_driver);
out_5421:
static void __exit broadcom_exit(void)
{
+ phy_driver_unregister(&bcm5482_driver);
phy_driver_unregister(&bcm5461_driver);
phy_driver_unregister(&bcm5421_driver);
phy_driver_unregister(&bcm5411_driver);
int i;
int err = 0;
- spin_lock_init(&bus->mdio_lock);
+ mutex_init(&bus->mdio_lock);
if (NULL == bus || NULL == bus->name ||
NULL == bus->read ||
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
int retval;
struct mii_bus *bus = phydev->bus;
- spin_lock_bh(&bus->mdio_lock);
+ BUG_ON(in_interrupt());
+
+ mutex_lock(&bus->mdio_lock);
retval = bus->read(bus, phydev->addr, regnum);
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
return retval;
}
int err;
struct mii_bus *bus = phydev->bus;
- spin_lock_bh(&bus->mdio_lock);
+ BUG_ON(in_interrupt());
+
+ mutex_lock(&bus->mdio_lock);
err = bus->write(bus, phydev->addr, regnum, val);
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
return err;
}
{
int err;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
}
out_unlock:
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
return err;
}
EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);
+static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data);
/**
{
phydev->adjust_state = handler;
+ INIT_WORK(&phydev->state_queue, phy_state_machine);
init_timer(&phydev->phy_timer);
phydev->phy_timer.function = &phy_timer;
phydev->phy_timer.data = (unsigned long) phydev;
void phy_stop_machine(struct phy_device *phydev)
{
del_timer_sync(&phydev->phy_timer);
+ cancel_work_sync(&phydev->state_queue);
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (phydev->state > PHY_UP)
phydev->state = PHY_UP;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
phydev->adjust_state = NULL;
}
*/
void phy_error(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
}
/**
if (err)
goto phy_err;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
phydev->state = PHY_CHANGELINK;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
atomic_dec(&phydev->irq_disable);
enable_irq(phydev->irq);
*/
void phy_stop(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (PHY_HALTED == phydev->state)
goto out_unlock;
phydev->state = PHY_HALTED;
out_unlock:
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
/*
* Cannot call flush_scheduled_work() here as desired because
*/
void phy_start(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
switch (phydev->state) {
case PHY_STARTING:
default:
break;
}
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
-/* PHY timer which handles the state machine */
-static void phy_timer(unsigned long data)
+/**
+ * phy_state_machine - Handle the state machine
+ * @work: work_struct that describes the work to be done
+ *
+ * Description: Scheduled by the state_queue workqueue each time
+ * phy_timer is triggered.
+ */
+static void phy_state_machine(struct work_struct *work)
{
- struct phy_device *phydev = (struct phy_device *)data;
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, state_queue);
int needs_aneg = 0;
int err = 0;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (phydev->adjust_state)
phydev->adjust_state(phydev->attached_dev);
break;
}
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
if (needs_aneg)
err = phy_start_aneg(phydev);
mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}
+/* PHY timer which schedules the state machine work */
+static void phy_timer(unsigned long data)
+{
+ struct phy_device *phydev = (struct phy_device *)data;
+
+ /*
+ * PHY I/O operations can potentially sleep so we ensure that
+ * it's done from a process context
+ */
+ schedule_work(&phydev->state_queue);
+}
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
dev->state = PHY_DOWN;
- spin_lock_init(&dev->lock);
+ mutex_init(&dev->lock);
return dev;
}
if (!(phydrv->flags & PHY_HAS_INTERRUPT))
phydev->irq = PHY_POLL;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
if (phydev->drv->probe)
err = phydev->drv->probe(phydev);
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
return err;
phydev = to_phy_device(dev);
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
if (phydev->drv->remove)
phydev->drv->remove(phydev);
--- /dev/null
+/*
+ * drivers/net/phy/realtek.c
+ *
+ * Driver for Realtek PHYs
+ *
+ * Author: Johnson Leung <r58129@freescale.com>
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/phy.h>
+
+#define RTL821x_PHYSR 0x11
+#define RTL821x_PHYSR_DUPLEX 0x2000
+#define RTL821x_PHYSR_SPEED 0xc000
+#define RTL821x_INER 0x12
+#define RTL821x_INER_INIT 0x6400
+#define RTL821x_INSR 0x13
+
+MODULE_DESCRIPTION("Realtek PHY driver");
+MODULE_AUTHOR("Johnson Leung");
+MODULE_LICENSE("GPL");
+
+static int rtl821x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, RTL821x_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int rtl821x_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, RTL821x_INER,
+ RTL821x_INER_INIT);
+ else
+ err = phy_write(phydev, RTL821x_INER, 0);
+
+ return err;
+}
+
+/* RTL8211B */
+static struct phy_driver rtl821x_driver = {
+ .phy_id = 0x001cc912,
+ .name = "RTL821x Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl821x_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init realtek_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&rtl821x_driver);
+
+ return ret;
+}
+
+static void __exit realtek_exit(void)
+{
+ phy_driver_unregister(&rtl821x_driver);
+}
+
+module_init(realtek_init);
+module_exit(realtek_exit);
lro->iph = ip;
lro->tcph = tcp;
lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
- lro->tcp_ack = ntohl(tcp->ack_seq);
+ lro->tcp_ack = tcp->ack_seq;
lro->sg_num = 1;
lro->total_len = ntohs(ip->tot_len);
lro->frags_len = 0;
* already been done.
*/
if (tcp->doff == 8) {
- u32 *ptr;
- ptr = (u32 *)(tcp+1);
+ __be32 *ptr;
+ ptr = (__be32 *)(tcp+1);
lro->saw_ts = 1;
- lro->cur_tsval = *(ptr+1);
+ lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr+2);
}
lro->in_use = 1;
/* Update tsecr field if this session has timestamps enabled */
if (lro->saw_ts) {
- u32 *ptr = (u32 *)(tcp + 1);
+ __be32 *ptr = (__be32 *)(tcp + 1);
*(ptr+2) = lro->cur_tsecr;
}
lro->window = tcp->window;
if (lro->saw_ts) {
- u32 *ptr;
+ __be32 *ptr;
/* Update tsecr and tsval from this packet */
- ptr = (u32 *) (tcp + 1);
- lro->cur_tsval = *(ptr + 1);
+ ptr = (__be32 *)(tcp+1);
+ lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr + 2);
}
}
/* Ensure timestamp value increases monotonically */
if (l_lro)
- if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
+ if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
return -1;
/* timestamp echo reply should be non-zero */
- if (*((u32 *)(ptr+6)) == 0)
+ if (*((__be32 *)(ptr+6)) == 0)
return -1;
}
int in_use;
__be16 window;
u32 cur_tsval;
- u32 cur_tsecr;
+ __be32 cur_tsecr;
u8 saw_ts;
};
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
+static struct pci_device_id sis190_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
/* Turn on/off phy power saving */
if (onoff)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ sky2_pci_read32(hw, PCI_DEV_REG1);
udelay(100);
}
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ sky2_set_multicast(dev);
return 0;
err_out:
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */
u32 err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_HWE_L1_MASK)
}
sky2_power_on(hw);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
err = sky2_up(dev);
if (err)
dev_close(dev);
- else
- sky2_set_multicast(dev);
}
return err;
dev_close(dev);
goto out;
}
-
- sky2_set_multicast(dev);
}
}
.get_link = bigmac_get_link,
};
-static int __init bigmac_ether_init(struct sbus_dev *qec_sdev)
+static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev)
{
struct net_device *dev;
static int version_printed;
qecp->gregs + GLOB_RSIZE);
}
-static u8 __init qec_get_burst(struct device_node *dp)
+static u8 __devinit qec_get_burst(struct device_node *dp)
{
u8 bsizes, bsizes_more;
return bsizes;
}
-static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
+static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev)
{
struct sbus_dev *qec_sdev = child_sdev->parent;
struct sunqec *qecp;
return NULL;
}
-static int __init qec_ether_init(struct sbus_dev *sdev)
+static int __devinit qec_ether_init(struct sbus_dev *sdev)
{
static unsigned version_printed;
struct net_device *dev;
.handshake_complete = vnet_handshake_complete,
};
-static void print_version(void)
+static void __devinit print_version(void)
{
static int version_printed;
}
-static int olympic_open(struct net_device *dev)
+static int __devinit olympic_open(struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
if (!ugeth)
return;
- if (ugeth->uccf)
+ if (ugeth->uccf) {
ucc_fast_free(ugeth->uccf);
+ ugeth->uccf = NULL;
+ }
if (ugeth->p_thread_data_tx) {
qe_muram_free(ugeth->thread_dat_tx_offset);
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- /* Create CQs for hash tables */
- INIT_LIST_HEAD(&ugeth->group_hash_q);
- INIT_LIST_HEAD(&ugeth->ind_hash_q);
-
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) {
if (netif_msg_probe(ugeth))
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ucc_netpoll(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int irq = ugeth->ug_info->uf_info.irq;
+
+ disable_irq(irq);
+ ucc_geth_irq_handler(irq, dev);
+ enable_irq(irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
ugeth = netdev_priv(dev);
spin_lock_init(&ugeth->lock);
+ /* Create CQs for hash tables */
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+
dev_set_drvdata(device, dev);
/* Set the dev->base_addr to the gfar reg region */
#ifdef CONFIG_UGETH_NAPI
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
#endif /* CONFIG_UGETH_NAPI */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ucc_netpoll;
+#endif
dev->stop = ucc_geth_close;
// dev->change_mtu = ucc_geth_change_mtu;
dev->mtu = 1500;
struct net_device *dev = dev_get_drvdata(device);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- dev_set_drvdata(device, NULL);
- ucc_geth_memclean(ugeth);
+ unregister_netdev(dev);
free_netdev(dev);
+ ucc_geth_memclean(ugeth);
+ dev_set_drvdata(device, NULL);
return 0;
}
netdev->set_multicast_list = rtl8150_set_multicast;
netdev->set_mac_address = rtl8150_set_mac_address;
netdev->get_stats = rtl8150_netdev_stats;
- netdev->mtu = RTL8150_MTU;
SET_ETHTOOL_OPS(netdev, &ops);
dev->intr_interval = 100; /* 100ms */
}
#endif
-static void rhine_hw_init(struct net_device *dev, long pioaddr)
+static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
struct rhine_private *rp = netdev_priv(dev);
* for 64bit hardware platforms.
*
* TODO
- * Big-endian support
* rx_copybreak/alignment
* Scatter gather
* More testing
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
- vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
 writew(vptr->options.numrx, &regs->RBRDU);
 writel(vptr->rd_pool_dma, &regs->RDBaseLo);
vptr->int_mask = INT_MASK_DEF;
- writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
+ writel(vptr->rd_pool_dma, &regs->RDBaseLo);
 writew(vptr->options.numrx - 1, &regs->RDCSize);
mac_rx_queue_run(regs);
mac_rx_queue_wake(regs);
 writew(vptr->options.numtx - 1, &regs->TDCSize);
for (i = 0; i < vptr->num_txq; i++) {
- writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+ writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
mac_tx_queue_run(regs, i);
}
dirty = vptr->rd_dirty - unusable;
for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
- vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
}
 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
struct rx_desc *rd = vptr->rd_ring + dirty;
/* Fine for an all zero Rx desc at init time as well */
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
if (!vptr->rd_info[dirty].skb) {
if (!vptr->rd_info[rd_curr].skb)
break;
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
rmb();
/*
* Don't drop CE or RL error frame although RXOK is off
*/
- if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+ if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
if (velocity_receive_frame(vptr, rd_curr) < 0)
stats->rx_dropped++;
} else {
stats->rx_dropped++;
}
- rd->inten = 1;
+ rd->size |= RX_INTEN;
vptr->dev->last_rx = jiffies;
struct net_device_stats *stats = &vptr->stats;
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
struct rx_desc *rd = &(vptr->rd_ring[idx]);
- int pkt_len = rd->rdesc0.len;
+ int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
*/
*((u32 *) & (rd->rdesc0)) = 0;
- rd->len = cpu_to_le32(vptr->rx_buf_sz);
- rd->inten = 1;
+ rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
rd->pa_low = cpu_to_le32(rd_info->skb_dma);
rd->pa_high = 0;
return 0;
td = &(vptr->td_rings[qnum][idx]);
tdinfo = &(vptr->td_infos[qnum][idx]);
- if (td->tdesc0.owner == OWNED_BY_NIC)
+ if (td->tdesc0.len & OWNED_BY_NIC)
break;
if ((works++ > 15))
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
- pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
#else
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif
struct velocity_td_info *tdinfo;
unsigned long flags;
int index;
-
int pktlen = skb->len;
+ __le16 len = cpu_to_le16(pktlen);
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
td_ptr = &(vptr->td_rings[qnum][index]);
tdinfo = &(vptr->td_infos[qnum][index]);
- td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
td_ptr->tdesc1.TCR = TCR0_TIC;
- td_ptr->td_buf[0].queue = 0;
+ td_ptr->td_buf[0].size &= ~TD_QUEUE;
/*
* Pad short frames.
if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */
pktlen = ETH_ZLEN;
+ len = cpu_to_le16(ETH_ZLEN);
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
tdinfo->skb = skb;
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 0) {
if (nfrags > 6) {
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize =
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else {
int i = 0;
tdinfo->nskb_dma = 0;
- tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
/* FIXME: support 48bit DMA later */
td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
td_ptr->td_buf[i].pa_high = 0;
- td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;
+ td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = ((void *) page_address(frag->page + frag->page_offset));
+ void *addr = (void *)page_address(frag->page) + frag->page_offset;
tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].bufsize = frag->size;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
}
tdinfo->nskb_dma = i - 1;
- td_ptr->tdesc1.CMDZ = i;
}
} else
*/
tdinfo->skb = skb;
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len;
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
}
+ td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
- td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
- td_ptr->tdesc1.pqinf.priority = 0;
- td_ptr->tdesc1.pqinf.CFI = 0;
+ td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
td_ptr->tdesc1.TCR |= TCR0_VETAG;
}
if (prev < 0)
prev = vptr->options.numtx - 1;
- td_ptr->tdesc0.owner = OWNED_BY_NIC;
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
vptr->td_used[qnum]++;
vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
netif_stop_queue(dev);
td_ptr = &(vptr->td_rings[qnum][prev]);
- td_ptr->td_buf[0].queue = 1;
+ td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
}
dev->trans_start = jiffies;
velocity_save_context(vptr, &vptr->context);
velocity_shutdown(vptr);
velocity_set_wol(vptr);
- pci_enable_wake(pdev, 3, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
pci_set_power_state(pdev, PCI_D3hot);
} else {
velocity_save_context(vptr, &vptr->context);
* Bits in the RSR0 register
*/
-#define RSR_DETAG 0x0080
-#define RSR_SNTAG 0x0040
-#define RSR_RXER 0x0020
-#define RSR_RL 0x0010
-#define RSR_CE 0x0008
-#define RSR_FAE 0x0004
-#define RSR_CRC 0x0002
-#define RSR_VIDM 0x0001
+#define RSR_DETAG cpu_to_le16(0x0080)
+#define RSR_SNTAG cpu_to_le16(0x0040)
+#define RSR_RXER cpu_to_le16(0x0020)
+#define RSR_RL cpu_to_le16(0x0010)
+#define RSR_CE cpu_to_le16(0x0008)
+#define RSR_FAE cpu_to_le16(0x0004)
+#define RSR_CRC cpu_to_le16(0x0002)
+#define RSR_VIDM cpu_to_le16(0x0001)
/*
* Bits in the RSR1 register
*/
-#define RSR_RXOK 0x8000 // rx OK
-#define RSR_PFT 0x4000 // Perfect filtering address match
-#define RSR_MAR 0x2000 // MAC accept multicast address packet
-#define RSR_BAR 0x1000 // MAC accept broadcast address packet
-#define RSR_PHY 0x0800 // MAC accept physical address packet
-#define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator
-#define RSR_STP 0x0200 // start of packet
-#define RSR_EDP 0x0100 // end of packet
-
-/*
- * Bits in the RSR1 register
- */
-
-#define RSR1_RXOK 0x80 // rx OK
-#define RSR1_PFT 0x40 // Perfect filtering address match
-#define RSR1_MAR 0x20 // MAC accept multicast address packet
-#define RSR1_BAR 0x10 // MAC accept broadcast address packet
-#define RSR1_PHY 0x08 // MAC accept physical address packet
-#define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator
-#define RSR1_STP 0x02 // start of packet
-#define RSR1_EDP 0x01 // end of packet
+#define RSR_RXOK cpu_to_le16(0x8000) // rx OK
+#define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match
+#define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet
+#define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet
+#define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet
+#define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator
+#define RSR_STP cpu_to_le16(0x0200) // start of packet
+#define RSR_EDP cpu_to_le16(0x0100) // end of packet
/*
* Bits in the CSM register
* Bits in the TSR0 register
*/
-#define TSR0_ABT 0x0080 // Tx abort because of excessive collision
-#define TSR0_OWT 0x0040 // Jumbo frame Tx abort
-#define TSR0_OWC 0x0020 // Out of window collision
-#define TSR0_COLS 0x0010 // experience collision in this transmit event
-#define TSR0_NCR3 0x0008 // collision retry counter[3]
-#define TSR0_NCR2 0x0004 // collision retry counter[2]
-#define TSR0_NCR1 0x0002 // collision retry counter[1]
-#define TSR0_NCR0 0x0001 // collision retry counter[0]
-#define TSR0_TERR 0x8000 //
-#define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode
-#define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode
-#define TSR0_LNKFL 0x1000 // packet serviced during link down
-#define TSR0_SHDN 0x0400 // shutdown case
-#define TSR0_CRS 0x0200 // carrier sense lost
-#define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat)
-
-/*
- * Bits in the TSR1 register
- */
-
-#define TSR1_TERR 0x80 //
-#define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode
-#define TSR1_GMII 0x20 // current transaction is serviced by GMII mode
-#define TSR1_LNKFL 0x10 // packet serviced during link down
-#define TSR1_SHDN 0x04 // shutdown case
-#define TSR1_CRS 0x02 // carrier sense lost
-#define TSR1_CDH 0x01 // AQE test fail (CD heartbeat)
+#define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision
+#define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort
+#define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision
+#define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event
+#define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3]
+#define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2]
+#define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1]
+#define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0]
+#define TSR0_TERR cpu_to_le16(0x8000) //
+#define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode
+#define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode
+#define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down
+#define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case
+#define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost
+#define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat)
//
// Bits in the TCR0 register
*/
struct rdesc0 {
- u16 RSR; /* Receive status */
- u16 len:14; /* Received packet length */
- u16 reserved:1;
- u16 owner:1; /* Who owns this buffer ? */
+ __le16 RSR; /* Receive status */
+ __le16 len; /* bits 0--13; bit 15 - owner */
};
struct rdesc1 {
- u16 PQTAG;
+ __le16 PQTAG;
u8 CSM;
u8 IPKT;
};
+enum {
+ RX_INTEN = __constant_cpu_to_le16(0x8000)
+};
+
struct rx_desc {
struct rdesc0 rdesc0;
struct rdesc1 rdesc1;
- u32 pa_low; /* Low 32 bit PCI address */
- u16 pa_high; /* Next 16 bit PCI address (48 total) */
- u16 len:15; /* Frame size */
- u16 inten:1; /* Enable interrupt */
+ __le32 pa_low; /* Low 32 bit PCI address */
+ __le16 pa_high; /* Next 16 bit PCI address (48 total) */
+ __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
} __attribute__ ((__packed__));
/*
*/
struct tdesc0 {
- u16 TSR; /* Transmit status register */
- u16 pktsize:14; /* Size of frame */
- u16 reserved:1;
- u16 owner:1; /* Who owns the buffer */
+ __le16 TSR; /* Transmit status register */
+ __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */
};
-struct pqinf { /* Priority queue info */
- u16 VID:12;
- u16 CFI:1;
- u16 priority:3;
-} __attribute__ ((__packed__));
-
struct tdesc1 {
- struct pqinf pqinf;
+ __le16 vlan;
u8 TCR;
- u8 TCPLS:2;
- u8 reserved:2;
- u8 CMDZ:4;
+ u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
} __attribute__ ((__packed__));
+enum {
+ TD_QUEUE = __constant_cpu_to_le16(0x8000)
+};
+
struct td_buf {
- u32 pa_low;
- u16 pa_high;
- u16 bufsize:14;
- u16 reserved:1;
- u16 queue:1;
+ __le32 pa_low;
+ __le16 pa_high;
+ __le16 size; /* bits 0--13 - size, bit 15 - queue */
} __attribute__ ((__packed__));
struct tx_desc {
enum velocity_owner {
OWNED_BY_HOST = 0,
- OWNED_BY_NIC = 1
+ OWNED_BY_NIC = __constant_cpu_to_le16(0x8000)
};
volatile u8 RCR;
volatile u8 TCR;
- volatile u32 CR0Set; /* 0x08 */
- volatile u32 CR0Clr; /* 0x0C */
+ volatile __le32 CR0Set; /* 0x08 */
+ volatile __le32 CR0Clr; /* 0x0C */
volatile u8 MARCAM[8]; /* 0x10 */
- volatile u32 DecBaseHi; /* 0x18 */
- volatile u16 DbfBaseHi; /* 0x1C */
- volatile u16 reserved_1E;
+ volatile __le32 DecBaseHi; /* 0x18 */
+ volatile __le16 DbfBaseHi; /* 0x1C */
+ volatile __le16 reserved_1E;
- volatile u16 ISRCTL; /* 0x20 */
+ volatile __le16 ISRCTL; /* 0x20 */
volatile u8 TXESR;
volatile u8 RXESR;
- volatile u32 ISR; /* 0x24 */
- volatile u32 IMR;
+ volatile __le32 ISR; /* 0x24 */
+ volatile __le32 IMR;
- volatile u32 TDStatusPort; /* 0x2C */
+ volatile __le32 TDStatusPort; /* 0x2C */
- volatile u16 TDCSRSet; /* 0x30 */
+ volatile __le16 TDCSRSet; /* 0x30 */
volatile u8 RDCSRSet;
volatile u8 reserved_33;
- volatile u16 TDCSRClr;
+ volatile __le16 TDCSRClr;
volatile u8 RDCSRClr;
volatile u8 reserved_37;
- volatile u32 RDBaseLo; /* 0x38 */
- volatile u16 RDIdx; /* 0x3C */
- volatile u16 reserved_3E;
+ volatile __le32 RDBaseLo; /* 0x38 */
+ volatile __le16 RDIdx; /* 0x3C */
+ volatile __le16 reserved_3E;
- volatile u32 TDBaseLo[4]; /* 0x40 */
+ volatile __le32 TDBaseLo[4]; /* 0x40 */
- volatile u16 RDCSize; /* 0x50 */
- volatile u16 TDCSize; /* 0x52 */
- volatile u16 TDIdx[4]; /* 0x54 */
- volatile u16 tx_pause_timer; /* 0x5C */
- volatile u16 RBRDU; /* 0x5E */
+ volatile __le16 RDCSize; /* 0x50 */
+ volatile __le16 TDCSize; /* 0x52 */
+ volatile __le16 TDIdx[4]; /* 0x54 */
+ volatile __le16 tx_pause_timer; /* 0x5C */
+ volatile __le16 RBRDU; /* 0x5E */
- volatile u32 FIFOTest0; /* 0x60 */
- volatile u32 FIFOTest1; /* 0x64 */
+ volatile __le32 FIFOTest0; /* 0x60 */
+ volatile __le32 FIFOTest1; /* 0x64 */
volatile u8 CAMADDR; /* 0x68 */
volatile u8 CAMCR; /* 0x69 */
volatile u8 PHYSR1;
volatile u8 MIICR;
volatile u8 MIIADR;
- volatile u16 MIIDATA;
+ volatile __le16 MIIDATA;
- volatile u16 SoftTimer0; /* 0x74 */
- volatile u16 SoftTimer1;
+ volatile __le16 SoftTimer0; /* 0x74 */
+ volatile __le16 SoftTimer1;
volatile u8 CFGA; /* 0x78 */
volatile u8 CFGB;
volatile u8 CFGC;
volatile u8 CFGD;
- volatile u16 DCFG; /* 0x7C */
- volatile u16 MCFG;
+ volatile __le16 DCFG; /* 0x7C */
+ volatile __le16 MCFG;
volatile u8 TBIST; /* 0x80 */
volatile u8 RBIST;
volatile u8 rev_id;
volatile u8 PORSTS;
- volatile u32 MIBData; /* 0x88 */
+ volatile __le32 MIBData; /* 0x88 */
- volatile u16 EEWrData;
+ volatile __le16 EEWrData;
volatile u8 reserved_8E;
volatile u8 BPMDWr;
volatile u8 EECHKSUM; /* 0x92 */
volatile u8 EECSR;
- volatile u16 EERdData; /* 0x94 */
+ volatile __le16 EERdData; /* 0x94 */
volatile u8 EADDR;
volatile u8 EMBCMD;
volatile u8 DEBUG;
volatile u8 CHIPGCR;
- volatile u16 WOLCRSet; /* 0xA0 */
+ volatile __le16 WOLCRSet; /* 0xA0 */
volatile u8 PWCFGSet;
volatile u8 WOLCFGSet;
- volatile u16 WOLCRClr; /* 0xA4 */
+ volatile __le16 WOLCRClr; /* 0xA4 */
volatile u8 PWCFGCLR;
volatile u8 WOLCFGClr;
- volatile u16 WOLSRSet; /* 0xA8 */
- volatile u16 reserved_AA;
+ volatile __le16 WOLSRSet; /* 0xA8 */
+ volatile __le16 reserved_AA;
- volatile u16 WOLSRClr; /* 0xAC */
- volatile u16 reserved_AE;
+ volatile __le16 WOLSRClr; /* 0xAC */
+ volatile __le16 reserved_AE;
- volatile u16 PatternCRC[8]; /* 0xB0 */
- volatile u32 ByteMask[4][4]; /* 0xC0 */
+ volatile __le16 PatternCRC[8]; /* 0xB0 */
+ volatile __le32 ByteMask[4][4]; /* 0xC0 */
} __attribute__ ((__packed__));
struct arp_packet {
u8 dest_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
- u16 type;
- u16 ar_hrd;
- u16 ar_pro;
+ __be16 type;
+ __be16 ar_hrd;
+ __be16 ar_pro;
u8 ar_hln;
u8 ar_pln;
- u16 ar_op;
+ __be16 ar_op;
u8 ar_sha[ETH_ALEN];
u8 ar_sip[4];
u8 ar_tha[ETH_ALEN];
struct _magic_packet {
u8 dest_mac[6];
u8 src_mac[6];
- u16 type;
+ __be16 type;
u8 MAC[16][6];
u8 password[6];
} __attribute__ ((__packed__));
#define ath5k_pci_resume NULL
#endif /* CONFIG_PM */
-static struct pci_driver ath5k_pci_drv_id = {
+static struct pci_driver ath5k_pci_driver = {
.name = "ath5k_pci",
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
ath5k_debug_init();
- ret = pci_register_driver(&ath5k_pci_drv_id);
+ ret = pci_register_driver(&ath5k_pci_driver);
if (ret) {
printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
return ret;
static void __exit
exit_ath5k_pci(void)
{
- pci_unregister_driver(&ath5k_pci_drv_id);
+ pci_unregister_driver(&ath5k_pci_driver);
ath5k_debug_finish();
}
priv->last_statistics_time = jiffies;
}
-void iwl3945_add_radiotap(struct iwl3945_priv *priv, struct sk_buff *skb,
- struct iwl3945_rx_frame_hdr *rx_hdr,
- struct ieee80211_rx_status *stats)
+static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
+ struct sk_buff *skb,
+ struct iwl3945_rx_frame_hdr *rx_hdr,
+ struct ieee80211_rx_status *stats)
{
/* First cache any information we need before we overwrite
* the information provided in the skb from the hardware */
struct ieee80211_ht_info *sta_ht_inf)
{
__le32 sta_flags;
+ u8 mimo_ps_mode;
if (!sta_ht_inf || !sta_ht_inf->ht_supported)
goto done;
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
+
sta_flags = priv->stations[index].sta.station_flags;
- if (((sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS >> 2))
- == IWL_MIMO_PS_DYNAMIC)
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_MIMO_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- else
- sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_MIMO_PS_DISABLED:
+ break;
+ default:
+ IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
+ break;
+ }
sta_flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
sta_flags |= STA_FLG_FAT_EN_MSK;
else
- sta_flags &= (~STA_FLG_FAT_EN_MSK);
+ sta_flags &= ~STA_FLG_FAT_EN_MSK;
priv->stations[index].sta.station_flags = sta_flags;
done:
#define QOS_CONTROL_LEN 2
-#define IEEE80211_STYPE_BACK_REQ 0x0080
-#define IEEE80211_STYPE_BACK 0x0090
-
static inline int ieee80211_is_management(u16 fc)
{
return -ENODEV;
}
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERROR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl3945_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
}
}
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERROR("ucode not available for device bringup\n");
- return -EIO;
- }
-
iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl3945_hw_nic_init(priv);
return -ENODEV;
}
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERROR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl4965_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
}
}
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERROR("ucode not available for device bringup\n");
- return -EIO;
- }
-
iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl4965_hw_nic_init(priv);
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
+ifdef CONFIG_HOTPLUG_PCI
+obj-y += hotplug-pci.o
+endif
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
--- /dev/null
+/* Core PCI functionality used only by PCI hotplug */
+
+#include <linux/pci.h>
+#include "pci.h"
+
+
+unsigned int pci_do_scan_bus(struct pci_bus *bus)
+{
+ unsigned int max;
+
+ max = pci_scan_child_bus(bus);
+
+ /*
+ * Make the discovered devices available.
+ */
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+EXPORT_SYMBOL(pci_do_scan_bus);
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/capability.h>
-#include <linux/aspm.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
if (pcibios_add_platform_entries(pdev))
goto err_rom_file;
- pcie_aspm_create_sysfs_dev_files(pdev);
-
return 0;
err_rom_file:
if (!sysfs_initialized)
return;
- pcie_aspm_remove_sysfs_dev_files(pdev);
-
if (pdev->cfg_size < 4096)
sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
else
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
-#include <linux/aspm.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
if (need_restore)
pci_restore_bars(dev);
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
When in doubt, say N.
source "drivers/pci/pcie/aer/Kconfig"
-
-#
-# PCI Express ASPM
-#
-config PCIEASPM
- bool "PCI Express ASPM support(Experimental)"
- depends on PCI && EXPERIMENTAL
- default y
- help
- This enables PCI Express ASPM (Active State Power Management) and
- Clock Power Management. ASPM supports state L0/L0s/L1.
-
- When in doubt, say N.
-config PCIEASPM_DEBUG
- bool "Debug PCI Express ASPM"
- depends on PCIEASPM
- default n
- help
- This enables PCI Express ASPM debug support. It will add per-device
- interface to control ASPM.
# Makefile for PCI-Express PORT Driver
#
-# Build PCI Express ASPM if needed
-obj-$(CONFIG_PCIEASPM) += aspm.o
-
pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
+++ /dev/null
-/*
- * File: drivers/pci/pcie/aspm.c
- * Enabling PCIE link L0s/L1 state and Clock Power Management
- *
- * Copyright (C) 2007 Intel
- * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
- * Copyright (C) Shaohua Li (shaohua.li@intel.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/pci_regs.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/aspm.h>
-#include <acpi/acpi_bus.h>
-#include <linux/pci-acpi.h>
-#include "../pci.h"
-
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "pcie_aspm."
-
-struct endpoint_state {
- unsigned int l0s_acceptable_latency;
- unsigned int l1_acceptable_latency;
-};
-
-struct pcie_link_state {
- struct list_head sibiling;
- struct pci_dev *pdev;
-
- /* ASPM state */
- unsigned int support_state;
- unsigned int enabled_state;
- unsigned int bios_aspm_state;
- /* upstream component */
- unsigned int l0s_upper_latency;
- unsigned int l1_upper_latency;
- /* downstream component */
- unsigned int l0s_down_latency;
- unsigned int l1_down_latency;
- /* Clock PM state*/
- unsigned int clk_pm_capable;
- unsigned int clk_pm_enabled;
- unsigned int bios_clk_state;
-
- /*
- * A pcie downstream port only has one slot under it, so at most there
- * are 8 functions
- */
- struct endpoint_state endpoints[8];
-};
-
-static int aspm_disabled;
-static DEFINE_MUTEX(aspm_lock);
-static LIST_HEAD(link_list);
-
-#define POLICY_DEFAULT 0 /* BIOS default setting */
-#define POLICY_PERFORMANCE 1 /* high performance */
-#define POLICY_POWERSAVE 2 /* high power saving */
-static int aspm_policy;
-static const char *policy_str[] = {
- [POLICY_DEFAULT] = "default",
- [POLICY_PERFORMANCE] = "performance",
- [POLICY_POWERSAVE] = "powersave"
-};
-
-static int policy_to_aspm_state(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- switch (aspm_policy) {
- case POLICY_PERFORMANCE:
- /* Disable ASPM and Clock PM */
- return 0;
- case POLICY_POWERSAVE:
- /* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
- case POLICY_DEFAULT:
- return link_state->bios_aspm_state;
- }
- return 0;
-}
-
-static int policy_to_clkpm_state(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- switch (aspm_policy) {
- case POLICY_PERFORMANCE:
- /* Disable ASPM and Clock PM */
- return 0;
- case POLICY_POWERSAVE:
- /* Disable Clock PM */
- return 1;
- case POLICY_DEFAULT:
- return link_state->bios_clk_state;
- }
- return 0;
-}
-
-static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
-{
- struct pci_dev *child_dev;
- int pos;
- u16 reg16;
- struct pcie_link_state *link_state = pdev->link_state;
-
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!pos)
- return;
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
- if (enable)
- reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16);
- }
- link_state->clk_pm_enabled = !!enable;
-}
-
-static void pcie_check_clock_pm(struct pci_dev *pdev)
-{
- int pos;
- u32 reg32;
- u16 reg16;
- int capable = 1, enabled = 1;
- struct pci_dev *child_dev;
- struct pcie_link_state *link_state = pdev->link_state;
-
- /* All functions should have the same cap and state, take the worst */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!pos)
- return;
- pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32);
- if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
- capable = 0;
- enabled = 0;
- break;
- }
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
- if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
- enabled = 0;
- }
- link_state->clk_pm_capable = capable;
- link_state->clk_pm_enabled = enabled;
- link_state->bios_clk_state = enabled;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-}
-
-/*
- * pcie_aspm_configure_common_clock: check if the 2 ends of a link
- * could use common clock. If they are, configure them to use the
- * common clock. That will reduce the ASPM state exit latency.
- */
-static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
-{
- int pos, child_pos;
- u16 reg16 = 0;
- struct pci_dev *child_dev;
- int same_clock = 1;
-
- /*
- * all functions of a slot should have the same Slot Clock
- * Configuration, so just check one function
- * */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- BUG_ON(!child_dev->is_pcie);
-
- /* Check downstream component if bit Slot Clock Configuration is 1 */
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_SLC))
- same_clock = 0;
-
- /* Check upstream component if bit Slot Clock Configuration is 1 */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_SLC))
- same_clock = 0;
-
- /* Configure downstream component, all functions */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- 			      &reg16);
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- reg16);
- }
-
- /* Configure upstream component */
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-
- /* retrain link */
- reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-
- /* Wait for link training end */
- while (1) {
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- cpu_relax();
- }
-}
-
-/*
- * calc_L0S_latency: Convert L0s latency encoding to ns
- */
-static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
-{
- unsigned int ns = 64;
-
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 5*1000; /* > 4us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
-}
-
-/*
- * calc_L1_latency: Convert L1 latency encoding to ns
- */
-static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
-{
- unsigned int ns = 1000;
-
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 65*1000; /* > 64us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
-}
-
-static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- unsigned int *l0s, unsigned int *l1, unsigned int *enabled)
-{
- int pos;
- u16 reg16;
- u32 reg32;
- unsigned int latency;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
- *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S))
- *state = 0;
- if (*state == 0)
- return;
-
- latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_L0S_latency(latency, 0);
- if (*state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_L1_latency(latency, 0);
- }
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1);
-}
-
-static void pcie_aspm_cap_init(struct pci_dev *pdev)
-{
- struct pci_dev *child_dev;
- u32 state, tmp;
- struct pcie_link_state *link_state = pdev->link_state;
-
- /* upstream component states */
- pcie_aspm_get_cap_device(pdev, &link_state->support_state,
- &link_state->l0s_upper_latency,
- &link_state->l1_upper_latency,
- &link_state->enabled_state);
- /* downstream component states, all functions have the same setting */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- pcie_aspm_get_cap_device(child_dev, &state,
- &link_state->l0s_down_latency,
- &link_state->l1_down_latency,
- &tmp);
- link_state->support_state &= state;
- if (!link_state->support_state)
- return;
- link_state->enabled_state &= link_state->support_state;
- link_state->bios_aspm_state = link_state->enabled_state;
-
- /* ENDPOINT states*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- int pos;
- u32 reg32;
- unsigned int latency;
- struct endpoint_state *ep_state =
- &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
-
- if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)
- continue;
-
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32);
- latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- latency = calc_L0S_latency(latency, 1);
- ep_state->l0s_acceptable_latency = latency;
- if (link_state->support_state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- latency = calc_L1_latency(latency, 1);
- ep_state->l1_acceptable_latency = latency;
- }
- }
-}
-
-static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pci_dev *parent_dev, *tmp_dev;
- unsigned int latency, l1_latency = 0;
- struct pcie_link_state *link_state;
- struct endpoint_state *ep_state;
-
- parent_dev = pdev->bus->self;
- link_state = parent_dev->link_state;
- state &= link_state->support_state;
- if (state == 0)
- return 0;
- ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)];
-
- /*
- * Check latency for endpoint device.
- * TBD: The latency from the endpoint to root complex vary per
- * switch's upstream link state above the device. Here we just do a
- * simple check which assumes all links above the device can be in L1
- * state, that is we just consider the worst case. If switch's upstream
- * link can't be put into L0S/L1, then our check is too strictly.
- */
- tmp_dev = pdev;
- while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
- parent_dev = tmp_dev->bus->self;
- link_state = parent_dev->link_state;
- if (state & PCIE_LINK_STATE_L0S) {
- latency = max_t(unsigned int,
- link_state->l0s_upper_latency,
- link_state->l0s_down_latency);
- if (latency > ep_state->l0s_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L0S;
- }
- if (state & PCIE_LINK_STATE_L1) {
- latency = max_t(unsigned int,
- link_state->l1_upper_latency,
- link_state->l1_down_latency);
- if (latency + l1_latency >
- ep_state->l1_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L1;
- }
- if (!parent_dev->bus->self) /* parent_dev is a root port */
- break;
- else {
- /*
- * parent_dev is the downstream port of a switch, make
- * tmp_dev the upstream port of the switch
- */
- tmp_dev = parent_dev->bus->self;
- /*
- * every switch on the path to root complex need 1 more
- * microsecond for L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- l1_latency += 1000;
- }
- }
- return state;
-}
-
-static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pci_dev *child_dev;
-
- /* If no child, disable the link */
- if (list_empty(&pdev->subordinate->devices))
- return 0;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- * */
- state = 0;
- break;
- }
- if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END))
- continue;
- /* Device not in D0 doesn't need check latency */
- if (child_dev->current_state == PCI_D1 ||
- child_dev->current_state == PCI_D2 ||
- child_dev->current_state == PCI_D3hot ||
- child_dev->current_state == PCI_D3cold)
- continue;
- state = __pcie_aspm_check_state_one(child_dev, state);
- }
- return state;
-}
-
-static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
-{
- u16 reg16;
- int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 &= ~0x3;
- reg16 |= state;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-}
-
-static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
-{
- struct pci_dev *child_dev;
- int valid = 1;
- struct pcie_link_state *link_state = pdev->link_state;
-
- /*
- * if the downstream component has pci bridge function, don't do ASPM
- * now
- */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- valid = 0;
- break;
- }
- }
- if (!valid)
- return;
-
- /*
- * spec 2.0 suggests all functions should be configured the same
- * setting for ASPM. Enabling ASPM L1 should be done in upstream
- * component first and then downstream, and vice versa for disabling
- * ASPM L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(pdev, state);
-
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list)
- __pcie_aspm_config_one_dev(child_dev, state);
-
- if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(pdev, state);
-
- link_state->enabled_state = state;
-}
-
-static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (link_state->support_state == 0)
- return;
- state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-
- /* state 0 means disabling aspm */
- state = pcie_aspm_check_state(pdev, state);
- if (link_state->enabled_state == state)
- return;
- __pcie_aspm_config_link(pdev, state);
-}
-
-/*
- * pcie_aspm_configure_link_state: enable/disable PCI express link state
- * @pdev: the root port or switch downstream port
- */
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(pdev, state);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
-static void free_link_state(struct pci_dev *pdev)
-{
- kfree(pdev->link_state);
- pdev->link_state = NULL;
-}
-
-/*
- * pcie_aspm_init_link_state: Initiate PCI express link state.
- * It is called after the pcie and its children devices are scaned.
- * @pdev: the root port or switch downstream port
- */
-void pcie_aspm_init_link_state(struct pci_dev *pdev)
-{
- unsigned int state;
- struct pcie_link_state *link_state;
- int error = 0;
-
- if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
- return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
- return;
- down_read(&pci_bus_sem);
- if (list_empty(&pdev->subordinate->devices))
- goto out;
-
- mutex_lock(&aspm_lock);
-
- link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
- if (!link_state)
- goto unlock_out;
- pdev->link_state = link_state;
-
- pcie_aspm_configure_common_clock(pdev);
-
- pcie_aspm_cap_init(pdev);
-
- /* config link state to avoid BIOS error */
- state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
- __pcie_aspm_config_link(pdev, state);
-
- pcie_check_clock_pm(pdev);
-
- link_state->pdev = pdev;
- list_add(&link_state->sibiling, &link_list);
-
-unlock_out:
- if (error)
- free_link_state(pdev);
- mutex_unlock(&aspm_lock);
-out:
- up_read(&pci_bus_sem);
-}
-
-/* @pdev: the endpoint device */
-void pcie_aspm_exit_link_state(struct pci_dev *pdev)
-{
- struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state = parent->link_state;
-
- if (aspm_disabled || !pdev->is_pcie || !parent || !link_state)
- return;
- if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
- return;
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
-
- /*
- * All PCIe functions are in one slot, remove one function will remove
- * the the whole slot, so just wait
- */
- if (!list_empty(&parent->subordinate->devices))
- goto out;
-
- /* All functions are removed, so just disable ASPM for the link */
- __pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibiling);
- /* Clock PM is for endpoint device */
-
- free_link_state(parent);
-out:
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (aspm_disabled || !pdev->is_pcie || !pdev->link_state)
- return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
- return;
- /*
- * devices changed PM state, we should recheck if latency meets all
- * functions' requirement
- */
- pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
-}
-
-/*
- * pci_disable_link_state - disable pci device's link state, so the link will
- * never enter specific states
- */
-void pci_disable_link_state(struct pci_dev *pdev, int state)
-{
- struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state;
-
- if (aspm_disabled || !pdev->is_pcie)
- return;
- if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
- pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
- parent = pdev;
- if (!parent)
- return;
-
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- link_state = parent->link_state;
- link_state->support_state &=
- ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
- if (state & PCIE_LINK_STATE_CLKPM)
- link_state->clk_pm_capable = 0;
-
- __pcie_aspm_configure_link_state(parent, link_state->enabled_state);
- if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
- pcie_set_clock_pm(parent, 0);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-EXPORT_SYMBOL(pci_disable_link_state);
-
-static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
-{
- int i;
- struct pci_dev *pdev;
- struct pcie_link_state *link_state;
-
- for (i = 0; i < ARRAY_SIZE(policy_str); i++)
- if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
- break;
- if (i >= ARRAY_SIZE(policy_str))
- return -EINVAL;
- if (i == aspm_policy)
- return 0;
-
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibiling) {
- pdev = link_state->pdev;
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
- if (link_state->clk_pm_capable &&
- link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
- }
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
- return 0;
-}
-
-static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
-{
- int i, cnt = 0;
- for (i = 0; i < ARRAY_SIZE(policy_str); i++)
- if (i == aspm_policy)
- cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
- else
- cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
- return cnt;
-}
-
-module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
- NULL, 0644);
-
-#ifdef CONFIG_PCIEASPM_DEBUG
-static ssize_t link_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
-
- return sprintf(buf, "%d\n", link_state->enabled_state);
-}
-
-static ssize_t link_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- int state;
-
- if (n < 1)
- return -EINVAL;
- state = buf[0]-'0';
- if (state >= 0 && state <= 3) {
- /* setup link aspm state */
- pcie_aspm_configure_link_state(pci_device, state);
- return n;
- }
-
- return -EINVAL;
-}
-
-static ssize_t clk_ctl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
-
- return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
-}
-
-static ssize_t clk_ctl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
-{
- struct pci_dev *pci_device = to_pci_dev(dev);
- int state;
-
- if (n < 1)
- return -EINVAL;
- state = buf[0]-'0';
-
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- pcie_set_clock_pm(pci_device, !!state);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-
- return n;
-}
-
-static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store);
-static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store);
-
-static char power_group[] = "power";
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
- return;
-
- if (link_state->support_state)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
-}
-
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
- return;
-
- if (link_state->support_state)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
-}
-#endif
-
-static int __init pcie_aspm_disable(char *str)
-{
- aspm_disabled = 1;
- return 1;
-}
-
-__setup("pcie_noaspm", pcie_aspm_disable);
-
-static int __init pcie_aspm_init(void)
-{
- if (aspm_disabled)
- return 0;
- pci_osc_support_set(OSC_ACTIVE_STATE_PWR_SUPPORT|
- OSC_CLOCK_PWR_CAPABILITY_SUPPORT);
- return 0;
-}
-
-fs_initcall(pcie_aspm_init);
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
-#include <linux/aspm.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
return child;
}
-struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
+struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
struct pci_bus *child;
up_write(&pci_bus_sem);
}
-struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
+struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
break;
}
}
-
- if (bus->self)
- pcie_aspm_init_link_state(bus->self);
-
return nr;
}
return max;
}
-unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
-{
- unsigned int max;
-
- max = pci_scan_child_bus(bus);
-
- /*
- * Make the discovered devices available.
- */
- pci_bus_add_devices(bus);
-
- return max;
-}
-
struct pci_bus * pci_create_bus(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_add_new_bus);
-EXPORT_SYMBOL(pci_do_scan_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/aspm.h>
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
dev->global_list.next = dev->global_list.prev = NULL;
up_write(&pci_bus_sem);
}
-
- if (dev->bus->self)
- pcie_aspm_exit_link_state(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
}
}
-void pci_bus_size_bridges(struct pci_bus *bus)
+void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
struct pci_dev *dev;
unsigned long mask, prefmask;
}
EXPORT_SYMBOL(pci_bus_size_bridges);
-void pci_bus_assign_resources(struct pci_bus *bus)
+void __ref pci_bus_assign_resources(struct pci_bus *bus)
{
struct pci_bus *b;
struct pci_dev *dev;
# Dependencies for generated files need to be listed explicitly
-$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_seq.h
-$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_reg.h
-$(obj)/aic79xx_core.o: $(obj)/aic79xx_seq.h
-$(obj)/aic79xx_core.o: $(obj)/aic79xx_reg.h
-
-$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_seq.h
-$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_seq.h
+$(addprefix $(src)/,$(aic7xxx-y:.o=.c)): $(obj)/aic7xxx_seq.h $(obj)/aic7xxx_reg.h
+$(addprefix $(src)/,$(aic79xx-y:.o=.c)): $(obj)/aic79xx_seq.h $(obj)/aic79xx_reg.h
aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_reg.h
aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c
uint16_t iotag;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (pci_enable_device_bars(pdev, bars))
+ if (pci_enable_device_mem(pdev))
goto out;
if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
goto out_disable_device;
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
+#include <linux/pid_namespace.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
fl->fl_fasync = NULL;
fl->fl_owner = NULL;
fl->fl_pid = 0;
+ fl->fl_nspid = NULL;
fl->fl_file = NULL;
fl->fl_flags = 0;
fl->fl_type = 0;
{
list_add(&fl->fl_link, &file_lock_list);
+ fl->fl_nspid = get_pid(task_tgid(current));
+
/* insert into file's list */
fl->fl_next = *pos;
*pos = fl;
if (fl->fl_ops && fl->fl_ops->fl_remove)
fl->fl_ops->fl_remove(fl);
+ if (fl->fl_nspid) {
+ put_pid(fl->fl_nspid);
+ fl->fl_nspid = NULL;
+ }
+
locks_wake_up_blocks(fl);
locks_free_lock(fl);
}
return (locks_conflict(caller_fl, sys_fl));
}
-static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
-{
- int result = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- __set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(fl_wait, &wait);
- if (timeout == 0)
- schedule();
- else
- result = schedule_timeout(timeout);
- if (signal_pending(current))
- result = -ERESTARTSYS;
- remove_wait_queue(fl_wait, &wait);
- __set_current_state(TASK_RUNNING);
- return result;
-}
-
-static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
-{
- int result;
- locks_insert_block(blocker, waiter);
- result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
- __locks_delete_block(waiter);
- return result;
-}
-
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
if (posix_locks_conflict(fl, cfl))
break;
}
- if (cfl)
+ if (cfl) {
__locks_copy_lock(fl, cfl);
- else
+ if (cfl->fl_nspid)
+ fl->fl_pid = pid_nr_ns(cfl->fl_nspid,
+ task_active_pid_ns(current));
+ } else
fl->fl_type = F_UNLCK;
unlock_kernel();
return;
}
-
EXPORT_SYMBOL(posix_test_lock);
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
+/*
+ * Deadlock detection:
+ *
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
*
- * We rely on the fact that a task can only be on one lock's wait queue
- * at a time. When we find blocked_task on a wait queue we can re-search
- * with blocked_task equal to that queue's owner, until either blocked_task
- * isn't found, or blocked_task is found on a queue owned by my_task.
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock. That lock in turn may be held by
+ * someone waiting for at most one other lock. Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
*
- * Note: the above assumption may not be true when handling lock requests
- * from a broken NFS client. But broken NFS clients have a lot more to
- * worry about than proper deadlock detection anyway... --okir
+ * Since we do this before we ever put a process to sleep on a lock, we
+ * are ensured that there is never a cycle; that is what guarantees that
+ * the while() loop in posix_locks_deadlock() eventually completes.
*
- * However, the failure of this assumption (also possible in the case of
- * multiple tasks sharing the same open file table) also means there's no
- * guarantee that the loop below will terminate. As a hack, we give up
- * after a few iterations.
+ * Note: the above assumption may not be true when handling lock
+ * requests from a broken NFS client. It may also fail in the presence
+ * of tasks (such as posix threads) sharing the same open file table.
+ *
+ * To handle those cases, we just bail out after a few iterations.
*/
#define MAX_DEADLK_ITERATIONS 10
+/* Find a lock that the owner of the given block_fl is blocking on. */
+static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
+{
+ struct file_lock *fl;
+
+ list_for_each_entry(fl, &blocked_list, fl_link) {
+ if (posix_same_owner(fl, block_fl))
+ return fl->fl_next;
+ }
+ return NULL;
+}
+
static int posix_locks_deadlock(struct file_lock *caller_fl,
struct file_lock *block_fl)
{
- struct file_lock *fl;
int i = 0;
-next_task:
- if (posix_same_owner(caller_fl, block_fl))
- return 1;
- list_for_each_entry(fl, &blocked_list, fl_link) {
- if (posix_same_owner(fl, block_fl)) {
- if (i++ > MAX_DEADLK_ITERATIONS)
- return 0;
- fl = fl->fl_next;
- block_fl = fl;
- goto next_task;
- }
+ while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+ if (i++ > MAX_DEADLK_ITERATIONS)
+ return 0;
+ if (posix_same_owner(caller_fl, block_fl))
+ return 1;
}
return 0;
}
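
The rewritten detector above walks the chain of waiting lock owners starting from the conflicting lock; if the chain reaches a lock owned by the requester, putting the requester to sleep would close a cycle. A minimal standalone sketch of that walk (demo_lock, demo_deadlock and the task ids are made-up illustration names, not the kernel's types):

#include <stdio.h>
#include <stddef.h>

#define MAX_DEADLK_ITERATIONS 10

struct demo_lock {
	int owner;                         /* task id that holds or requests the lock */
	struct demo_lock *owner_waits_for; /* lock that owner is blocked on, or NULL */
};

/* Follow the chain of waiters; return 1 if it leads back to the caller. */
static int demo_deadlock(const struct demo_lock *caller, const struct demo_lock *block)
{
	int i = 0;

	while ((block = block->owner_waits_for) != NULL) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;	/* give up, as the kernel code does */
		if (block->owner == caller->owner)
			return 1;	/* granting the wait would create a cycle */
	}
	return 0;
}

int main(void)
{
	struct demo_lock lock_a  = { .owner = 1, .owner_waits_for = NULL };
	struct demo_lock lock_b  = { .owner = 2, .owner_waits_for = NULL };
	struct demo_lock request = { .owner = 2, .owner_waits_for = NULL };

	lock_a.owner_waits_for = &lock_b;	/* task 1 holds lock_a and waits for lock_b */

	/* Task 2 (which holds lock_b) now asks for lock_a: deadlock. */
	printf("deadlock: %d\n", demo_deadlock(&request, &lock_a));
	return 0;
}

Compiled on its own the example prints "deadlock: 1", mirroring the caller_fl/block_fl case the kernel code refuses to block on.
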
if (break_time == 0)
break_time++;
}
- error = locks_block_on_timeout(flock, new_fl, break_time);
+ locks_insert_block(flock, new_fl);
+ error = wait_event_interruptible_timeout(new_fl->fl_wait,
+ !new_fl->fl_next, break_time);
+ __locks_delete_block(new_fl);
if (error >= 0) {
if (error == 0)
time_out_leases(inode);
int id, char *pfx)
{
struct inode *inode = NULL;
+ unsigned int fl_pid;
+
+ if (fl->fl_nspid)
+ fl_pid = pid_nr_ns(fl->fl_nspid, task_active_pid_ns(current));
+ else
+ fl_pid = fl->fl_pid;
if (fl->fl_file != NULL)
inode = fl->fl_file->f_path.dentry->d_inode;
}
if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
- seq_printf(f, "%d %s:%ld ", fl->fl_pid,
+ seq_printf(f, "%d %s:%ld ", fl_pid,
inode->i_sb->s_id, inode->i_ino);
#else
/* userspace relies on this representation of dev_t ;-( */
- seq_printf(f, "%d %02x:%02x:%ld ", fl->fl_pid,
+ seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
} else {
- seq_printf(f, "%d <none>:0 ", fl->fl_pid);
+ seq_printf(f, "%d <none>:0 ", fl_pid);
}
if (IS_POSIX(fl)) {
if (fl->fl_end == OFFSET_MAX)
pos = vmf->pgoff << PAGE_SHIFT;
count = PAGE_SIZE;
- if ((unsigned long)vmf->virtual_address + PAGE_SIZE > area->vm_end) {
- WARN_ON(1); /* shouldn't happen? */
- count = area->vm_end - (unsigned long)vmf->virtual_address;
- }
/* what we can read in one go */
bufsize = NCP_SERVER(inode)->buffer_size;
+++ /dev/null
-/*
- * aspm.h
- *
- * PCI Express ASPM defines and function prototypes
- *
- * Copyright (C) 2007 Intel Corp.
- * Zhang Yanmin (yanmin.zhang@intel.com)
- * Shaohua Li (shaohua.li@intel.com)
- *
- * For more information, please consult the following manuals (look at
- * http://www.pcisig.com/ for how to get them):
- *
- * PCI Express Specification
- */
-
-#ifndef LINUX_ASPM_H
-#define LINUX_ASPM_H
-
-#include <linux/pci.h>
-
-#define PCIE_LINK_STATE_L0S 1
-#define PCIE_LINK_STATE_L1 2
-#define PCIE_LINK_STATE_CLKPM 4
-
-#ifdef CONFIG_PCIEASPM
-extern void pcie_aspm_init_link_state(struct pci_dev *pdev);
-extern void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
-extern void pci_disable_link_state(struct pci_dev *pdev, int state);
-#else
-#define pcie_aspm_init_link_state(pdev) do {} while (0)
-#define pcie_aspm_exit_link_state(pdev) do {} while (0)
-#define pcie_aspm_pm_state_change(pdev) do {} while (0)
-#define pci_disable_link_state(pdev, state) do {} while (0)
-#endif
-
-#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */
-extern void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-extern void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
-#else
-#define pcie_aspm_create_sysfs_dev_files(pdev) do {} while (0)
-#define pcie_aspm_remove_sysfs_dev_files(pdev) do {} while (0)
-#endif
-#endif /* LINUX_ASPM_H */
__u64 dccps_gsr;
__u64 dccps_gar;
__be32 dccps_service;
+ __u32 dccps_mss_cache;
struct dccp_service_list *dccps_service_list;
__u32 dccps_timestamp_echo;
__u32 dccps_timestamp_time;
__u16 dccps_pcslen;
__u16 dccps_pcrlen;
unsigned long dccps_ndp_count;
- __u32 dccps_mss_cache;
unsigned long dccps_rate_last;
struct dccp_minisock dccps_minisock;
struct dccp_ackvec *dccps_hc_rx_ackvec;
extern void device_destroy(struct class *cls, dev_t devt);
#ifdef CONFIG_PM_SLEEP
extern void destroy_suspended_device(struct class *cls, dev_t devt);
+extern void device_pm_schedule_removal(struct device *);
#else /* !CONFIG_PM_SLEEP */
static inline void destroy_suspended_device(struct class *cls, dev_t devt)
{
device_destroy(cls, devt);
}
+
+static inline void device_pm_schedule_removal(struct device *dev)
+{
+ device_unregister(dev);
+}
#endif /* !CONFIG_PM_SLEEP */
/*
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
unsigned int fl_pid;
+ struct pid *fl_nspid;
wait_queue_head_t fl_wait;
struct file *fl_file;
unsigned char fl_flags;
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
+/* MIMO Power Save Modes */
+#define WLAN_HT_CAP_MIMO_PS_STATIC 0
+#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1
+#define WLAN_HT_CAP_MIMO_PS_INVALID 2
+#define WLAN_HT_CAP_MIMO_PS_DISABLED 3
+
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
#define __REF .section ".ref.text", "ax"
#define __REFDATA .section ".ref.data", "aw"
#define __REFCONST .section ".ref.rodata", "aw"
-/* backward compatibility */
-#define __INIT_REFOK .section __REF
-#define __INITDATA_REFOK .section __REFDATA
#ifndef __ASSEMBLY__
/*
u32 data[0];
};
-struct pcie_link_state;
/*
* The pci_dev structure is used to describe PCI devices.
*/
this is D0-D3, D0 being fully functional,
and D3 being off. */
-#ifdef CONFIG_PCIEASPM
- struct pcie_link_state *link_state; /* ASPM link state. */
-#endif
-
pci_channel_state_t error_state; /* current connectivity state */
struct device dev; /* Generic device interface */
#define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */
#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
-#define PCI_EXP_LNKCAP_ASPMS 0xc00 /* ASPM Support */
-#define PCI_EXP_LNKCAP_L0SEL 0x7000 /* L0s Exit Latency */
-#define PCI_EXP_LNKCAP_L1EL 0x38000 /* L1 Exit Latency */
-#define PCI_EXP_LNKCAP_CLKPM 0x40000 /* L1 Clock Power Management */
#define PCI_EXP_LNKCTL 16 /* Link Control */
-#define PCI_EXP_LNKCTL_RL 0x20 /* Retrain Link */
-#define PCI_EXP_LNKCTL_CCC 0x40 /* Common Clock COnfiguration */
#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */
#define PCI_EXP_LNKSTA 18 /* Link Status */
-#define PCI_EXP_LNKSTA_LT 0x800 /* Link Training */
-#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCTL 24 /* Slot Control */
#define PCI_EXP_SLTSTA 26 /* Slot Status */
/* A lock to ensure that only one thing can read/write
* the MDIO bus at a time */
- spinlock_t mdio_lock;
+ struct mutex mdio_lock;
struct device *dev;
/* Interrupt and Polling infrastructure */
struct work_struct phy_queue;
+ struct work_struct state_queue;
struct timer_list phy_timer;
atomic_t irq_disable;
- spinlock_t lock;
+ struct mutex lock;
struct net_device *attached_dev;
struct ip6_sf_list *mca_sources;
struct ip6_sf_list *mca_tomb;
unsigned int mca_sfmode;
+ unsigned char mca_crcount;
unsigned long mca_sfcount[2];
struct timer_list mca_timer;
unsigned mca_flags;
int mca_users;
atomic_t mca_refcnt;
spinlock_t mca_lock;
- unsigned char mca_crcount;
unsigned long mca_cstamp;
unsigned long mca_tstamp;
};
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;
rwlock_t mc_lock;
- unsigned long mc_v1_seen;
- unsigned long mc_maxdelay;
unsigned char mc_qrv;
unsigned char mc_gq_running;
unsigned char mc_ifc_count;
+ unsigned long mc_v1_seen;
+ unsigned long mc_maxdelay;
struct timer_list mc_gq_timer; /* general query timer */
struct timer_list mc_ifc_timer; /* interface change timer */
return inet6_ehashfn(laddr, lport, faddr, fport);
}
-extern void __inet6_hash(struct inet_hashinfo *hashinfo, struct sock *sk);
+extern void __inet6_hash(struct sock *sk);
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
#undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket;
-struct inet_hashinfo;
struct tcp_congestion_ops;
/*
int level, int optname,
char __user *optval, int __user *optlen);
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
+ int (*bind_conflict)(const struct sock *sk,
+ const struct inet_bind_bucket *tb);
};
/** inet_connection_sock - INET connection oriented sock
const __be32 laddr);
extern int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb);
-extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
- struct sock *sk, unsigned short snum,
- int (*bind_conflict)(const struct sock *sk,
- const struct inet_bind_bucket *tb));
+extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
extern struct dst_entry* inet_csk_route_req(struct sock *sk,
const struct request_sock *req);
}
/* Caller must disable local BH processing. */
-static inline void __inet_inherit_port(struct inet_hashinfo *table,
- struct sock *sk, struct sock *child)
+static inline void __inet_inherit_port(struct sock *sk, struct sock *child)
{
+ struct inet_hashinfo *table = sk->sk_prot->hashinfo;
const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
struct inet_bind_hashbucket *head = &table->bhash[bhash];
struct inet_bind_bucket *tb;
spin_unlock(&head->lock);
}
-static inline void inet_inherit_port(struct inet_hashinfo *table,
- struct sock *sk, struct sock *child)
+static inline void inet_inherit_port(struct sock *sk, struct sock *child)
{
local_bh_disable();
- __inet_inherit_port(table, sk, child);
+ __inet_inherit_port(sk, child);
local_bh_enable();
}
-extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);
+extern void inet_put_port(struct sock *sk);
extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);
wake_up(&hashinfo->lhash_wait);
}
-extern void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk);
-extern void __inet_hash_nolisten(struct inet_hashinfo *hinfo, struct sock *sk);
-
-static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
-{
- if (sk->sk_state != TCP_CLOSE) {
- local_bh_disable();
- __inet_hash(hashinfo, sk);
- local_bh_enable();
- }
-}
-
-static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
-{
- rwlock_t *lock;
-
- if (sk_unhashed(sk))
- goto out;
-
- if (sk->sk_state == TCP_LISTEN) {
- local_bh_disable();
- inet_listen_wlock(hashinfo);
- lock = &hashinfo->lhash_lock;
- } else {
- lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
- write_lock_bh(lock);
- }
-
- if (__sk_del_node_init(sk))
- sock_prot_inuse_add(sk->sk_prot, -1);
- write_unlock_bh(lock);
-out:
- if (sk->sk_state == TCP_LISTEN)
- wake_up(&hashinfo->lhash_wait);
-}
+extern void __inet_hash_nolisten(struct sock *sk);
+extern void inet_hash(struct sock *sk);
+extern void inet_unhash(struct sock *sk);
extern struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sock *sk,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **),
- void (*hash)(struct inet_hashinfo *, struct sock *));
+ void (*hash)(struct sock *sk));
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
#endif /* _INET_HASHTABLES_H */
#define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net
+ int tw_timeout;
volatile unsigned char tw_substate;
/* 3 bits hole, try to pack */
unsigned char tw_rcv_wscale;
__u8 tw_ipv6only:1;
/* 15 bits hole, try to pack */
__u16 tw_ipv6_offset;
- int tw_timeout;
unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
struct request_sock_ops;
struct timewait_sock_ops;
+struct inet_hashinfo;
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
+ struct inet_hashinfo *hashinfo;
+
struct module *owner;
char name[32];
endchoice
+config PROFILING
+ bool "Profiling support (EXPERIMENTAL)"
+ help
+ Say Y here to enable the extended profiling support mechanisms used
+ by profilers such as OProfile.
+
+config MARKERS
+ bool "Activate markers"
+ help
+ Place an empty function call at each marker site. Can be
+ dynamically changed for a probe function.
+
+source "arch/Kconfig"
+
endmenu # General setup
config SLABINFO
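
To illustrate the MARKERS option added to init/Kconfig above, here is a rough sketch of a marker site, assuming the 2.6.24-era trace_mark(name, format, args...) macro from <linux/marker.h>; the module, event name and value are invented for the example:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/marker.h>

static int __init marker_demo_init(void)
{
	int value = 42;

	/* With CONFIG_MARKERS=y this is an (almost) empty call site that a
	 * probe can later be attached to under the name "marker_demo_event";
	 * with the option off the macro compiles to nothing. */
	trace_mark(marker_demo_event, "value %d", value);
	return 0;
}

static void __exit marker_demo_exit(void)
{
}

module_init(marker_demo_init);
module_exit(marker_demo_exit);
MODULE_LICENSE("GPL");
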
+++ /dev/null
-menuconfig INSTRUMENTATION
- bool "Instrumentation Support"
- default y
- ---help---
- Say Y here to get to see options related to performance measurement,
- system-wide debugging, and testing. This option alone does not add any
- kernel code.
-
- If you say N, all options in this submenu will be skipped and
- disabled. If you're trying to debug the kernel itself, go see the
- Kernel Hacking menu.
-
-if INSTRUMENTATION
-
-config PROFILING
- bool "Profiling support (EXPERIMENTAL)"
- help
- Say Y here to enable the extended profiling support mechanisms used
- by profilers such as OProfile.
-
-config OPROFILE
- tristate "OProfile system profiling (EXPERIMENTAL)"
- depends on PROFILING && !UML
- depends on ARCH_SUPPORTS_OPROFILE || ALPHA || ARM || BLACKFIN || IA64 || M32R || PARISC || PPC || S390 || SUPERH || SPARC
- help
- OProfile is a profiling system capable of profiling the
- whole system, include the kernel, kernel modules, libraries,
- and applications.
-
- If unsure, say N.
-
-config KPROBES
- bool "Kprobes"
- depends on KALLSYMS && MODULES && !UML
- depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
-config MARKERS
- bool "Activate markers"
- help
- Place an empty function call at each marker site. Can be
- dynamically changed for a probe function.
-
-endif # INSTRUMENTATION
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
- utsname.o notifier.o
+ utsname.o notifier.o ksysfs.o
obj-$(CONFIG_SYSCTL) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_SYSFS) += ksysfs.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
return -EINVAL;
vma->vm_ops = &relay_file_mmap_ops;
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = buf;
buf->chan->cb->buf_mapped(buf, filp);
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
- default n
+ depends on UNDEFINED
help
The section mismatch analysis checks if there are illegal
references from one section to another section.
most likely result in an oops.
In the code functions and variables are annotated with
__init, __devinit etc. (see full list in include/linux/init.h)
- which result in the code/data being placed in specific sections.
- The section mismatch anaylsis are always done after a full
- kernel build but enabling this options will in addition
+ which results in the code/data being placed in specific sections.
+ The section mismatch analysis is always done after a full
+ kernel build but enabling this option will in addition
do the following:
- Add the option -fno-inline-functions-called-once to gcc
When inlining a function annotated __init in a non-init
- function we would loose the section information and thus
+ function we would lose the section information and thus
the analysis would not catch the illegal reference.
- This options tell gcc to inline less but will also
+ This option tells gcc to inline less but will also
result in a larger kernel.
- Run the section mismatch analysis for each module/built-in.o
When we run the section mismatch analysis on vmlinux.o we
- looses valueable information about where the mismatch was
+ lose valuable information about where the mismatch was
introduced.
Running the analysis for each module/built-in.o file
will tell where the mismatch happens much closer to the
* @name: the name for the kset
* @parent: the parent kobject of this kobject, if any.
*
- * This function creates a kset structure dynamically and registers it
+ * This function creates a kobject structure dynamically and registers it
* with sysfs. When you are finished with this structure, call
* kobject_put() and the structure will be dynamically freed when
* it is no longer being used.
vma->vm_start = addr;
vma->vm_end = addr + len;
- vma->vm_flags = vm_flags | mm->def_flags;
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = &special_mapping_vmops;
extern int dccp_connect(struct sock *sk);
extern int dccp_disconnect(struct sock *sk, int flags);
-extern void dccp_hash(struct sock *sk);
-extern void dccp_unhash(struct sock *sk);
extern int dccp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
extern int dccp_setsockopt(struct sock *sk, int level, int optname,
*/
static struct socket *dccp_v4_ctl_socket;
-static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
-{
- return inet_csk_get_port(&dccp_hashinfo, sk, snum,
- inet_csk_bind_conflict);
-}
-
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
dccp_sync_mss(newsk, dst_mtu(dst));
- __inet_hash_nolisten(&dccp_hashinfo, newsk);
- __inet_inherit_port(&dccp_hashinfo, sk, newsk);
+ __inet_hash_nolisten(newsk);
+ __inet_inherit_port(sk, newsk);
return newsk;
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
+ .bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
.sendmsg = dccp_sendmsg,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v4_do_rcv,
- .hash = dccp_hash,
- .unhash = dccp_unhash,
+ .hash = inet_hash,
+ .unhash = inet_unhash,
.accept = inet_csk_accept,
- .get_port = dccp_v4_get_port,
+ .get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
.destroy = dccp_destroy_sock,
.orphan_count = &dccp_orphan_count,
.obj_size = sizeof(struct dccp_sock),
.rsk_prot = &dccp_request_sock_ops,
.twsk_prot = &dccp_timewait_sock_ops,
+ .hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
-static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
-{
- return inet_csk_get_port(&dccp_hashinfo, sk, snum,
- inet6_csk_bind_conflict);
-}
-
static void dccp_v6_hash(struct sock *sk)
{
if (sk->sk_state != DCCP_CLOSED) {
if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
- dccp_hash(sk);
+ inet_hash(sk);
return;
}
local_bh_disable();
- __inet6_hash(&dccp_hashinfo, sk);
+ __inet6_hash(sk);
local_bh_enable();
}
}
newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
- __inet6_hash(&dccp_hashinfo, newsk);
- inet_inherit_port(&dccp_hashinfo, sk, newsk);
+ __inet6_hash(newsk);
+ inet_inherit_port(sk, newsk);
return newsk;
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
+ .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v6_do_rcv,
.hash = dccp_v6_hash,
- .unhash = dccp_unhash,
+ .unhash = inet_unhash,
.accept = inet_csk_accept,
- .get_port = dccp_v6_get_port,
+ .get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
.destroy = dccp_v6_destroy_sock,
.orphan_count = &dccp_orphan_count,
.obj_size = sizeof(struct dccp6_sock),
.rsk_prot = &dccp6_request_sock_ops,
.twsk_prot = &dccp6_timewait_sock_ops,
+ .hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
sk->sk_prot->unhash(sk);
if (inet_csk(sk)->icsk_bind_hash != NULL &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
- inet_put_port(&dccp_hashinfo, sk);
+ inet_put_port(sk);
/* fall through */
default:
if (oldstate == DCCP_OPEN)
EXPORT_SYMBOL_GPL(dccp_state_name);
-void dccp_hash(struct sock *sk)
-{
- inet_hash(&dccp_hashinfo, sk);
-}
-
-EXPORT_SYMBOL_GPL(dccp_hash);
-
-void dccp_unhash(struct sock *sk)
-{
- inet_unhash(&dccp_hashinfo, sk);
-}
-
-EXPORT_SYMBOL_GPL(dccp_unhash);
-
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
struct dccp_sock *dp = dccp_sk(sk);
/* Clean up a referenced DCCP bind bucket. */
if (inet_csk(sk)->icsk_bind_hash != NULL)
- inet_put_port(&dccp_hashinfo, sk);
+ inet_put_port(sk);
kfree(dp->dccps_service_list);
dp->dccps_service_list = NULL;
/* Obtain a reference to a local port for the given sock,
* if snum is zero it means select any available local port.
*/
-int inet_csk_get_port(struct inet_hashinfo *hashinfo,
- struct sock *sk, unsigned short snum,
- int (*bind_conflict)(const struct sock *sk,
- const struct inet_bind_bucket *tb))
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
+ struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
struct inet_bind_hashbucket *head;
struct hlist_node *node;
struct inet_bind_bucket *tb;
goto success;
} else {
ret = 1;
- if (bind_conflict(sk, tb))
+ if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb))
goto fail_unlock;
}
}
/*
* Get rid of any references to a local port held by the given sock.
*/
-static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+static void __inet_put_port(struct sock *sk)
{
+ struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
struct inet_bind_bucket *tb;
spin_unlock(&head->lock);
}
-void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+void inet_put_port(struct sock *sk)
{
local_bh_disable();
- __inet_put_port(hashinfo, sk);
+ __inet_put_port(sk);
local_bh_enable();
}
inet->dport);
}
-void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk)
+void __inet_hash_nolisten(struct sock *sk)
{
+ struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
struct hlist_head *list;
rwlock_t *lock;
struct inet_ehash_bucket *head;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
-void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
+static void __inet_hash(struct sock *sk)
{
+ struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
struct hlist_head *list;
rwlock_t *lock;
if (sk->sk_state != TCP_LISTEN) {
- __inet_hash_nolisten(hashinfo, sk);
+ __inet_hash_nolisten(sk);
return;
}
write_unlock(lock);
wake_up(&hashinfo->lhash_wait);
}
-EXPORT_SYMBOL_GPL(__inet_hash);
+
+void inet_hash(struct sock *sk)
+{
+ if (sk->sk_state != TCP_CLOSE) {
+ local_bh_disable();
+ __inet_hash(sk);
+ local_bh_enable();
+ }
+}
+EXPORT_SYMBOL_GPL(inet_hash);
+
+void inet_unhash(struct sock *sk)
+{
+ rwlock_t *lock;
+ struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
+
+ if (sk_unhashed(sk))
+ goto out;
+
+ if (sk->sk_state == TCP_LISTEN) {
+ local_bh_disable();
+ inet_listen_wlock(hashinfo);
+ lock = &hashinfo->lhash_lock;
+ } else {
+ lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+ write_lock_bh(lock);
+ }
+
+ if (__sk_del_node_init(sk))
+ sock_prot_inuse_add(sk->sk_prot, -1);
+ write_unlock_bh(lock);
+out:
+ if (sk->sk_state == TCP_LISTEN)
+ wake_up(&hashinfo->lhash_wait);
+}
+EXPORT_SYMBOL_GPL(inet_unhash);
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **),
- void (*hash)(struct inet_hashinfo *, struct sock *))
+ void (*hash)(struct sock *sk))
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
const unsigned short snum = inet_sk(sk)->num;
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->sport = htons(port);
- hash(hinfo, sk);
+ hash(sk);
}
spin_unlock(&head->lock);
tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- hash(hinfo, sk);
+ hash(sk);
spin_unlock_bh(&head->lock);
return 0;
} else {
sk->sk_prot->unhash(sk);
if (inet_csk(sk)->icsk_bind_hash &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
- inet_put_port(&tcp_hashinfo, sk);
+ inet_put_port(sk);
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};
-static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
-{
- return inet_csk_get_port(&tcp_hashinfo, sk, snum,
- inet_csk_bind_conflict);
-}
-
-static void tcp_v4_hash(struct sock *sk)
-{
- inet_hash(&tcp_hashinfo, sk);
-}
-
-void tcp_unhash(struct sock *sk)
-{
- inet_unhash(&tcp_hashinfo, sk);
-}
-
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
}
#endif
- __inet_hash_nolisten(&tcp_hashinfo, newsk);
- __inet_inherit_port(&tcp_hashinfo, sk, newsk);
+ __inet_hash_nolisten(newsk);
+ __inet_inherit_port(sk, newsk);
return newsk;
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
+ .bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
/* Clean up a referenced TCP bind bucket. */
if (inet_csk(sk)->icsk_bind_hash)
- inet_put_port(&tcp_hashinfo, sk);
+ inet_put_port(sk);
/*
* If sendmsg cached page exists, toss it.
.getsockopt = tcp_getsockopt,
.recvmsg = tcp_recvmsg,
.backlog_rcv = tcp_v4_do_rcv,
- .hash = tcp_v4_hash,
- .unhash = tcp_unhash,
- .get_port = tcp_v4_get_port,
+ .hash = inet_hash,
+ .unhash = inet_unhash,
+ .get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
.sockets_allocated = &tcp_sockets_allocated,
.orphan_count = &tcp_orphan_count,
.obj_size = sizeof(struct tcp_sock),
.twsk_prot = &tcp_timewait_sock_ops,
.rsk_prot = &tcp_request_sock_ops,
+ .hashinfo = &tcp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
-EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
#include <net/inet6_hashtables.h>
#include <net/ip.h>
-void __inet6_hash(struct inet_hashinfo *hashinfo,
- struct sock *sk)
+void __inet6_hash(struct sock *sk)
{
+ struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
struct hlist_head *list;
rwlock_t *lock;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif
-static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
-{
- return inet_csk_get_port(&tcp_hashinfo, sk, snum,
- inet6_csk_bind_conflict);
-}
-
static void tcp_v6_hash(struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
return;
}
local_bh_disable();
- __inet6_hash(&tcp_hashinfo, sk);
+ __inet6_hash(sk);
local_bh_enable();
}
}
}
#endif
- __inet6_hash(&tcp_hashinfo, newsk);
- inet_inherit_port(&tcp_hashinfo, sk, newsk);
+ __inet6_hash(newsk);
+ inet_inherit_port(sk, newsk);
return newsk;
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
+ .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
+ .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
.recvmsg = tcp_recvmsg,
.backlog_rcv = tcp_v6_do_rcv,
.hash = tcp_v6_hash,
- .unhash = tcp_unhash,
- .get_port = tcp_v6_get_port,
+ .unhash = inet_unhash,
+ .get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
.sockets_allocated = &tcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
.obj_size = sizeof(struct tcp6_sock),
.twsk_prot = &tcp6_timewait_sock_ops,
.rsk_prot = &tcp6_request_sock_ops,
+ .hashinfo = &tcp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
Say N unless you know you need this.
+config MAC80211_DEBUG_PACKET_ALIGNMENT
+ bool "Enable packet alignment debugging"
+ depends on MAC80211
+ help
+ This option is recommended for driver authors and strongly
+ discouraged for everybody else; it will trigger a warning
+ when a driver hands mac80211 a buffer that is aligned in
+ a way that will cause problems with the IP stack on some
+ architectures.
+
+ Say N unless you're writing a mac80211 based driver.
+
config MAC80211_DEBUG
bool "Enable debugging output"
depends on MAC80211
ret = rc80211_simple_init();
if (ret)
- goto fail;
+ goto out;
ret = rc80211_pid_init();
if (ret)
- goto fail_simple;
+ goto out_cleanup_simple;
ret = ieee80211_wme_register();
if (ret) {
printk(KERN_DEBUG "ieee80211_init: failed to "
"initialize WME (err=%d)\n", ret);
- goto fail_pid;
+ goto out_cleanup_pid;
}
ieee80211_debugfs_netdev_init();
return 0;
- fail_pid:
- rc80211_simple_exit();
- fail_simple:
+ out_cleanup_pid:
rc80211_pid_exit();
- fail:
+ out_cleanup_simple:
+ rc80211_simple_exit();
+ out:
return ret;
}
return ieee80211_rate_control_register(&mac80211_rcpid);
}
-void __exit rc80211_pid_exit(void)
+void rc80211_pid_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_rcpid);
}
return ieee80211_rate_control_register(&mac80211_rcsimple);
}
-void __exit rc80211_simple_exit(void)
+void rc80211_simple_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_rcsimple);
}
return load;
}
+#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
static ieee80211_txrx_result
ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
{
int hdrlen;
+ if (!WLAN_FC_DATA_PRESENT(rx->fc))
+ return TXRX_CONTINUE;
+
/*
* Drivers are required to align the payload data in a way that
* guarantees that the contained IP header is aligned to a four-
return TXRX_CONTINUE;
}
+#endif
ieee80211_rx_handler ieee80211_rx_pre_handlers[] =
{
ieee80211_rx_h_parse_qos,
+#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
ieee80211_rx_h_verify_ip_alignment,
+#endif
NULL
};
return;
if (!conf_read(dialog_input_result)) {
set_config_filename(dialog_input_result);
+ sym_set_change_count(1);
return;
}
show_textbox(NULL, _("File does not exist!"), 5, 38);
prop_warn(prop,
"config symbol '%s' uses select, but is "
"not boolean or tristate", sym->name);
- else if (sym2->type == S_UNKNOWN)
- prop_warn(prop,
- "'select' used by config symbol '%s' "
- "refers to undefined symbol '%s'",
- sym->name, sym2->name);
- else if (sym2->type != S_BOOLEAN && sym2->type != S_TRISTATE)
+ else if (sym2->type != S_UNKNOWN &&
+ sym2->type != S_BOOLEAN &&
+ sym2->type != S_TRISTATE)
prop_warn(prop,
"'%s' has wrong type. 'select' only "
"accept arguments of boolean and "
/* Do not export init/exit functions or data */
{
.fromsec = { "__ksymtab*", NULL },
- .tosec = { ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL },
+ .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
.mismatch = EXPORT_TO_INIT_EXIT
}
};
to = to_is_func ? "function" : "variable";
to_p = to_is_func ? "()" : "";
+ sec_mismatch_count++;
+ if (!sec_mismatch_verbose)
+ return;
+
fprintf(stderr, "WARNING: %s(%s+0x%llx): Section mismatch in"
" reference from the %s %s%s to the %s %s:%s%s\n",
modname, fromsec, fromaddr, from, fromsym, from_p,
to, tosec, tosym, to_p);
- sec_mismatch_count++;
- if (!sec_mismatch_verbose)
- return;
-
switch (mismatch) {
case TEXT_TO_INIT:
fprintf(stderr,
write_dump(dump_write);
if (sec_mismatch_count && !sec_mismatch_verbose)
fprintf(stderr, "modpost: Found %d section mismatch(es).\n"
- "To see additional details select \"Enable full "
- "Section mismatch analysis\"\n"
- "in the Kernel Hacking menu "
- "(CONFIG_SECTION_MISMATCH).\n", sec_mismatch_count);
+ "To see full details build your kernel with:\n"
+ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
+ sec_mismatch_count);
return err;
}
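
For reference, the kind of code the analysis (now controlled by CONFIG_DEBUG_SECTION_MISMATCH) complains about is a reference from ordinary text to init code or data. A made-up fragment that would produce such a warning:

#include <linux/init.h>

static int __init setup_hw(void)
{
	/* runs once at boot; the memory backing .init.text is freed afterwards */
	return 0;
}

/* Not marked __init: calling setup_hw() from here references .init.text
 * from .text, which is exactly what modpost flags as a section mismatch. */
int reinit_hw(void)
{
	return setup_hw();
}

Building with 'make CONFIG_DEBUG_SECTION_MISMATCH=y', as the new message suggests, runs the analysis per module/built-in.o and so reports the mismatch much closer to where it was introduced.
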
# All done with mercurial
exit
fi
+
+# Check for svn and a svn repo.
+if rev=`svn info 2>/dev/null | grep '^Revision'`; then
+ rev=`echo $rev | awk '{print $NF}'`
+ changes=`svn status 2>/dev/null | grep '^[AMD]' | wc -l`
+
+ # Are there uncommitted changes?
+ if [ $changes != 0 ]; then
+ printf -- '-svn%s%s%s' "$rev" -dirty "$changes"
+ else
+ printf -- '-svn%s' "$rev"
+ fi
+
+ # All done with svn
+ exit
+fi
{
struct via_info *card = vma->vm_private_data;
struct via_channel *chan = &card->ch_out;
+ unsigned long max_bufs;
struct page *dmapage;
unsigned long pgoff;
int rd, wr;
rd = card->ch_in.is_mapped;
wr = card->ch_out.is_mapped;
-#ifndef VIA_NDEBUG
- {
- unsigned long max_bufs = chan->frag_number;
- if (rd && wr) max_bufs *= 2;
- /* via_dsp_mmap() should ensure this */
- assert (pgoff < max_bufs);
- }
-#endif
+ max_bufs = chan->frag_number;
+ if (rd && wr)
+ max_bufs *= 2;
+ if (pgoff >= max_bufs)
+ return NOPAGE_SIGBUS;
/* if full-duplex (read+write) and we have two sets of bufs,
* then the playback buffers come first, sez soundcard.c */
us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
}
area->vm_ops = &us428ctls_vm_ops;
- area->vm_flags |= VM_RESERVED;
+ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
area->vm_private_data = hw->private_data;
return 0;
}
return -ENODEV;
}
area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
- area->vm_flags |= VM_RESERVED;
+ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
area->vm_private_data = hw->private_data;
return 0;
}