Bug
        
Error # 9
Error trace
         {    95     struct kernel_symbol {   unsigned long value;   const char *name; } ;    33     struct module ;    19     typedef signed char __s8;    20     typedef unsigned char __u8;    22     typedef short __s16;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    30     typedef unsigned long long __u64;    15     typedef signed char s8;    16     typedef unsigned char u8;    18     typedef short s16;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;    32     typedef __u16 __le16;    33     typedef __u16 __be16;    34     typedef __u32 __le32;    35     typedef __u32 __be32;    36     typedef __u64 __le64;    39     typedef __u16 __sum16;    40     typedef __u32 __wsum;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;   102     typedef __s32 int32_t;   106     typedef __u8 uint8_t;   108     typedef __u32 uint32_t;   111     typedef __u64 uint64_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   147     typedef u64 dma_addr_t;   158     typedef unsigned int gfp_t;   159     typedef unsigned int fmode_t;   160     typedef unsigned int oom_flags_t;   163     typedef u64 phys_addr_t;   168     typedef phys_addr_t resource_size_t;   178     struct __anonstruct_atomic_t_6 {   int counter; } ;   178     typedef struct __anonstruct_atomic_t_6 atomic_t;   183     struct __anonstruct_atomic64_t_7 {   long counter; } ;   183     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   184     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   189     struct hlist_node ;   189     struct hlist_head {   struct hlist_node *first; } ;   193     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   204     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;    65     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    59     struct __anonstruct_ldv_1016_9 {   unsigned int a;   unsigned int b; } ;   
 59     struct __anonstruct_ldv_1031_10 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    59     union __anonunion_ldv_1032_8 {   struct __anonstruct_ldv_1016_9 ldv_1016;   struct __anonstruct_ldv_1031_10 ldv_1031; } ;    59     struct desc_struct {   union __anonunion_ldv_1032_8 ldv_1032; } ;    12     typedef unsigned long pteval_t;    15     typedef unsigned long pgdval_t;    16     typedef unsigned long pgprotval_t;    18     struct __anonstruct_pte_t_11 {   pteval_t pte; } ;    18     typedef struct __anonstruct_pte_t_11 pte_t;    20     struct pgprot {   pgprotval_t pgprot; } ;   242     typedef struct pgprot pgprot_t;   244     struct __anonstruct_pgd_t_12 {   pgdval_t pgd; } ;   244     typedef struct __anonstruct_pgd_t_12 pgd_t;   332     struct page ;   332     typedef struct page *pgtable_t;   340     struct file ;   353     struct seq_file ;   390     struct thread_struct ;   392     struct mm_struct ;   393     struct task_struct ;   394     struct cpumask ;   395     struct paravirt_callee_save {   void *func; } ;   196     struct pv_irq_ops {   struct paravirt_callee_save save_fl;   struct paravirt_callee_save restore_fl;   struct paravirt_callee_save irq_disable;   struct paravirt_callee_save irq_enable;   void (*safe_halt)();   void (*halt)();   void (*adjust_exception_frame)(); } ;   327     struct arch_spinlock ;    18     typedef u16 __ticket_t;    19     typedef u32 __ticketpair_t;    20     struct __raw_tickets {   __ticket_t head;   __ticket_t tail; } ;    32     union __anonunion_ldv_1452_15 {   __ticketpair_t head_tail;   struct __raw_tickets tickets; } ;    32     struct arch_spinlock {   union __anonunion_ldv_1452_15 ldv_1452; } ;    33     typedef struct arch_spinlock arch_spinlock_t;    34     struct qrwlock {   atomic_t cnts;   arch_spinlock_t lock; } ;    14     typedef struct qrwlock arch_rwlock_t;   142     typedef void (*ctor_fn_t)();    48     struct device ;    54     struct net_device ;   400     struct file_operations ;   412     struct completion ;   416     struct pid ;   527     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;   102     struct timespec ;   127     struct kernel_vm86_regs {   struct pt_regs pt;   unsigned short es;   unsigned short __esh;   unsigned short ds;   unsigned short __dsh;   unsigned short fs;   unsigned short __fsh;   unsigned short gs;   unsigned short __gsh; } ;    79     union __anonunion_ldv_2961_20 {   struct pt_regs *regs;   struct kernel_vm86_regs *vm86; } ;    79     struct math_emu_info {   long ___orig_eip;   union __anonunion_ldv_2961_20 ldv_2961; } ;   306     struct cpumask {   unsigned long bits[128U]; } ;    14     typedef struct cpumask cpumask_t;   671     typedef struct cpumask *cpumask_var_t;   162     struct seq_operations ;   294     struct i387_fsave_struct {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;   312     struct __anonstruct_ldv_5248_25 {   u64 rip;   u64 rdp; } ;   312     struct __anonstruct_ldv_5254_26 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;   312     union __anonunion_ldv_5255_24 {   struct __anonstruct_ldv_5248_25 ldv_5248;   struct __anonstruct_ldv_5254_26 ldv_5254; } ;   312     union __anonunion_ldv_5264_27 {   u32 padding1[12U];   u32 
sw_reserved[12U]; } ;   312     struct i387_fxsave_struct {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion_ldv_5255_24 ldv_5255;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion_ldv_5264_27 ldv_5264; } ;   346     struct i387_soft_struct {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   367     struct ymmh_struct {   u32 ymmh_space[64U]; } ;   372     struct lwp_struct {   u8 reserved[128U]; } ;   377     struct bndregs_struct {   u64 bndregs[8U]; } ;   381     struct bndcsr_struct {   u64 cfg_reg_u;   u64 status_reg; } ;   386     struct xsave_hdr_struct {   u64 xstate_bv;   u64 reserved1[2U];   u64 reserved2[5U]; } ;   392     struct xsave_struct {   struct i387_fxsave_struct i387;   struct xsave_hdr_struct xsave_hdr;   struct ymmh_struct ymmh;   struct lwp_struct lwp;   struct bndregs_struct bndregs;   struct bndcsr_struct bndcsr; } ;   401     union thread_xstate {   struct i387_fsave_struct fsave;   struct i387_fxsave_struct fxsave;   struct i387_soft_struct soft;   struct xsave_struct xsave; } ;   409     struct fpu {   unsigned int last_cpu;   unsigned int has_fpu;   union thread_xstate *state; } ;   465     struct kmem_cache ;   466     struct perf_event ;   467     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned long usersp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   unsigned long fs;   unsigned long gs;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   struct fpu fpu;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   unsigned char fpu_counter; } ;    23     typedef atomic64_t atomic_long_t;    35     struct lockdep_map ;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    26     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct list_head hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned int version;   unsigned long ops;   const char *name;   int name_version;   unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int cpu;   unsigned long ip; } ;   205     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references; } ;   530     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct 
raw_spinlock raw_spinlock_t;    33     struct __anonstruct_ldv_6305_31 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion_ldv_6306_30 {   struct raw_spinlock rlock;   struct __anonstruct_ldv_6305_31 ldv_6305; } ;    33     struct spinlock {   union __anonunion_ldv_6306_30 ldv_6306; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_32 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_32 rwlock_t;   135     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    51     typedef struct seqcount seqcount_t;   259     struct __anonstruct_seqlock_t_33 {   struct seqcount seqcount;   spinlock_t lock; } ;   259     typedef struct __anonstruct_seqlock_t_33 seqlock_t;   433     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;    83     struct user_namespace ;    22     struct __anonstruct_kuid_t_34 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_34 kuid_t;    27     struct __anonstruct_kgid_t_35 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_35 kgid_t;   127     struct kstat {   u64 ino;   dev_t dev;   umode_t mode;   unsigned int nlink;   kuid_t uid;   kgid_t gid;   dev_t rdev;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   unsigned long blksize;   unsigned long long blocks; } ;    34     struct __wait_queue_head {   spinlock_t lock;   struct list_head task_list; } ;    39     typedef struct __wait_queue_head wait_queue_head_t;    98     struct __anonstruct_nodemask_t_36 {   unsigned long bits[16U]; } ;    98     typedef struct __anonstruct_nodemask_t_36 nodemask_t;   814     struct optimistic_spin_queue ;   815     struct mutex {   atomic_t count;   spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   const char *name;   void *magic;   struct lockdep_map dep_map; } ;    68     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   void *magic; } ;   178     struct rw_semaphore ;   179     struct rw_semaphore {   long count;   raw_spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   struct optimistic_spin_queue *osq;   struct lockdep_map dep_map; } ;   174     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   105     struct llist_node ;    64     struct llist_node {   struct llist_node *next; } ;    72     struct resource {   resource_size_t start;   resource_size_t end;   const char *name;   unsigned long flags;   struct resource *parent;   struct resource *sibling;   struct resource *child; } ;   172     struct pci_dev ;   323     union ktime {   s64 tv64; } ;    59     typedef union ktime ktime_t;   412     struct tvec_base ;   413     struct timer_list {   struct list_head entry;   unsigned long expires;   struct tvec_base *base;   void (*function)(unsigned long);   unsigned long data;   int slack;   int start_pid;   void *start_site;   char start_comm[16U];   struct lockdep_map lockdep_map; } ;   254     struct hrtimer ;   255     enum hrtimer_restart ;   266     struct workqueue_struct ;   267     struct work_struct ;    53     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   106     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq; 
  int cpu; } ;    58     struct pm_message {   int event; } ;    64     typedef struct pm_message pm_message_t;    65     struct dev_pm_ops {   int (*prepare)(struct device *);   void (*complete)(struct device *);   int (*suspend)(struct device *);   int (*resume)(struct device *);   int (*freeze)(struct device *);   int (*thaw)(struct device *);   int (*poweroff)(struct device *);   int (*restore)(struct device *);   int (*suspend_late)(struct device *);   int (*resume_early)(struct device *);   int (*freeze_late)(struct device *);   int (*thaw_early)(struct device *);   int (*poweroff_late)(struct device *);   int (*restore_early)(struct device *);   int (*suspend_noirq)(struct device *);   int (*resume_noirq)(struct device *);   int (*freeze_noirq)(struct device *);   int (*thaw_noirq)(struct device *);   int (*poweroff_noirq)(struct device *);   int (*restore_noirq)(struct device *);   int (*runtime_suspend)(struct device *);   int (*runtime_resume)(struct device *);   int (*runtime_idle)(struct device *); } ;   320     enum rpm_status {   RPM_ACTIVE = 0,   RPM_RESUMING = 1,   RPM_SUSPENDED = 2,   RPM_SUSPENDING = 3 } ;   327     enum rpm_request {   RPM_REQ_NONE = 0,   RPM_REQ_IDLE = 1,   RPM_REQ_SUSPEND = 2,   RPM_REQ_AUTOSUSPEND = 3,   RPM_REQ_RESUME = 4 } ;   335     struct wakeup_source ;   546     struct pm_subsys_data {   spinlock_t lock;   unsigned int refcount;   struct list_head clock_list; } ;   553     struct dev_pm_qos ;   553     struct dev_pm_info {   pm_message_t power_state;   unsigned char can_wakeup;   unsigned char async_suspend;   bool is_prepared;   bool is_suspended;   bool is_noirq_suspended;   bool is_late_suspended;   bool ignore_children;   bool early_init;   bool direct_complete;   spinlock_t lock;   struct list_head entry;   struct completion completion;   struct wakeup_source *wakeup;   bool wakeup_path;   bool syscore;   struct timer_list suspend_timer;   unsigned long timer_expires;   struct work_struct work;   wait_queue_head_t wait_queue;   atomic_t usage_count;   atomic_t child_count;   unsigned char disable_depth;   unsigned char idle_notification;   unsigned char request_pending;   unsigned char deferred_resume;   unsigned char run_wake;   unsigned char runtime_auto;   unsigned char no_callbacks;   unsigned char irq_safe;   unsigned char use_autosuspend;   unsigned char timer_autosuspends;   unsigned char memalloc_noio;   enum rpm_request request;   enum rpm_status runtime_status;   int runtime_error;   int autosuspend_delay;   unsigned long last_busy;   unsigned long active_jiffies;   unsigned long suspended_jiffies;   unsigned long accounting_timestamp;   struct pm_subsys_data *subsys_data;   void (*set_latency_tolerance)(struct device *, s32 );   struct dev_pm_qos *qos; } ;   614     struct dev_pm_domain {   struct dev_pm_ops ops; } ;   133     struct pci_bus ;    22     struct __anonstruct_mm_context_t_101 {   void *ldt;   int size;   unsigned short ia32_compat;   struct mutex lock;   void *vdso; } ;    22     typedef struct __anonstruct_mm_context_t_101 mm_context_t;    18     struct rb_node {   unsigned long __rb_parent_color;   struct rb_node *rb_right;   struct rb_node *rb_left; } ;    40     struct rb_root {   struct rb_node *rb_node; } ;    87     struct vm_area_struct ;    22     struct bio_vec ;   167     struct notifier_block ;    51     struct notifier_block {   int (*notifier_call)(struct notifier_block *, unsigned long, void *);   struct notifier_block *next;   int priority; } ;    63     struct blocking_notifier_head {   struct 
rw_semaphore rwsem;   struct notifier_block *head; } ;   906     struct ctl_table ;   835     struct nsproxy ;   836     struct ctl_table_root ;   837     struct ctl_table_header ;   838     struct ctl_dir ;    39     typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);    59     struct ctl_table_poll {   atomic_t event;   wait_queue_head_t wait; } ;    98     struct ctl_table {   const char *procname;   void *data;   int maxlen;   umode_t mode;   struct ctl_table *child;   proc_handler *proc_handler;   struct ctl_table_poll *poll;   void *extra1;   void *extra2; } ;   119     struct ctl_node {   struct rb_node node;   struct ctl_table_header *header; } ;   124     struct __anonstruct_ldv_14188_129 {   struct ctl_table *ctl_table;   int used;   int count;   int nreg; } ;   124     union __anonunion_ldv_14190_128 {   struct __anonstruct_ldv_14188_129 ldv_14188;   struct callback_head rcu; } ;   124     struct ctl_table_set ;   124     struct ctl_table_header {   union __anonunion_ldv_14190_128 ldv_14190;   struct completion *unregistering;   struct ctl_table *ctl_table_arg;   struct ctl_table_root *root;   struct ctl_table_set *set;   struct ctl_dir *parent;   struct ctl_node *node; } ;   145     struct ctl_dir {   struct ctl_table_header header;   struct rb_root root; } ;   151     struct ctl_table_set {   int (*is_seen)(struct ctl_table_set *);   struct ctl_dir dir; } ;   156     struct ctl_table_root {   struct ctl_table_set default_set;   struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *);   int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;    37     struct cred ;    24     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct_ldv_14434_136 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct __anonstruct_ldv_14438_137 {   struct callback_head dup_xol_work;   unsigned long dup_xol_addr; } ;    73     union __anonunion_ldv_14439_135 {   struct __anonstruct_ldv_14434_136 ldv_14434;   struct __anonstruct_ldv_14438_137 ldv_14438; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion_ldv_14439_135 ldv_14439;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    94     struct xol_area ;    95     struct uprobes_state {   struct xol_area *xol_area; } ;   133     struct address_space ;   134     union __anonunion_ldv_14548_138 {   struct address_space *mapping;   void *s_mem; } ;   134     union __anonunion_ldv_14554_140 {   unsigned long index;   void *freelist;   bool pfmemalloc; } ;   134     struct __anonstruct_ldv_14564_144 {   unsigned short inuse;   unsigned short objects;   unsigned char frozen; } ;   134     union __anonunion_ldv_14566_143 {   atomic_t _mapcount;   struct __anonstruct_ldv_14564_144 ldv_14564;   int units; } ;   134     struct __anonstruct_ldv_14568_142 {   union __anonunion_ldv_14566_143 ldv_14566;   atomic_t _count; } ;   134     union __anonunion_ldv_14570_141 {   unsigned long counters;   struct __anonstruct_ldv_14568_142 ldv_14568;   unsigned int active; } ;   134     struct __anonstruct_ldv_14571_139 {   union __anonunion_ldv_14554_140 ldv_14554;   
union __anonunion_ldv_14570_141 ldv_14570; } ;   134     struct __anonstruct_ldv_14578_146 {   struct page *next;   int pages;   int pobjects; } ;   134     struct slab ;   134     union __anonunion_ldv_14583_145 {   struct list_head lru;   struct __anonstruct_ldv_14578_146 ldv_14578;   struct slab *slab_page;   struct callback_head callback_head;   pgtable_t pmd_huge_pte; } ;   134     union __anonunion_ldv_14589_147 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache;   struct page *first_page; } ;   134     struct page {   unsigned long flags;   union __anonunion_ldv_14548_138 ldv_14548;   struct __anonstruct_ldv_14571_139 ldv_14571;   union __anonunion_ldv_14583_145 ldv_14583;   union __anonunion_ldv_14589_147 ldv_14589;   unsigned long debug_flags; } ;   187     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;   239     struct __anonstruct_linear_149 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   239     union __anonunion_shared_148 {   struct __anonstruct_linear_149 linear;   struct list_head nonlinear; } ;   239     struct anon_vma ;   239     struct vm_operations_struct ;   239     struct mempolicy ;   239     struct vm_area_struct {   unsigned long vm_start;   unsigned long vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   union __anonunion_shared_148 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy; } ;   311     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   317     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   330     struct task_rss_stat {   int events;   int count[3U]; } ;   338     struct mm_rss_stat {   atomic_long_t count[3U]; } ;   343     struct kioctx_table ;   344     struct linux_binfmt ;   344     struct mmu_notifier_mm ;   344     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end;   pgd_t *pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   int map_count;   spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long shared_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct file 
*exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;    48     union __anonunion_ldv_14952_153 {   unsigned long bitmap[4U];   struct callback_head callback_head; } ;    48     struct idr_layer {   int prefix;   int layer;   struct idr_layer *ary[256U];   int count;   union __anonunion_ldv_14952_153 ldv_14952; } ;    41     struct idr {   struct idr_layer *hint;   struct idr_layer *top;   int layers;   int cur;   spinlock_t lock;   int id_free_cnt;   struct idr_layer *id_free; } ;   124     struct ida_bitmap {   long nr_busy;   unsigned long bitmap[15U]; } ;   153     struct ida {   struct idr idr;   struct ida_bitmap *free_bitmap; } ;   185     struct dentry ;   186     struct iattr ;   187     struct super_block ;   188     struct file_system_type ;   189     struct kernfs_open_node ;   190     struct kernfs_iattrs ;   213     struct kernfs_root ;   213     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    85     struct kernfs_node ;    85     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    89     struct kernfs_ops ;    89     struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size; } ;    95     union __anonunion_ldv_15096_154 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    95     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion_ldv_15096_154 ldv_15096;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   137     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int (*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;   154     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   170     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   void *priv;   struct mutex mutex;   int event;   struct list_head list;   size_t atomic_write_len;   bool mmapped;   const struct vm_operations_struct *vm_ops; } ;   186     struct kernfs_ops {   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file 
*, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   462     struct sock ;   463     struct kobject ;   464     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   470     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    67     struct bin_attribute {   struct attribute attr;   size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t  (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   131     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;   470     struct kref {   atomic_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   114     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   122     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   130     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   147     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   222     struct kernel_param ;   227     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    58     struct kparam_string ;    58     struct kparam_array ;    58     union __anonunion_ldv_15771_155 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    58     struct kernel_param {   const char *name;   const struct kernel_param_ops *ops;   u16 perm;   s16 level;   union __anonunion_ldv_15771_155 ldv_15771; } ;    70     struct kparam_string {   unsigned int maxlen;   char *string; } ;    76     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   461     struct 
mod_arch_specific { } ;    36     struct module_param_attrs ;    36     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    46     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;    72     struct exception_table_entry ;   205     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   212     struct module_ref {   unsigned long incs;   unsigned long decs; } ;   226     struct module_sect_attrs ;   226     struct module_notes_attrs ;   226     struct tracepoint ;   226     struct ftrace_event_call ;   226     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const unsigned long *crcs;   unsigned int num_syms;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const unsigned long *gpl_crcs;   const struct kernel_symbol *unused_syms;   const unsigned long *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const unsigned long *unused_gpl_crcs;   bool sig_ok;   const struct kernel_symbol *gpl_future_syms;   const unsigned long *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   void *module_init;   void *module_core;   unsigned int init_size;   unsigned int core_size;   unsigned int init_text_size;   unsigned int core_text_size;   unsigned int init_ro_size;   unsigned int core_ro_size;   struct mod_arch_specific arch;   unsigned int taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   Elf64_Sym *symtab;   Elf64_Sym *core_symtab;   unsigned int num_symtab;   unsigned int core_num_syms;   char *strtab;   char *core_strtab;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct ftrace_event_call **trace_events;   unsigned int num_trace_events;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   struct module_ref *refptr;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;   218     struct plist_head {   struct list_head node_list; } ;    84     struct plist_node {   int prio;   struct list_head prio_list;   struct list_head node_list; } ;     4     typedef unsigned long cputime_t;    25     struct sem_undo_list ;    25     struct sysv_sem {   struct sem_undo_list *undo_list; } ;    24     struct __anonstruct_sigset_t_157 {   unsigned long sig[1U]; } ;    24     typedef struct __anonstruct_sigset_t_157 sigset_t;    
25     struct siginfo ;    17     typedef void __signalfn_t(int);    18     typedef __signalfn_t *__sighandler_t;    20     typedef void __restorefn_t();    21     typedef __restorefn_t *__sigrestore_t;    34     union sigval {   int sival_int;   void *sival_ptr; } ;    10     typedef union sigval sigval_t;    11     struct __anonstruct__kill_159 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;    11     struct __anonstruct__timer_160 {   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;    11     struct __anonstruct__rt_161 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   sigval_t _sigval; } ;    11     struct __anonstruct__sigchld_162 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;    11     struct __anonstruct__sigfault_163 {   void *_addr;   short _addr_lsb; } ;    11     struct __anonstruct__sigpoll_164 {   long _band;   int _fd; } ;    11     struct __anonstruct__sigsys_165 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;    11     union __anonunion__sifields_158 {   int _pad[28U];   struct __anonstruct__kill_159 _kill;   struct __anonstruct__timer_160 _timer;   struct __anonstruct__rt_161 _rt;   struct __anonstruct__sigchld_162 _sigchld;   struct __anonstruct__sigfault_163 _sigfault;   struct __anonstruct__sigpoll_164 _sigpoll;   struct __anonstruct__sigsys_165 _sigsys; } ;    11     struct siginfo {   int si_signo;   int si_errno;   int si_code;   union __anonunion__sifields_158 _sifields; } ;   109     typedef struct siginfo siginfo_t;    11     struct user_struct ;    21     struct sigpending {   struct list_head list;   sigset_t signal; } ;   246     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;   260     struct k_sigaction {   struct sigaction sa; } ;   459     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   466     struct pid_namespace ;   466     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;   174     struct percpu_counter {   raw_spinlock_t lock;   s64 count;   struct list_head list;   s32 *counters; } ;    46     struct seccomp_filter ;    47     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;    40     struct rt_mutex_waiter ;    41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;    11     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   unsigned long state;   int start_pid;   void *start_site;   char start_comm[16U]; } ;   132     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t resolution;   ktime_t  (*get_time)();   ktime_t softirq_time;   ktime_t 
offset; } ;   163     struct hrtimer_cpu_base {   raw_spinlock_t lock;   unsigned int active_bases;   unsigned int clock_was_set;   ktime_t expires_next;   int hres_active;   int hang_detected;   unsigned long nr_events;   unsigned long nr_retries;   unsigned long nr_hangs;   ktime_t max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;   463     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;    45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    39     struct assoc_array_ptr ;    39     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ;    31     typedef int32_t key_serial_t;    34     typedef uint32_t key_perm_t;    35     struct key ;    36     struct signal_struct ;    37     struct key_type ;    41     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;   123     union __anonunion_ldv_17540_168 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;   123     struct key_user ;   123     union __anonunion_ldv_17548_169 {   time_t expiry;   time_t revoked_at; } ;   123     struct __anonstruct_ldv_17561_171 {   struct key_type *type;   char *description; } ;   123     union __anonunion_ldv_17562_170 {   struct keyring_index_key index_key;   struct __anonstruct_ldv_17561_171 ldv_17561; } ;   123     union __anonunion_type_data_172 {   struct list_head link;   unsigned long x[2U];   void *p[2U];   int reject_error; } ;   123     union __anonunion_payload_174 {   unsigned long value;   void *rcudata;   void *data;   void *data2[2U]; } ;   123     union __anonunion_ldv_17577_173 {   union __anonunion_payload_174 payload;   struct assoc_array keys; } ;   123     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion_ldv_17540_168 ldv_17540;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion_ldv_17548_169 ldv_17548;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion_ldv_17562_170 ldv_17562;   union __anonunion_type_data_172 type_data;   union __anonunion_ldv_17577_173 ldv_17577; } ;   356     struct audit_context ;    27     struct group_info {   atomic_t usage;   int ngroups;   int nblocks;   kgid_t small_block[32U];   kgid_t *blocks[0U]; } ;    78     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t cap_bset;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   125     struct futex_pi_state ;   126     struct robust_list_head ;   127     struct bio_list ;   128     struct fs_struct ;   129     struct perf_event_context ;   130     struct blk_plug ;   180     struct cfs_rq ;   181     struct task_group ;   426     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   
spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   465     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t ac_stime;   unsigned long ac_minflt;   unsigned long ac_majflt; } ;   473     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   480     struct cputime {   cputime_t utime;   cputime_t stime; } ;   492     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   512     struct thread_group_cputimer {   struct task_cputime cputime;   int running;   raw_spinlock_t lock; } ;   554     struct autogroup ;   555     struct tty_struct ;   555     struct taskstats ;   555     struct tty_audit_buf ;   555     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   struct autogroup *autogroup;   cputime_t utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct pacct_struct pacct;   struct taskstats *stats;   unsigned int audit_tty;   unsigned int audit_tty_log_passwd;   struct tty_audit_buf *tty_audit_buf;   struct rw_semaphore group_rwsem;   oom_flags_t oom_flags;   short oom_score_adj;   short oom_score_adj_min;   struct mutex cred_guard_mutex; } ;   735     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   778     struct backing_dev_info ;   779     struct reclaim_state ;   780     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   794     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   struct timespec blkio_start;   struct timespec blkio_end;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   struct timespec freepages_start;   struct timespec freepages_end;   u64 freepages_delay;   u32 freepages_count; } ;  1026     struct io_context ;  1060     struct pipe_inode_info ;  1061     struct uts_namespace ;  1062     struct 
load_weight {   unsigned long weight;   u32 inv_weight; } ;  1069     struct sched_avg {   u32 runnable_avg_sum;   u32 runnable_avg_period;   u64 last_runnable_update;   s64 decay_count;   unsigned long load_avg_contrib; } ;  1081     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1116     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1148     struct rt_rq ;  1148     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1164     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_new;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;  1222     struct mem_cgroup ;  1222     struct memcg_batch_info {   int do_batch;   struct mem_cgroup *memcg;   unsigned long nr_pages;   unsigned long memsw_nr_pages; } ;  1643     struct memcg_oom_info {   struct mem_cgroup *memcg;   gfp_t gfp_mask;   int order;   unsigned char may_oom; } ;  1650     struct sched_class ;  1650     struct files_struct ;  1650     struct css_set ;  1650     struct compat_robust_list_head ;  1650     struct numa_group ;  1650     struct task_struct {   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   struct task_struct *last_wakee;   unsigned long wakee_flips;   unsigned long wakee_flip_decay_ts;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   unsigned char brk_randomized;   u32 vmacache_seqnum;   struct vm_area_struct *vmacache[4U];   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned int jobctl;   unsigned int personality;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char no_new_privs;   unsigned char sched_reset_on_fork;   unsigned char 
sched_contributes_to_load;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   struct timespec start_time;   struct timespec real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   int link_count;   int total_link_count;   struct sysv_sem sysvsem;   unsigned long last_switch_count;   struct thread_struct thread;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   int (*notifier)(void *);   void *notifier_data;   sigset_t *notifier_mask;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct task_struct *pi_top_task;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head 
numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults_memory;   unsigned long total_numa_faults;   unsigned long *numa_faults_buffer_memory;   unsigned long *numa_faults_cpu;   unsigned long *numa_faults_buffer_cpu;   unsigned long numa_faults_locality[2U];   unsigned long numa_pages_migrated;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   unsigned long timer_slack_ns;   unsigned long default_timer_slack_ns;   unsigned long trace;   unsigned long trace_recursion;   struct memcg_batch_info memcg_batch;   unsigned int memcg_kmem_skip_account;   struct memcg_oom_info memcg_oom;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg; } ;  2998     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;   359     struct proc_dir_entry ;    62     struct exception_table_entry {   int insn;   int fixup; } ;   450     struct tasklet_struct {   struct tasklet_struct *next;   unsigned long state;   atomic_t count;   void (*func)(unsigned long);   unsigned long data; } ;    13     typedef unsigned long kernel_ulong_t;    14     struct pci_device_id {   __u32 vendor;   __u32 device;   __u32 subvendor;   __u32 subdevice;   __u32 class;   __u32 class_mask;   kernel_ulong_t driver_data; } ;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data; } ;   219     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   628     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    67     struct path ;    68     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   struct user_namespace *user_ns;   void *private; } ;    35     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   196     struct pinctrl ;   197     struct pinctrl_state ;   194     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    42     struct dma_map_ops ;    42     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    14     struct device_private ;    15     struct device_driver ;    16     struct driver_private ;    17     struct class ;    18     struct subsys_private ;    19     struct bus_type ;    20     struct device_node ;    21     struct iommu_ops ;    22     struct iommu_group ;    60     struct device_attribute ;    60     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);  
 void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   138     struct device_type ;   195     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   321     struct class_attribute ;   321     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   414     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   482     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   510     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   640     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   649     struct acpi_device ;   650     struct acpi_dev_node {   struct acpi_device *companion; } ;   656     struct dma_coherent_mem ;   656     struct cma ;   656     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct dev_pin_info *pins;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct acpi_dev_node acpi_node;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   bool offline_disabled;   bool offline; } ;   803     
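As a point of reference for the struct device_attribute callbacks listed just above (ssize_t (*show)(struct device *, struct device_attribute *, char *) and the matching store), a minimal sketch of how a driver typically wires one up; the attribute name "revision" and its value are illustrative only and do not come from this trace:

/*
 * Illustrative sketch, not part of the trace: a read-only sysfs attribute
 * built on the struct device_attribute shown above. "revision" is a
 * hypothetical attribute name.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t revision_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        /* a real driver would read device state; a fixed string suffices here */
        return sprintf(buf, "1.0\n");
}
static DEVICE_ATTR_RO(revision);  /* defines struct device_attribute dev_attr_revision */

A probe routine would then typically expose the file with device_create_file(dev, &dev_attr_revision) and drop it with device_remove_file() on teardown.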
struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;    69     struct hotplug_slot ;    69     struct pci_slot {   struct pci_bus *bus;   struct list_head list;   struct hotplug_slot *hotplug;   unsigned char number;   struct kobject kobj; } ;   109     typedef int pci_power_t;   136     typedef unsigned int pci_channel_state_t;   137     enum pci_channel_state {   pci_channel_io_normal = 1,   pci_channel_io_frozen = 2,   pci_channel_io_perm_failure = 3 } ;   162     typedef unsigned short pci_dev_flags_t;   185     typedef unsigned short pci_bus_flags_t;   242     struct pcie_link_state ;   243     struct pci_vpd ;   244     struct pci_sriov ;   245     struct pci_ats ;   246     struct pci_driver ;   246     union __anonunion_ldv_22518_181 {   struct pci_sriov *sriov;   struct pci_dev *physfn; } ;   246     struct pci_dev {   struct list_head bus_list;   struct pci_bus *bus;   struct pci_bus *subordinate;   void *sysdata;   struct proc_dir_entry *procent;   struct pci_slot *slot;   unsigned int devfn;   unsigned short vendor;   unsigned short device;   unsigned short subsystem_vendor;   unsigned short subsystem_device;   unsigned int class;   u8 revision;   u8 hdr_type;   u8 pcie_cap;   u8 msi_cap;   u8 msix_cap;   unsigned char pcie_mpss;   u8 rom_base_reg;   u8 pin;   u16 pcie_flags_reg;   u8 dma_alias_devfn;   struct pci_driver *driver;   u64 dma_mask;   struct device_dma_parameters dma_parms;   pci_power_t current_state;   u8 pm_cap;   unsigned char pme_support;   unsigned char pme_interrupt;   unsigned char pme_poll;   unsigned char d1_support;   unsigned char d2_support;   unsigned char no_d1d2;   unsigned char no_d3cold;   unsigned char d3cold_allowed;   unsigned char mmio_always_on;   unsigned char wakeup_prepared;   unsigned char runtime_d3cold;   unsigned int d3_delay;   unsigned int d3cold_delay;   struct pcie_link_state *link_state;   pci_channel_state_t error_state;   struct device dev;   int cfg_size;   unsigned int irq;   struct resource resource[17U];   bool match_driver;   unsigned char transparent;   unsigned char multifunction;   unsigned char is_added;   unsigned char is_busmaster;   unsigned char no_msi;   unsigned char block_cfg_access;   unsigned char broken_parity_status;   unsigned char irq_reroute_variant;   unsigned char msi_enabled;   unsigned char msix_enabled;   unsigned char ari_enabled;   unsigned char is_managed;   unsigned char needs_freset;   unsigned char state_saved;   unsigned char is_physfn;   unsigned char is_virtfn;   unsigned char reset_fn;   unsigned char is_hotplug_bridge;   unsigned char __aer_firmware_first_valid;   unsigned char __aer_firmware_first;   unsigned char broken_intx_masking;   unsigned char io_window_1k;   pci_dev_flags_t dev_flags;   atomic_t enable_cnt;   u32 saved_config_space[16U];   struct hlist_head saved_cap_space;   struct bin_attribute *rom_attr;   int rom_attr_enabled;   struct bin_attribute *res_attr[17U];   struct bin_attribute *res_attr_wc[17U];   struct list_head msi_list;   const struct attribute_group **msi_irq_groups;   struct pci_vpd *vpd;   union __anonunion_ldv_22518_181 ldv_22518;   struct pci_ats *ats;   
phys_addr_t rom;   size_t romlen;   char *driver_override; } ;   436     struct pci_ops ;   436     struct msi_chip ;   436     struct pci_bus {   struct list_head node;   struct pci_bus *parent;   struct list_head children;   struct list_head devices;   struct pci_dev *self;   struct list_head slots;   struct resource *resource[4U];   struct list_head resources;   struct resource busn_res;   struct pci_ops *ops;   struct msi_chip *msi;   void *sysdata;   struct proc_dir_entry *procdir;   unsigned char number;   unsigned char primary;   unsigned char max_bus_speed;   unsigned char cur_bus_speed;   char name[48U];   unsigned short bridge_ctl;   pci_bus_flags_t bus_flags;   struct device *bridge;   struct device dev;   struct bin_attribute *legacy_io;   struct bin_attribute *legacy_mem;   unsigned char is_added; } ;   553     struct pci_ops {   int (*read)(struct pci_bus *, unsigned int, int, int, u32 *);   int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;   574     struct pci_dynids {   spinlock_t lock;   struct list_head list; } ;   588     typedef unsigned int pci_ers_result_t;   598     struct pci_error_handlers {   pci_ers_result_t  (*error_detected)(struct pci_dev *, enum pci_channel_state );   pci_ers_result_t  (*mmio_enabled)(struct pci_dev *);   pci_ers_result_t  (*link_reset)(struct pci_dev *);   pci_ers_result_t  (*slot_reset)(struct pci_dev *);   void (*reset_notify)(struct pci_dev *, bool );   void (*resume)(struct pci_dev *); } ;   631     struct pci_driver {   struct list_head node;   const char *name;   const struct pci_device_id *id_table;   int (*probe)(struct pci_dev *, const struct pci_device_id *);   void (*remove)(struct pci_dev *);   int (*suspend)(struct pci_dev *, pm_message_t );   int (*suspend_late)(struct pci_dev *, pm_message_t );   int (*resume_early)(struct pci_dev *);   int (*resume)(struct pci_dev *);   void (*shutdown)(struct pci_dev *);   int (*sriov_configure)(struct pci_dev *, int);   const struct pci_error_handlers *err_handler;   struct device_driver driver;   struct pci_dynids dynids; } ;  1153     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    93     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   nodemask_t nodes_to_scan;   int nid; } ;    26     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    71     struct file_ra_state ;    72     struct writeback_control ;   188     struct vm_fault {   unsigned int flags;   unsigned long pgoff;   void *virtual_address;   struct page *page;   unsigned long max_pgoff;   pte_t *pte; } ;   221     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   void (*map_pages)(struct vm_area_struct *, struct vm_fault *);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   int (*migrate)(struct vm_area_struct *, const 
nodemask_t *, const nodemask_t *, unsigned long);   int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ;   368     struct kmem_cache_cpu {   void **freelist;   unsigned long tid;   struct page *page;   struct page *partial;   unsigned int stat[26U]; } ;    48     struct kmem_cache_order_objects {   unsigned long x; } ;    58     struct memcg_cache_params ;    58     struct kmem_cache_node ;    58     struct kmem_cache {   struct kmem_cache_cpu *cpu_slab;   unsigned long flags;   unsigned long min_partial;   int size;   int object_size;   int offset;   int cpu_partial;   struct kmem_cache_order_objects oo;   struct kmem_cache_order_objects max;   struct kmem_cache_order_objects min;   gfp_t allocflags;   int refcount;   void (*ctor)(void *);   int inuse;   int align;   int reserved;   const char *name;   struct list_head list;   struct kobject kobj;   struct memcg_cache_params *memcg_params;   int max_attr_size;   struct kset *memcg_kset;   int remote_node_defrag_ratio;   struct kmem_cache_node *node[1024U]; } ;   501     struct __anonstruct_ldv_26538_183 {   struct callback_head callback_head;   struct kmem_cache *memcg_caches[0U]; } ;   501     struct __anonstruct_ldv_26544_184 {   struct mem_cgroup *memcg;   struct list_head list;   struct kmem_cache *root_cache;   atomic_t nr_pages; } ;   501     union __anonunion_ldv_26545_182 {   struct __anonstruct_ldv_26538_183 ldv_26538;   struct __anonstruct_ldv_26544_184 ldv_26544; } ;   501     struct memcg_cache_params {   bool is_root_cache;   union __anonunion_ldv_26545_182 ldv_26545; } ;    34     struct dma_attrs {   unsigned long flags[1U]; } ;    70     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;    77     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;   351     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *);   void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;    84     struct pm_qos_request {   struct plist_node node;   int pm_qos_class;   struct delayed_work work; } ;    48     struct pm_qos_flags_request {   struct list_head node;   s32 flags; } ;    53     enum dev_pm_qos_req_type {   
DEV_PM_QOS_RESUME_LATENCY = 1,   DEV_PM_QOS_LATENCY_TOLERANCE = 2,   DEV_PM_QOS_FLAGS = 3 } ;    59     union __anonunion_data_185 {   struct plist_node pnode;   struct pm_qos_flags_request flr; } ;    59     struct dev_pm_qos_request {   enum dev_pm_qos_req_type type;   union __anonunion_data_185 data;   struct device *dev; } ;    68     enum pm_qos_type {   PM_QOS_UNITIALIZED = 0,   PM_QOS_MAX = 1,   PM_QOS_MIN = 2 } ;    74     struct pm_qos_constraints {   struct plist_head list;   s32 target_value;   s32 default_value;   s32 no_constraint_value;   enum pm_qos_type type;   struct blocking_notifier_head *notifiers; } ;    88     struct pm_qos_flags {   struct list_head list;   s32 effective_flags; } ;    93     struct dev_pm_qos {   struct pm_qos_constraints resume_latency;   struct pm_qos_constraints latency_tolerance;   struct pm_qos_flags flags;   struct dev_pm_qos_request *resume_latency_req;   struct dev_pm_qos_request *latency_tolerance_req;   struct dev_pm_qos_request *flags_req; } ;    54     struct iovec {   void *iov_base;   __kernel_size_t iov_len; } ;    27     union __anonunion_ldv_28086_186 {   const struct iovec *iov;   const struct bio_vec *bvec; } ;    27     struct iov_iter {   int type;   size_t iov_offset;   size_t count;   union __anonunion_ldv_28086_186 ldv_28086;   unsigned long nr_segs; } ;    38     typedef s32 dma_cookie_t;  1153     struct dql {   unsigned int num_queued;   unsigned int adj_limit;   unsigned int last_obj_cnt;   unsigned int limit;   unsigned int num_completed;   unsigned int prev_ovlimit;   unsigned int prev_num_queued;   unsigned int prev_last_obj_cnt;   unsigned int lowest_slack;   unsigned long slack_start_time;   unsigned int max_limit;   unsigned int min_limit;   unsigned int slack_hold_time; } ;    11     typedef unsigned short __kernel_sa_family_t;    23     typedef __kernel_sa_family_t sa_family_t;    24     struct sockaddr {   sa_family_t sa_family;   char sa_data[14U]; } ;    43     struct __anonstruct_sync_serial_settings_188 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback; } ;    43     typedef struct __anonstruct_sync_serial_settings_188 sync_serial_settings;    50     struct __anonstruct_te1_settings_189 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback;   unsigned int slot_map; } ;    50     typedef struct __anonstruct_te1_settings_189 te1_settings;    55     struct __anonstruct_raw_hdlc_proto_190 {   unsigned short encoding;   unsigned short parity; } ;    55     typedef struct __anonstruct_raw_hdlc_proto_190 raw_hdlc_proto;    65     struct __anonstruct_fr_proto_191 {   unsigned int t391;   unsigned int t392;   unsigned int n391;   unsigned int n392;   unsigned int n393;   unsigned short lmi;   unsigned short dce; } ;    65     typedef struct __anonstruct_fr_proto_191 fr_proto;    69     struct __anonstruct_fr_proto_pvc_192 {   unsigned int dlci; } ;    69     typedef struct __anonstruct_fr_proto_pvc_192 fr_proto_pvc;    74     struct __anonstruct_fr_proto_pvc_info_193 {   unsigned int dlci;   char master[16U]; } ;    74     typedef struct __anonstruct_fr_proto_pvc_info_193 fr_proto_pvc_info;    79     struct __anonstruct_cisco_proto_194 {   unsigned int interval;   unsigned int timeout; } ;    79     typedef struct __anonstruct_cisco_proto_194 cisco_proto;   117     struct ifmap {   unsigned long mem_start;   unsigned long mem_end;   unsigned short base_addr;   unsigned char irq;   unsigned char dma;   unsigned char port; } ;   177     union 
__anonunion_ifs_ifsu_195 {   raw_hdlc_proto *raw_hdlc;   cisco_proto *cisco;   fr_proto *fr;   fr_proto_pvc *fr_pvc;   fr_proto_pvc_info *fr_pvc_info;   sync_serial_settings *sync;   te1_settings *te1; } ;   177     struct if_settings {   unsigned int type;   unsigned int size;   union __anonunion_ifs_ifsu_195 ifs_ifsu; } ;   195     union __anonunion_ifr_ifrn_196 {   char ifrn_name[16U]; } ;   195     union __anonunion_ifr_ifru_197 {   struct sockaddr ifru_addr;   struct sockaddr ifru_dstaddr;   struct sockaddr ifru_broadaddr;   struct sockaddr ifru_netmask;   struct sockaddr ifru_hwaddr;   short ifru_flags;   int ifru_ivalue;   int ifru_mtu;   struct ifmap ifru_map;   char ifru_slave[16U];   char ifru_newname[16U];   void *ifru_data;   struct if_settings ifru_settings; } ;   195     struct ifreq {   union __anonunion_ifr_ifrn_196 ifr_ifrn;   union __anonunion_ifr_ifru_197 ifr_ifru; } ;    91     struct hlist_bl_node ;    91     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct_ldv_29075_200 {   spinlock_t lock;   unsigned int count; } ;   114     union __anonunion_ldv_29076_199 {   struct __anonstruct_ldv_29075_200 ldv_29075; } ;   114     struct lockref {   union __anonunion_ldv_29076_199 ldv_29076; } ;    49     struct nameidata ;    50     struct vfsmount ;    51     struct __anonstruct_ldv_29099_202 {   u32 hash;   u32 len; } ;    51     union __anonunion_ldv_29101_201 {   struct __anonstruct_ldv_29099_202 ldv_29099;   u64 hash_len; } ;    51     struct qstr {   union __anonunion_ldv_29101_201 ldv_29101;   const unsigned char *name; } ;    90     struct dentry_operations ;    90     union __anonunion_d_u_203 {   struct list_head d_child;   struct callback_head d_rcu; } ;    90     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   struct list_head d_lru;   union __anonunion_d_u_203 d_u;   struct list_head d_subdirs;   struct hlist_node d_alias; } ;   142     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool ); } ;   477     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    27     struct list_lru_node {   spinlock_t lock;   struct list_head list;   long nr_items; } ;    30     struct list_lru {   struct list_lru_node *node;   nodemask_t active_nodes; } ;    58     struct __anonstruct_ldv_29462_205 {   struct radix_tree_node *parent;   void *private_data; } ;    58     union __anonunion_ldv_29464_204 {   struct __anonstruct_ldv_29462_205 ldv_29462;   struct callback_head callback_head; } ;    58     struct radix_tree_node {   unsigned int path;   unsigned int count;   union 
__anonunion_ldv_29464_204 ldv_29464;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   105     struct radix_tree_root {   unsigned int height;   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    30     struct block_device ;    31     struct cgroup_subsys_state ;    19     struct bio_vec {   struct page *bv_page;   unsigned int bv_len;   unsigned int bv_offset; } ;    59     struct export_operations ;    61     struct kiocb ;    62     struct poll_table_struct ;    63     struct kstatfs ;    64     struct swap_info_struct ;    69     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   253     struct fs_disk_quota {   __s8 d_version;   __s8 d_flags;   __u16 d_fieldmask;   __u32 d_id;   __u64 d_blk_hardlimit;   __u64 d_blk_softlimit;   __u64 d_ino_hardlimit;   __u64 d_ino_softlimit;   __u64 d_bcount;   __u64 d_icount;   __s32 d_itimer;   __s32 d_btimer;   __u16 d_iwarns;   __u16 d_bwarns;   __s32 d_padding2;   __u64 d_rtb_hardlimit;   __u64 d_rtb_softlimit;   __u64 d_rtbcount;   __s32 d_rtbtimer;   __u16 d_rtbwarns;   __s16 d_padding3;   char d_padding4[8U]; } ;    76     struct fs_qfilestat {   __u64 qfs_ino;   __u64 qfs_nblks;   __u32 qfs_nextents; } ;   151     typedef struct fs_qfilestat fs_qfilestat_t;   152     struct fs_quota_stat {   __s8 qs_version;   __u16 qs_flags;   __s8 qs_pad;   fs_qfilestat_t qs_uquota;   fs_qfilestat_t qs_gquota;   __u32 qs_incoredqs;   __s32 qs_btimelimit;   __s32 qs_itimelimit;   __s32 qs_rtbtimelimit;   __u16 qs_bwarnlimit;   __u16 qs_iwarnlimit; } ;   166     struct fs_qfilestatv {   __u64 qfs_ino;   __u64 qfs_nblks;   __u32 qfs_nextents;   __u32 qfs_pad; } ;   196     struct fs_quota_statv {   __s8 qs_version;   __u8 qs_pad1;   __u16 qs_flags;   __u32 qs_incoredqs;   struct fs_qfilestatv qs_uquota;   struct fs_qfilestatv qs_gquota;   struct fs_qfilestatv qs_pquota;   __s32 qs_btimelimit;   __s32 qs_itimelimit;   __s32 qs_rtbtimelimit;   __u16 qs_bwarnlimit;   __u16 qs_iwarnlimit;   __u64 qs_pad2[8U]; } ;   212     struct dquot ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_206 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_206 kprojid_t;   119     struct if_dqinfo {   __u64 dqi_bgrace;   __u64 dqi_igrace;   __u32 dqi_flags;   __u32 dqi_valid; } ;   152     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    60     typedef long long qsize_t;    61     union __anonunion_ldv_29991_207 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    61     struct kqid {   union __anonunion_ldv_29991_207 ldv_29991;   enum quota_type type; } ;   178     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time_t dqb_btime;   time_t dqb_itime; } ;   200     struct quota_format_type ;   201     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned 
int dqi_igrace;   qsize_t dqi_maxblimit;   qsize_t dqi_maxilimit;   void *dqi_priv; } ;   264     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   291     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *); } ;   302     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *); } ;   316     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_on_meta)(struct super_block *, int, int);   int (*quota_off)(struct super_block *, int);   int (*quota_sync)(struct super_block *, int);   int (*get_info)(struct super_block *, int, struct if_dqinfo *);   int (*set_info)(struct super_block *, int, struct if_dqinfo *);   int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *);   int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *);   int (*get_xstate)(struct super_block *, struct fs_quota_stat *);   int (*set_xstate)(struct super_block *, unsigned int, int);   int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   334     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   380     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct rw_semaphore dqptr_sem;   struct inode *files[2U];   struct mem_dqinfo info[2U];   const struct quota_format_ops *ops[2U]; } ;   323     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t );   int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page 
*, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   382     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   unsigned int i_mmap_writable;   struct rb_root i_mmap;   struct list_head i_mmap_nonlinear;   struct mutex i_mmap_mutex;   unsigned long nrpages;   unsigned long nrshadows;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   struct backing_dev_info *backing_dev_info;   spinlock_t private_lock;   struct list_head private_list;   void *private_data; } ;   405     struct request_queue ;   406     struct hd_struct ;   406     struct gendisk ;   406     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   struct list_head bd_inodes;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   478     struct posix_acl ;   479     struct inode_operations ;   479     union __anonunion_ldv_30405_210 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   479     union __anonunion_ldv_30425_211 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   479     struct file_lock ;   479     struct cdev ;   479     union __anonunion_ldv_30442_212 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev; } ;   479     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion_ldv_30405_210 ldv_30405;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct mutex i_mutex;   unsigned long dirtied_when;   struct hlist_node i_hash;   struct list_head i_wb_list;   struct list_head i_lru;   struct list_head i_sb_list;   union __anonunion_ldv_30425_211 ldv_30425;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock *i_flock;   struct address_space i_data;   struct dquot *i_dquot[2U];   struct list_head i_devices;   union __anonunion_ldv_30442_212 ldv_30442;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   void *i_private; } ;   715     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   723     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;  
 746     union __anonunion_f_u_213 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   746     struct file {   union __anonunion_f_u_213 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   836     typedef struct files_struct *fl_owner_t;   837     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   842     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, struct file_lock *, int);   void (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock **, int); } ;   855     struct net ;   860     struct nlm_lockowner ;   861     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_215 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_214 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_215 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_214 fl_u; } ;   963     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1157     struct sb_writers {   struct percpu_counter counter[3U];   wait_queue_head_t wait;   int frozen;   wait_queue_head_t wait_unfrozen;   struct lockdep_map lock_map[3U]; } ;  1173     struct super_operations ;  1173     struct xattr_handler ;  1173     struct mtd_info ;  1173     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   struct list_head s_inodes;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   struct quota_info 
s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu; } ;  1403     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1441     struct dir_context {   int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1446     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t );   ssize_t  (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t );   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   int (*show_fdinfo)(struct seq_file *, struct file *); } ;  1488     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   void * (*follow_link)(struct dentry *, struct nameidata *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   void (*put_link)(struct dentry *, struct nameidata *, void *);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct 
dentry *);   int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   int (*setxattr)(struct dentry *, const char *, const void *, size_t , int);   ssize_t  (*getxattr)(struct dentry *, const char *, void *, size_t );   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*removexattr)(struct dentry *, const char *);   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1535     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_fs)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, int);   long int (*free_cached_objects)(struct super_block *, long, int); } ;  1749     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;    39     typedef s32 compat_long_t;    44     typedef u32 compat_uptr_t;   276     struct compat_robust_list {   compat_uptr_t next; } ;   280     struct compat_robust_list_head {   struct compat_robust_list list;   compat_long_t futex_offset;   compat_uptr_t list_op_pending; } ;   140     struct sk_buff ;    15     typedef u64 netdev_features_t;    18     struct nf_conntrack {   atomic_t use; } ;   137     struct nf_bridge_info {   atomic_t use;   unsigned int mask;   struct net_device *physindev;   struct net_device *physoutdev;   unsigned long data[4U]; } ;   147     struct sk_buff_head {   struct sk_buff *next;   struct sk_buff *prev;   __u32 qlen;   spinlock_t lock; } ;   173     struct skb_frag_struct ;   173     typedef struct skb_frag_struct skb_frag_t;   174     struct __anonstruct_page_231 {   struct page *p; } ;   174     struct skb_frag_struct {   struct 
__anonstruct_page_231 page;   __u32 page_offset;   __u32 size; } ;   207     struct skb_shared_hwtstamps {   ktime_t hwtstamp;   ktime_t syststamp; } ;   276     struct skb_shared_info {   unsigned char nr_frags;   __u8 tx_flags;   unsigned short gso_size;   unsigned short gso_segs;   unsigned short gso_type;   struct sk_buff *frag_list;   struct skb_shared_hwtstamps hwtstamps;   __be32 ip6_frag_id;   atomic_t dataref;   void *destructor_arg;   skb_frag_t frags[17U]; } ;   360     typedef unsigned int sk_buff_data_t;   361     struct __anonstruct_ldv_34296_233 {   u32 stamp_us;   u32 stamp_jiffies; } ;   361     union __anonunion_ldv_34297_232 {   u64 v64;   struct __anonstruct_ldv_34296_233 ldv_34296; } ;   361     struct skb_mstamp {   union __anonunion_ldv_34297_232 ldv_34297; } ;   414     union __anonunion_ldv_34316_234 {   ktime_t tstamp;   struct skb_mstamp skb_mstamp; } ;   414     struct sec_path ;   414     struct __anonstruct_ldv_34332_236 {   __u16 csum_start;   __u16 csum_offset; } ;   414     union __anonunion_ldv_34333_235 {   __wsum csum;   struct __anonstruct_ldv_34332_236 ldv_34332; } ;   414     union __anonunion_ldv_34372_237 {   unsigned int napi_id;   dma_cookie_t dma_cookie; } ;   414     union __anonunion_ldv_34378_238 {   __u32 mark;   __u32 dropcount;   __u32 reserved_tailroom; } ;   414     struct sk_buff {   struct sk_buff *next;   struct sk_buff *prev;   union __anonunion_ldv_34316_234 ldv_34316;   struct sock *sk;   struct net_device *dev;   char cb[48U];   unsigned long _skb_refdst;   struct sec_path *sp;   unsigned int len;   unsigned int data_len;   __u16 mac_len;   __u16 hdr_len;   union __anonunion_ldv_34333_235 ldv_34333;   __u32 priority;   unsigned char ignore_df;   unsigned char cloned;   unsigned char ip_summed;   unsigned char nohdr;   unsigned char nfctinfo;   unsigned char pkt_type;   unsigned char fclone;   unsigned char ipvs_property;   unsigned char peeked;   unsigned char nf_trace;   __be16 protocol;   void (*destructor)(struct sk_buff *);   struct nf_conntrack *nfct;   struct nf_bridge_info *nf_bridge;   int skb_iif;   __u32 hash;   __be16 vlan_proto;   __u16 vlan_tci;   __u16 tc_index;   __u16 tc_verd;   __u16 queue_mapping;   unsigned char ndisc_nodetype;   unsigned char pfmemalloc;   unsigned char ooo_okay;   unsigned char l4_hash;   unsigned char wifi_acked_valid;   unsigned char wifi_acked;   unsigned char no_fcs;   unsigned char head_frag;   unsigned char encapsulation;   unsigned char encap_hdr_csum;   unsigned char csum_valid;   unsigned char csum_complete_sw;   union __anonunion_ldv_34372_237 ldv_34372;   __u32 secmark;   union __anonunion_ldv_34378_238 ldv_34378;   __be16 inner_protocol;   __u16 inner_transport_header;   __u16 inner_network_header;   __u16 inner_mac_header;   __u16 transport_header;   __u16 network_header;   __u16 mac_header;   sk_buff_data_t tail;   sk_buff_data_t end;   unsigned char *head;   unsigned char *data;   unsigned int truesize;   atomic_t users; } ;   641     struct dst_entry ;  3161     struct ethhdr {   unsigned char h_dest[6U];   unsigned char h_source[6U];   __be16 h_proto; } ;    34     struct ethtool_cmd {   __u32 cmd;   __u32 supported;   __u32 advertising;   __u16 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 transceiver;   __u8 autoneg;   __u8 mdio_support;   __u32 maxtxpkt;   __u32 maxrxpkt;   __u16 speed_hi;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __u32 lp_advertising;   __u32 reserved[2U]; } ;   125     struct ethtool_drvinfo {   __u32 cmd;   char driver[32U];   char 
version[32U];   char fw_version[32U];   char bus_info[32U];   char reserved1[32U];   char reserved2[12U];   __u32 n_priv_flags;   __u32 n_stats;   __u32 testinfo_len;   __u32 eedump_len;   __u32 regdump_len; } ;   187     struct ethtool_wolinfo {   __u32 cmd;   __u32 supported;   __u32 wolopts;   __u8 sopass[6U]; } ;   211     struct ethtool_regs {   __u32 cmd;   __u32 version;   __u32 len;   __u8 data[0U]; } ;   233     struct ethtool_eeprom {   __u32 cmd;   __u32 magic;   __u32 offset;   __u32 len;   __u8 data[0U]; } ;   259     struct ethtool_eee {   __u32 cmd;   __u32 supported;   __u32 advertised;   __u32 lp_advertised;   __u32 eee_active;   __u32 eee_enabled;   __u32 tx_lpi_enabled;   __u32 tx_lpi_timer;   __u32 reserved[2U]; } ;   288     struct ethtool_modinfo {   __u32 cmd;   __u32 type;   __u32 eeprom_len;   __u32 reserved[8U]; } ;   305     struct ethtool_coalesce {   __u32 cmd;   __u32 rx_coalesce_usecs;   __u32 rx_max_coalesced_frames;   __u32 rx_coalesce_usecs_irq;   __u32 rx_max_coalesced_frames_irq;   __u32 tx_coalesce_usecs;   __u32 tx_max_coalesced_frames;   __u32 tx_coalesce_usecs_irq;   __u32 tx_max_coalesced_frames_irq;   __u32 stats_block_coalesce_usecs;   __u32 use_adaptive_rx_coalesce;   __u32 use_adaptive_tx_coalesce;   __u32 pkt_rate_low;   __u32 rx_coalesce_usecs_low;   __u32 rx_max_coalesced_frames_low;   __u32 tx_coalesce_usecs_low;   __u32 tx_max_coalesced_frames_low;   __u32 pkt_rate_high;   __u32 rx_coalesce_usecs_high;   __u32 rx_max_coalesced_frames_high;   __u32 tx_coalesce_usecs_high;   __u32 tx_max_coalesced_frames_high;   __u32 rate_sample_interval; } ;   404     struct ethtool_ringparam {   __u32 cmd;   __u32 rx_max_pending;   __u32 rx_mini_max_pending;   __u32 rx_jumbo_max_pending;   __u32 tx_max_pending;   __u32 rx_pending;   __u32 rx_mini_pending;   __u32 rx_jumbo_pending;   __u32 tx_pending; } ;   441     struct ethtool_channels {   __u32 cmd;   __u32 max_rx;   __u32 max_tx;   __u32 max_other;   __u32 max_combined;   __u32 rx_count;   __u32 tx_count;   __u32 other_count;   __u32 combined_count; } ;   469     struct ethtool_pauseparam {   __u32 cmd;   __u32 autoneg;   __u32 rx_pause;   __u32 tx_pause; } ;   568     struct ethtool_test {   __u32 cmd;   __u32 flags;   __u32 reserved;   __u32 len;   __u64 data[0U]; } ;   600     struct ethtool_stats {   __u32 cmd;   __u32 n_stats;   __u64 data[0U]; } ;   642     struct ethtool_tcpip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be16 psrc;   __be16 pdst;   __u8 tos; } ;   675     struct ethtool_ah_espip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 spi;   __u8 tos; } ;   691     struct ethtool_usrip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 l4_4_bytes;   __u8 tos;   __u8 ip_ver;   __u8 proto; } ;   711     union ethtool_flow_union {   struct ethtool_tcpip4_spec tcp_ip4_spec;   struct ethtool_tcpip4_spec udp_ip4_spec;   struct ethtool_tcpip4_spec sctp_ip4_spec;   struct ethtool_ah_espip4_spec ah_ip4_spec;   struct ethtool_ah_espip4_spec esp_ip4_spec;   struct ethtool_usrip4_spec usr_ip4_spec;   struct ethhdr ether_spec;   __u8 hdata[52U]; } ;   722     struct ethtool_flow_ext {   __u8 padding[2U];   unsigned char h_dest[6U];   __be16 vlan_etype;   __be16 vlan_tci;   __be32 data[2U]; } ;   741     struct ethtool_rx_flow_spec {   __u32 flow_type;   union ethtool_flow_union h_u;   struct ethtool_flow_ext h_ext;   union ethtool_flow_union m_u;   struct ethtool_flow_ext m_ext;   __u64 ring_cookie;   __u32 location; } ;   767     struct ethtool_rxnfc {   __u32 cmd;   __u32 flow_type;   __u64 
data;   struct ethtool_rx_flow_spec fs;   __u32 rule_cnt;   __u32 rule_locs[0U]; } ;   933     struct ethtool_flash {   __u32 cmd;   __u32 region;   char data[128U]; } ;   941     struct ethtool_dump {   __u32 cmd;   __u32 version;   __u32 flag;   __u32 len;   __u8 data[0U]; } ;  1017     struct ethtool_ts_info {   __u32 cmd;   __u32 so_timestamping;   __s32 phc_index;   __u32 tx_types;   __u32 tx_reserved[3U];   __u32 rx_filters;   __u32 rx_reserved[3U]; } ;    44     enum ethtool_phys_id_state {   ETHTOOL_ID_INACTIVE = 0,   ETHTOOL_ID_ACTIVE = 1,   ETHTOOL_ID_ON = 2,   ETHTOOL_ID_OFF = 3 } ;    79     struct ethtool_ops {   int (*get_settings)(struct net_device *, struct ethtool_cmd *);   int (*set_settings)(struct net_device *, struct ethtool_cmd *);   void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);   int (*get_regs_len)(struct net_device *);   void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);   void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);   int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);   u32  (*get_msglevel)(struct net_device *);   void (*set_msglevel)(struct net_device *, u32 );   int (*nway_reset)(struct net_device *);   u32  (*get_link)(struct net_device *);   int (*get_eeprom_len)(struct net_device *);   int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);   int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);   void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);   int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);   void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);   void (*get_strings)(struct net_device *, u32 , u8 *);   int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state );   void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);   int (*begin)(struct net_device *);   void (*complete)(struct net_device *);   u32  (*get_priv_flags)(struct net_device *);   int (*set_priv_flags)(struct net_device *, u32 );   int (*get_sset_count)(struct net_device *, int);   int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *);   int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);   int (*flash_device)(struct net_device *, struct ethtool_flash *);   int (*reset)(struct net_device *, u32 *);   u32  (*get_rxfh_key_size)(struct net_device *);   u32  (*get_rxfh_indir_size)(struct net_device *);   int (*get_rxfh)(struct net_device *, u32 *, u8 *);   int (*set_rxfh)(struct net_device *, const u32 *, const u8 *);   void (*get_channels)(struct net_device *, struct ethtool_channels *);   int (*set_channels)(struct net_device *, struct ethtool_channels *);   int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);   int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *);   int (*set_dump)(struct net_device *, struct ethtool_dump *);   int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);   int (*get_module_info)(struct net_device *, struct ethtool_modinfo *);   int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_eee)(struct net_device *, struct ethtool_eee *);   int (*set_eee)(struct net_device *, struct 
ethtool_eee *); } ;   235     struct prot_inuse ;   236     struct netns_core {   struct ctl_table_header *sysctl_hdr;   int sysctl_somaxconn;   struct prot_inuse *inuse; } ;    38     struct u64_stats_sync { } ;   145     struct ipstats_mib {   u64 mibs[36U];   struct u64_stats_sync syncp; } ;    61     struct icmp_mib {   unsigned long mibs[28U]; } ;    67     struct icmpmsg_mib {   atomic_long_t mibs[512U]; } ;    72     struct icmpv6_mib {   unsigned long mibs[6U]; } ;    83     struct icmpv6msg_mib {   atomic_long_t mibs[512U]; } ;    93     struct tcp_mib {   unsigned long mibs[16U]; } ;   100     struct udp_mib {   unsigned long mibs[8U]; } ;   106     struct linux_mib {   unsigned long mibs[103U]; } ;   112     struct linux_xfrm_mib {   unsigned long mibs[29U]; } ;   118     struct netns_mib {   struct tcp_mib *tcp_statistics;   struct ipstats_mib *ip_statistics;   struct linux_mib *net_statistics;   struct udp_mib *udp_statistics;   struct udp_mib *udplite_statistics;   struct icmp_mib *icmp_statistics;   struct icmpmsg_mib *icmpmsg_statistics;   struct proc_dir_entry *proc_net_devsnmp6;   struct udp_mib *udp_stats_in6;   struct udp_mib *udplite_stats_in6;   struct ipstats_mib *ipv6_statistics;   struct icmpv6_mib *icmpv6_statistics;   struct icmpv6msg_mib *icmpv6msg_statistics;   struct linux_xfrm_mib *xfrm_statistics; } ;    26     struct netns_unix {   int sysctl_max_dgram_qlen;   struct ctl_table_header *ctl; } ;    12     struct netns_packet {   struct mutex sklist_lock;   struct hlist_head sklist; } ;    14     struct netns_frags {   int nqueues;   struct list_head lru_list;   spinlock_t lru_lock;   struct percpu_counter mem;   int timeout;   int high_thresh;   int low_thresh; } ;   180     struct tcpm_hash_bucket ;   181     struct ipv4_devconf ;   182     struct fib_rules_ops ;   183     struct fib_table ;   184     struct local_ports {   seqlock_t lock;   int range[2U]; } ;    22     struct ping_group_range {   seqlock_t lock;   kgid_t range[2U]; } ;    27     struct inet_peer_base ;    27     struct xt_table ;    27     struct netns_ipv4 {   struct ctl_table_header *forw_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *ipv4_hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *xfrm4_hdr;   struct ipv4_devconf *devconf_all;   struct ipv4_devconf *devconf_dflt;   struct fib_rules_ops *rules_ops;   bool fib_has_custom_rules;   struct fib_table *fib_local;   struct fib_table *fib_main;   struct fib_table *fib_default;   int fib_num_tclassid_users;   struct hlist_head *fib_table_hash;   struct sock *fibnl;   struct sock **icmp_sk;   struct inet_peer_base *peers;   struct tcpm_hash_bucket *tcp_metrics_hash;   unsigned int tcp_metrics_hash_log;   struct netns_frags frags;   struct xt_table *iptable_filter;   struct xt_table *iptable_mangle;   struct xt_table *iptable_raw;   struct xt_table *arptable_filter;   struct xt_table *iptable_security;   struct xt_table *nat_table;   int sysctl_icmp_echo_ignore_all;   int sysctl_icmp_echo_ignore_broadcasts;   int sysctl_icmp_ignore_bogus_error_responses;   int sysctl_icmp_ratelimit;   int sysctl_icmp_ratemask;   int sysctl_icmp_errors_use_inbound_ifaddr;   struct local_ports ip_local_ports;   int sysctl_tcp_ecn;   int sysctl_ip_no_pmtu_disc;   int sysctl_ip_fwd_use_pmtu;   int sysctl_fwmark_reflect;   int sysctl_tcp_fwmark_accept;   struct ping_group_range ping_group_range;   atomic_t dev_addr_genid;   unsigned long *sysctl_local_reserved_ports;   struct list_head mr_tables;   struct fib_rules_ops 
*mr_rules_ops;   atomic_t rt_genid; } ;   102     struct neighbour ;   102     struct dst_ops {   unsigned short family;   __be16 protocol;   unsigned int gc_thresh;   int (*gc)(struct dst_ops *);   struct dst_entry * (*check)(struct dst_entry *, __u32 );   unsigned int (*default_advmss)(const struct dst_entry *);   unsigned int (*mtu)(const struct dst_entry *);   u32 * (*cow_metrics)(struct dst_entry *, unsigned long);   void (*destroy)(struct dst_entry *);   void (*ifdown)(struct dst_entry *, struct net_device *, int);   struct dst_entry * (*negative_advice)(struct dst_entry *);   void (*link_failure)(struct sk_buff *);   void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 );   void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);   int (*local_out)(struct sk_buff *);   struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);   struct kmem_cache *kmem_cachep;   struct percpu_counter pcpuc_entries; } ;    73     struct netns_sysctl_ipv6 {   struct ctl_table_header *hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *icmp_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *xfrm6_hdr;   int bindv6only;   int flush_delay;   int ip6_rt_max_size;   int ip6_rt_gc_min_interval;   int ip6_rt_gc_timeout;   int ip6_rt_gc_interval;   int ip6_rt_gc_elasticity;   int ip6_rt_mtu_expires;   int ip6_rt_min_advmss;   int flowlabel_consistency;   int icmpv6_time;   int anycast_src_echo_reply;   int fwmark_reflect; } ;    35     struct ipv6_devconf ;    35     struct rt6_info ;    35     struct rt6_statistics ;    35     struct fib6_table ;    35     struct netns_ipv6 {   struct netns_sysctl_ipv6 sysctl;   struct ipv6_devconf *devconf_all;   struct ipv6_devconf *devconf_dflt;   struct inet_peer_base *peers;   struct netns_frags frags;   struct xt_table *ip6table_filter;   struct xt_table *ip6table_mangle;   struct xt_table *ip6table_raw;   struct xt_table *ip6table_security;   struct xt_table *ip6table_nat;   struct rt6_info *ip6_null_entry;   struct rt6_statistics *rt6_stats;   struct timer_list ip6_fib_timer;   struct hlist_head *fib_table_hash;   struct fib6_table *fib6_main_tbl;   struct dst_ops ip6_dst_ops;   unsigned int ip6_rt_gc_expire;   unsigned long ip6_rt_last_gc;   struct rt6_info *ip6_prohibit_entry;   struct rt6_info *ip6_blk_hole_entry;   struct fib6_table *fib6_local_tbl;   struct fib_rules_ops *fib6_rules_ops;   struct sock **icmp_sk;   struct sock *ndisc_sk;   struct sock *tcp_sk;   struct sock *igmp_sk;   struct list_head mr6_tables;   struct fib_rules_ops *mr6_rules_ops;   atomic_t dev_addr_genid;   atomic_t rt_genid; } ;    80     struct netns_nf_frag {   struct netns_sysctl_ipv6 sysctl;   struct netns_frags frags; } ;    86     struct netns_sysctl_lowpan {   struct ctl_table_header *frags_hdr; } ;    14     struct netns_ieee802154_lowpan {   struct netns_sysctl_lowpan sysctl;   struct netns_frags frags;   u16 max_dsize; } ;    21     struct sctp_mib ;    22     struct netns_sctp {   struct sctp_mib *sctp_statistics;   struct proc_dir_entry *proc_net_sctp;   struct ctl_table_header *sysctl_header;   struct sock *ctl_sock;   struct list_head local_addr_list;   struct list_head addr_waitq;   struct timer_list addr_wq_timer;   struct list_head auto_asconf_splist;   spinlock_t addr_wq_lock;   spinlock_t local_addr_lock;   unsigned int rto_initial;   unsigned int rto_min;   unsigned int rto_max;   int rto_alpha;   int rto_beta;   int max_burst;   int cookie_preserve_enable;   
char *sctp_hmac_alg;   unsigned int valid_cookie_life;   unsigned int sack_timeout;   unsigned int hb_interval;   int max_retrans_association;   int max_retrans_path;   int max_retrans_init;   int pf_retrans;   int sndbuf_policy;   int rcvbuf_policy;   int default_auto_asconf;   int addip_enable;   int addip_noauth;   int prsctp_enable;   int auth_enable;   int scope_policy;   int rwnd_upd_shift;   unsigned long max_autoclose; } ;   133     struct netns_dccp {   struct sock *v4_ctl_sk;   struct sock *v6_ctl_sk; } ;   324     struct nlattr ;   337     struct nf_logger ;   338     struct netns_nf {   struct proc_dir_entry *proc_netfilter;   const struct nf_logger *nf_loggers[13U];   struct ctl_table_header *nf_log_dir_header; } ;    17     struct ebt_table ;    18     struct netns_xt {   struct list_head tables[13U];   bool notrack_deprecated_warning;   struct ebt_table *broute_table;   struct ebt_table *frame_filter;   struct ebt_table *frame_nat;   bool ulog_warn_deprecated;   bool ebt_ulog_warn_deprecated; } ;    24     struct hlist_nulls_node ;    24     struct hlist_nulls_head {   struct hlist_nulls_node *first; } ;    20     struct hlist_nulls_node {   struct hlist_nulls_node *next;   struct hlist_nulls_node **pprev; } ;    32     struct nf_proto_net {   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table;   struct ctl_table_header *ctl_compat_header;   struct ctl_table *ctl_compat_table;   unsigned int users; } ;    24     struct nf_generic_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    29     struct nf_tcp_net {   struct nf_proto_net pn;   unsigned int timeouts[14U];   unsigned int tcp_loose;   unsigned int tcp_be_liberal;   unsigned int tcp_max_retrans; } ;    43     struct nf_udp_net {   struct nf_proto_net pn;   unsigned int timeouts[2U]; } ;    48     struct nf_icmp_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    53     struct nf_ip_net {   struct nf_generic_net generic;   struct nf_tcp_net tcp;   struct nf_udp_net udp;   struct nf_icmp_net icmp;   struct nf_icmp_net icmpv6;   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table; } ;    64     struct ct_pcpu {   spinlock_t lock;   struct hlist_nulls_head unconfirmed;   struct hlist_nulls_head dying;   struct hlist_nulls_head tmpl; } ;    72     struct ip_conntrack_stat ;    72     struct nf_ct_event_notifier ;    72     struct nf_exp_event_notifier ;    72     struct netns_ct {   atomic_t count;   unsigned int expect_count;   struct ctl_table_header *sysctl_header;   struct ctl_table_header *acct_sysctl_header;   struct ctl_table_header *tstamp_sysctl_header;   struct ctl_table_header *event_sysctl_header;   struct ctl_table_header *helper_sysctl_header;   char *slabname;   unsigned int sysctl_log_invalid;   unsigned int sysctl_events_retry_timeout;   int sysctl_events;   int sysctl_acct;   int sysctl_auto_assign_helper;   bool auto_assign_helper_warned;   int sysctl_tstamp;   int sysctl_checksum;   unsigned int htable_size;   seqcount_t generation;   struct kmem_cache *nf_conntrack_cachep;   struct hlist_nulls_head *hash;   struct hlist_head *expect_hash;   struct ct_pcpu *pcpu_lists;   struct ip_conntrack_stat *stat;   struct nf_ct_event_notifier *nf_conntrack_event_cb;   struct nf_exp_event_notifier *nf_expect_event_cb;   struct nf_ip_net nf_ct_proto;   unsigned int labels_used;   u8 label_words;   struct hlist_head *nat_bysource;   unsigned int nat_htable_size; } ;   111     struct nft_af_info ;   112     struct netns_nftables {   struct list_head 
af_info;   struct list_head commit_list;   struct nft_af_info *ipv4;   struct nft_af_info *ipv6;   struct nft_af_info *inet;   struct nft_af_info *arp;   struct nft_af_info *bridge;   u8 gencursor;   u8 genctr; } ;   499     struct flow_cache_percpu {   struct hlist_head *hash_table;   int hash_count;   u32 hash_rnd;   int hash_rnd_recalc;   struct tasklet_struct flush_tasklet; } ;    16     struct flow_cache {   u32 hash_shift;   struct flow_cache_percpu *percpu;   struct notifier_block hotcpu_notifier;   int low_watermark;   int high_watermark;   struct timer_list rnd_timer; } ;    25     struct xfrm_policy_hash {   struct hlist_head *table;   unsigned int hmask; } ;    17     struct netns_xfrm {   struct list_head state_all;   struct hlist_head *state_bydst;   struct hlist_head *state_bysrc;   struct hlist_head *state_byspi;   unsigned int state_hmask;   unsigned int state_num;   struct work_struct state_hash_work;   struct hlist_head state_gc_list;   struct work_struct state_gc_work;   struct list_head policy_all;   struct hlist_head *policy_byidx;   unsigned int policy_idx_hmask;   struct hlist_head policy_inexact[6U];   struct xfrm_policy_hash policy_bydst[6U];   unsigned int policy_count[6U];   struct work_struct policy_hash_work;   struct sock *nlsk;   struct sock *nlsk_stash;   u32 sysctl_aevent_etime;   u32 sysctl_aevent_rseqth;   int sysctl_larval_drop;   u32 sysctl_acq_expires;   struct ctl_table_header *sysctl_hdr;   struct dst_ops xfrm4_dst_ops;   struct dst_ops xfrm6_dst_ops;   spinlock_t xfrm_state_lock;   rwlock_t xfrm_policy_lock;   struct mutex xfrm_cfg_mutex;   struct flow_cache flow_cache_global;   atomic_t flow_cache_genid;   struct list_head flow_cache_gc_list;   spinlock_t flow_cache_gc_lock;   struct work_struct flow_cache_gc_work;   struct work_struct flow_cache_flush_work;   struct mutex flow_flush_sem; } ;    74     struct net_generic ;    75     struct netns_ipvs ;    76     struct net {   atomic_t passive;   atomic_t count;   spinlock_t rules_mod_lock;   struct list_head list;   struct list_head cleanup_list;   struct list_head exit_list;   struct user_namespace *user_ns;   unsigned int proc_inum;   struct proc_dir_entry *proc_net;   struct proc_dir_entry *proc_net_stat;   struct ctl_table_set sysctls;   struct sock *rtnl;   struct sock *genl_sock;   struct list_head dev_base_head;   struct hlist_head *dev_name_head;   struct hlist_head *dev_index_head;   unsigned int dev_base_seq;   int ifindex;   unsigned int dev_unreg_count;   struct list_head rules_ops;   struct net_device *loopback_dev;   struct netns_core core;   struct netns_mib mib;   struct netns_packet packet;   struct netns_unix unx;   struct netns_ipv4 ipv4;   struct netns_ipv6 ipv6;   struct netns_ieee802154_lowpan ieee802154_lowpan;   struct netns_sctp sctp;   struct netns_dccp dccp;   struct netns_nf nf;   struct netns_xt xt;   struct netns_ct ct;   struct netns_nftables nft;   struct netns_nf_frag nf_frag;   struct sock *nfnl;   struct sock *nfnl_stash;   struct sk_buff_head wext_nlevents;   struct net_generic *gen;   struct netns_xfrm xfrm;   struct netns_ipvs *ipvs;   struct sock *diag_nlsk;   atomic_t fnhe_genid; } ;   400     struct dsa_chip_data {   struct device *mii_bus;   int sw_addr;   char *port_names[12U];   s8 *rtable; } ;    46     struct dsa_platform_data {   struct device *netdev;   int nr_chips;   struct dsa_chip_data *chip; } ;    61     struct dsa_switch ;    61     struct dsa_switch_tree {   struct dsa_platform_data *pd;   struct net_device *master_netdev;   __be16 
tag_protocol;   s8 cpu_switch;   s8 cpu_port;   int link_poll_needed;   struct work_struct link_poll_work;   struct timer_list link_poll_timer;   struct dsa_switch *ds[4U]; } ;    94     struct dsa_switch_driver ;    94     struct mii_bus ;    94     struct dsa_switch {   struct dsa_switch_tree *dst;   int index;   struct dsa_chip_data *pd;   struct dsa_switch_driver *drv;   struct mii_bus *master_mii_bus;   u32 dsa_port_mask;   u32 phys_port_mask;   struct mii_bus *slave_mii_bus;   struct net_device *ports[12U]; } ;   146     struct dsa_switch_driver {   struct list_head list;   __be16 tag_protocol;   int priv_size;   char * (*probe)(struct mii_bus *, int);   int (*setup)(struct dsa_switch *);   int (*set_addr)(struct dsa_switch *, u8 *);   int (*phy_read)(struct dsa_switch *, int, int);   int (*phy_write)(struct dsa_switch *, int, int, u16 );   void (*poll_link)(struct dsa_switch *);   void (*get_strings)(struct dsa_switch *, int, uint8_t *);   void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *);   int (*get_sset_count)(struct dsa_switch *); } ;   205     struct ieee_ets {   __u8 willing;   __u8 ets_cap;   __u8 cbs;   __u8 tc_tx_bw[8U];   __u8 tc_rx_bw[8U];   __u8 tc_tsa[8U];   __u8 prio_tc[8U];   __u8 tc_reco_bw[8U];   __u8 tc_reco_tsa[8U];   __u8 reco_prio_tc[8U]; } ;    69     struct ieee_maxrate {   __u64 tc_maxrate[8U]; } ;    80     struct ieee_pfc {   __u8 pfc_cap;   __u8 pfc_en;   __u8 mbc;   __u16 delay;   __u64 requests[8U];   __u64 indications[8U]; } ;   100     struct cee_pg {   __u8 willing;   __u8 error;   __u8 pg_en;   __u8 tcs_supported;   __u8 pg_bw[8U];   __u8 prio_pg[8U]; } ;   123     struct cee_pfc {   __u8 willing;   __u8 error;   __u8 pfc_en;   __u8 tcs_supported; } ;   138     struct dcb_app {   __u8 selector;   __u8 priority;   __u16 protocol; } ;   167     struct dcb_peer_app_info {   __u8 willing;   __u8 error; } ;    40     struct dcbnl_rtnl_ops {   int (*ieee_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_setets)(struct net_device *, struct ieee_ets *);   int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_getapp)(struct net_device *, struct dcb_app *);   int (*ieee_setapp)(struct net_device *, struct dcb_app *);   int (*ieee_delapp)(struct net_device *, struct dcb_app *);   int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *);   u8  (*getstate)(struct net_device *);   u8  (*setstate)(struct net_device *, u8 );   void (*getpermhwaddr)(struct net_device *, u8 *);   void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgtx)(struct net_device *, int, u8 );   void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgrx)(struct net_device *, int, u8 );   void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);   void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);   void (*setpfccfg)(struct net_device *, int, u8 );   void (*getpfccfg)(struct net_device *, int, u8 *);   u8  (*setall)(struct net_device *);   u8  (*getcap)(struct net_device *, int, u8 *);   int (*getnumtcs)(struct net_device *, int, u8 *);   int (*setnumtcs)(struct 
net_device *, int, u8 );   u8  (*getpfcstate)(struct net_device *);   void (*setpfcstate)(struct net_device *, u8 );   void (*getbcncfg)(struct net_device *, int, u32 *);   void (*setbcncfg)(struct net_device *, int, u32 );   void (*getbcnrp)(struct net_device *, int, u8 *);   void (*setbcnrp)(struct net_device *, int, u8 );   u8  (*setapp)(struct net_device *, u8 , u16 , u8 );   u8  (*getapp)(struct net_device *, u8 , u16 );   u8  (*getfeatcfg)(struct net_device *, int, u8 *);   u8  (*setfeatcfg)(struct net_device *, int, u8 );   u8  (*getdcbx)(struct net_device *);   u8  (*setdcbx)(struct net_device *, u8 );   int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *);   int (*peer_getapptable)(struct net_device *, struct dcb_app *);   int (*cee_peer_getpg)(struct net_device *, struct cee_pg *);   int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;   102     struct taskstats {   __u16 version;   __u32 ac_exitcode;   __u8 ac_flag;   __u8 ac_nice;   __u64 cpu_count;   __u64 cpu_delay_total;   __u64 blkio_count;   __u64 blkio_delay_total;   __u64 swapin_count;   __u64 swapin_delay_total;   __u64 cpu_run_real_total;   __u64 cpu_run_virtual_total;   char ac_comm[32U];   __u8 ac_sched;   __u8 ac_pad[3U];   __u32 ac_uid;   __u32 ac_gid;   __u32 ac_pid;   __u32 ac_ppid;   __u32 ac_btime;   __u64 ac_etime;   __u64 ac_utime;   __u64 ac_stime;   __u64 ac_minflt;   __u64 ac_majflt;   __u64 coremem;   __u64 virtmem;   __u64 hiwater_rss;   __u64 hiwater_vm;   __u64 read_char;   __u64 write_char;   __u64 read_syscalls;   __u64 write_syscalls;   __u64 read_bytes;   __u64 write_bytes;   __u64 cancelled_write_bytes;   __u64 nvcsw;   __u64 nivcsw;   __u64 ac_utimescaled;   __u64 ac_stimescaled;   __u64 cpu_scaled_run_real_total;   __u64 freepages_count;   __u64 freepages_delay_total; } ;    58     struct percpu_ref ;    54     typedef void percpu_ref_func_t(struct percpu_ref *);    55     struct percpu_ref {   atomic_t count;   unsigned int *pcpu_count;   percpu_ref_func_t *release;   percpu_ref_func_t *confirm_kill;   struct callback_head rcu; } ;   205     struct cgroup_root ;   206     struct cgroup_subsys ;   207     struct cgroup ;    58     struct cgroup_subsys_state {   struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   struct list_head sibling;   struct list_head children;   int id;   unsigned int flags;   u64 serial_nr;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   167     struct cgroup {   struct cgroup_subsys_state self;   unsigned long flags;   int id;   int populated_cnt;   struct kernfs_node *kn;   struct kernfs_node *populated_kn;   unsigned int child_subsys_mask;   struct cgroup_subsys_state *subsys[12U];   struct cgroup_root *root;   struct list_head cset_links;   struct list_head e_csets[12U];   struct list_head release_list;   struct list_head pidlists;   struct mutex pidlist_mutex;   wait_queue_head_t offline_waitq; } ;   253     struct cgroup_root {   struct kernfs_root *kf_root;   unsigned int subsys_mask;   int hierarchy_id;   struct cgroup cgrp;   atomic_t nr_cgrps;   struct list_head root_list;   unsigned int flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   355     struct css_set {   atomic_t refcount;   struct hlist_node hlist;   struct list_head tasks;   struct list_head mg_tasks;   struct list_head cgrp_links;   struct cgroup *dfl_cgrp;   struct cgroup_subsys_state *subsys[12U];   struct list_head 
mg_preload_node;   struct list_head mg_node;   struct cgroup *mg_src_cgrp;   struct css_set *mg_dst_cset;   struct list_head e_cset_node[12U];   struct callback_head callback_head; } ;   438     struct cftype {   char name[64U];   int private;   umode_t mode;   size_t max_write_len;   unsigned int flags;   struct cgroup_subsys *ss;   struct list_head node;   struct kernfs_ops *kf_ops;   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   struct lock_class_key lockdep_key; } ;   609     struct cgroup_taskset ;   617     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*fork)(struct task_struct *);   void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   int disabled;   int early_init;   bool broken_hierarchy;   bool warned_broken_hierarchy;   int id;   const char *name;   struct cgroup_root *root;   struct idr css_idr;   struct list_head cfts;   struct cftype *base_cftypes; } ;   919     struct netprio_map {   struct callback_head rcu;   u32 priomap_len;   u32 priomap[]; } ;  3161     struct mnt_namespace ;  3162     struct ipc_namespace ;  3163     struct nsproxy {   atomic_t count;   struct uts_namespace *uts_ns;   struct ipc_namespace *ipc_ns;   struct mnt_namespace *mnt_ns;   struct pid_namespace *pid_ns_for_children;   struct net *net_ns; } ;    41     struct nlmsghdr {   __u32 nlmsg_len;   __u16 nlmsg_type;   __u16 nlmsg_flags;   __u32 nlmsg_seq;   __u32 nlmsg_pid; } ;   145     struct nlattr {   __u16 nla_len;   __u16 nla_type; } ;   104     struct netlink_callback {   struct sk_buff *skb;   const struct nlmsghdr *nlh;   int (*dump)(struct sk_buff *, struct netlink_callback *);   int (*done)(struct netlink_callback *);   void *data;   struct module *module;   u16 family;   u16 min_dump_alloc;   unsigned int prev_seq;   unsigned int seq;   long args[6U]; } ;   180     struct ndmsg {   __u8 ndm_family;   __u8 ndm_pad1;   __u16 ndm_pad2;   __s32 ndm_ifindex;   __u16 ndm_state;   __u8 ndm_flags;   __u8 ndm_type; } ;    39     struct rtnl_link_stats64 {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 rx_errors;   __u64 tx_errors;   __u64 rx_dropped;   __u64 tx_dropped;   __u64 multicast;   __u64 collisions;   __u64 rx_length_errors;   __u64 rx_over_errors;   __u64 rx_crc_errors;   __u64 rx_frame_errors;   __u64 rx_fifo_errors;   __u64 rx_missed_errors;   __u64 tx_aborted_errors;   __u64 tx_carrier_errors;   __u64 tx_fifo_errors;   __u64 tx_heartbeat_errors;   __u64 tx_window_errors;   __u64 rx_compressed;   __u64 tx_compressed; } ;   547  
   struct ifla_vf_info {   __u32 vf;   __u8 mac[32U];   __u32 vlan;   __u32 qos;   __u32 spoofchk;   __u32 linkstate;   __u32 min_tx_rate;   __u32 max_tx_rate; } ;    28     struct netpoll_info ;    29     struct phy_device ;    30     struct wireless_dev ;    61     enum netdev_tx {   __NETDEV_TX_MIN = -2147483648,   NETDEV_TX_OK = 0,   NETDEV_TX_BUSY = 16,   NETDEV_TX_LOCKED = 32 } ;   106     typedef enum netdev_tx netdev_tx_t;   125     struct net_device_stats {   unsigned long rx_packets;   unsigned long tx_packets;   unsigned long rx_bytes;   unsigned long tx_bytes;   unsigned long rx_errors;   unsigned long tx_errors;   unsigned long rx_dropped;   unsigned long tx_dropped;   unsigned long multicast;   unsigned long collisions;   unsigned long rx_length_errors;   unsigned long rx_over_errors;   unsigned long rx_crc_errors;   unsigned long rx_frame_errors;   unsigned long rx_fifo_errors;   unsigned long rx_missed_errors;   unsigned long tx_aborted_errors;   unsigned long tx_carrier_errors;   unsigned long tx_fifo_errors;   unsigned long tx_heartbeat_errors;   unsigned long tx_window_errors;   unsigned long rx_compressed;   unsigned long tx_compressed; } ;   186     struct neigh_parms ;   187     struct netdev_hw_addr {   struct list_head list;   unsigned char addr[32U];   unsigned char type;   bool global_use;   int sync_cnt;   int refcount;   int synced;   struct callback_head callback_head; } ;   207     struct netdev_hw_addr_list {   struct list_head list;   int count; } ;   212     struct hh_cache {   u16 hh_len;   u16 __pad;   seqlock_t hh_lock;   unsigned long hh_data[16U]; } ;   241     struct header_ops {   int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int);   int (*parse)(const struct sk_buff *, unsigned char *);   int (*rebuild)(struct sk_buff *);   int (*cache)(const struct neighbour *, struct hh_cache *, __be16 );   void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); } ;   292     struct napi_struct {   struct list_head poll_list;   unsigned long state;   int weight;   unsigned int gro_count;   int (*poll)(struct napi_struct *, int);   spinlock_t poll_lock;   int poll_owner;   struct net_device *dev;   struct sk_buff *gro_list;   struct sk_buff *skb;   struct list_head dev_list;   struct hlist_node napi_hash_node;   unsigned int napi_id; } ;   336     enum rx_handler_result {   RX_HANDLER_CONSUMED = 0,   RX_HANDLER_ANOTHER = 1,   RX_HANDLER_EXACT = 2,   RX_HANDLER_PASS = 3 } ;   384     typedef enum rx_handler_result rx_handler_result_t;   385     typedef rx_handler_result_t  rx_handler_func_t(struct sk_buff **);   522     struct Qdisc ;   522     struct netdev_queue {   struct net_device *dev;   struct Qdisc *qdisc;   struct Qdisc *qdisc_sleeping;   struct kobject kobj;   int numa_node;   spinlock_t _xmit_lock;   int xmit_lock_owner;   unsigned long trans_start;   unsigned long trans_timeout;   unsigned long state;   struct dql dql; } ;   591     struct rps_map {   unsigned int len;   struct callback_head rcu;   u16 cpus[0U]; } ;   603     struct rps_dev_flow {   u16 cpu;   u16 filter;   unsigned int last_qtail; } ;   615     struct rps_dev_flow_table {   unsigned int mask;   struct callback_head rcu;   struct rps_dev_flow flows[0U]; } ;   666     struct netdev_rx_queue {   struct rps_map *rps_map;   struct rps_dev_flow_table *rps_flow_table;   struct kobject kobj;   struct net_device *dev; } ;   689     struct xps_map {   unsigned int len;   unsigned int alloc_len;   struct 
callback_head rcu;   u16 queues[0U]; } ;   702     struct xps_dev_maps {   struct callback_head rcu;   struct xps_map *cpu_map[0U]; } ;   713     struct netdev_tc_txq {   u16 count;   u16 offset; } ;   724     struct netdev_fcoe_hbainfo {   char manufacturer[64U];   char serial_number[64U];   char hardware_version[64U];   char driver_version[64U];   char optionrom_version[64U];   char firmware_version[64U];   char model[256U];   char model_description[256U]; } ;   740     struct netdev_phys_port_id {   unsigned char id[32U];   unsigned char id_len; } ;   753     struct net_device_ops {   int (*ndo_init)(struct net_device *);   void (*ndo_uninit)(struct net_device *);   int (*ndo_open)(struct net_device *);   int (*ndo_stop)(struct net_device *);   netdev_tx_t  (*ndo_start_xmit)(struct sk_buff *, struct net_device *);   u16  (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16  (*)(struct net_device *, struct sk_buff *));   void (*ndo_change_rx_flags)(struct net_device *, int);   void (*ndo_set_rx_mode)(struct net_device *);   int (*ndo_set_mac_address)(struct net_device *, void *);   int (*ndo_validate_addr)(struct net_device *);   int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);   int (*ndo_set_config)(struct net_device *, struct ifmap *);   int (*ndo_change_mtu)(struct net_device *, int);   int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);   void (*ndo_tx_timeout)(struct net_device *);   struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);   struct net_device_stats * (*ndo_get_stats)(struct net_device *);   int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 );   int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 );   void (*ndo_poll_controller)(struct net_device *);   int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *);   void (*ndo_netpoll_cleanup)(struct net_device *);   int (*ndo_busy_poll)(struct napi_struct *);   int (*ndo_set_vf_mac)(struct net_device *, int, u8 *);   int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 );   int (*ndo_set_vf_rate)(struct net_device *, int, int, int);   int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool );   int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);   int (*ndo_set_vf_link_state)(struct net_device *, int, int);   int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);   int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);   int (*ndo_setup_tc)(struct net_device *, u8 );   int (*ndo_fcoe_enable)(struct net_device *);   int (*ndo_fcoe_disable)(struct net_device *);   int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_ddp_done)(struct net_device *, u16 );   int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *);   int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int);   int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 );   int (*ndo_add_slave)(struct net_device *, struct net_device *);   int (*ndo_del_slave)(struct net_device *, struct net_device *);   netdev_features_t  (*ndo_fix_features)(struct net_device *, netdev_features_t );   int (*ndo_set_features)(struct net_device *, netdev_features_t );   int (*ndo_neigh_construct)(struct neighbour *);   void (*ndo_neigh_destroy)(struct neighbour *);   int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, 
struct net_device *, const unsigned char *, u16 );   int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *);   int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, int);   int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *);   int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 );   int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *);   int (*ndo_change_carrier)(struct net_device *, bool );   int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_port_id *);   void (*ndo_add_vxlan_port)(struct net_device *, sa_family_t , __be16 );   void (*ndo_del_vxlan_port)(struct net_device *, sa_family_t , __be16 );   void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);   void (*ndo_dfwd_del_station)(struct net_device *, void *);   netdev_tx_t  (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *);   int (*ndo_get_lock_subclass)(struct net_device *); } ;  1187     struct __anonstruct_adj_list_250 {   struct list_head upper;   struct list_head lower; } ;  1187     struct __anonstruct_all_adj_list_251 {   struct list_head upper;   struct list_head lower; } ;  1187     struct iw_handler_def ;  1187     struct iw_public_data ;  1187     struct forwarding_accel_ops ;  1187     struct vlan_info ;  1187     struct tipc_bearer ;  1187     struct in_device ;  1187     struct dn_dev ;  1187     struct inet6_dev ;  1187     struct cpu_rmap ;  1187     struct pcpu_lstats ;  1187     struct pcpu_sw_netstats ;  1187     struct pcpu_dstats ;  1187     struct pcpu_vstats ;  1187     union __anonunion_ldv_42047_252 {   void *ml_priv;   struct pcpu_lstats *lstats;   struct pcpu_sw_netstats *tstats;   struct pcpu_dstats *dstats;   struct pcpu_vstats *vstats; } ;  1187     struct garp_port ;  1187     struct mrp_port ;  1187     struct rtnl_link_ops ;  1187     struct net_device {   char name[16U];   struct hlist_node name_hlist;   char *ifalias;   unsigned long mem_end;   unsigned long mem_start;   unsigned long base_addr;   int irq;   unsigned long state;   struct list_head dev_list;   struct list_head napi_list;   struct list_head unreg_list;   struct list_head close_list;   struct __anonstruct_adj_list_250 adj_list;   struct __anonstruct_all_adj_list_251 all_adj_list;   netdev_features_t features;   netdev_features_t hw_features;   netdev_features_t wanted_features;   netdev_features_t vlan_features;   netdev_features_t hw_enc_features;   netdev_features_t mpls_features;   int ifindex;   int iflink;   struct net_device_stats stats;   atomic_long_t rx_dropped;   atomic_long_t tx_dropped;   atomic_t carrier_changes;   const struct iw_handler_def *wireless_handlers;   struct iw_public_data *wireless_data;   const struct net_device_ops *netdev_ops;   const struct ethtool_ops *ethtool_ops;   const struct forwarding_accel_ops *fwd_ops;   const struct header_ops *header_ops;   unsigned int flags;   unsigned int priv_flags;   unsigned short gflags;   unsigned short padded;   unsigned char operstate;   unsigned char link_mode;   unsigned char if_port;   unsigned char dma;   unsigned int mtu;   unsigned short type;   unsigned short hard_header_len;   unsigned short needed_headroom;   unsigned short needed_tailroom;   unsigned char perm_addr[32U];   unsigned char addr_assign_type;   unsigned char addr_len;   unsigned short neigh_priv_len;   unsigned short dev_id;   unsigned short dev_port;   spinlock_t addr_list_lock;   struct netdev_hw_addr_list uc; 
  struct netdev_hw_addr_list mc;   struct netdev_hw_addr_list dev_addrs;   struct kset *queues_kset;   bool uc_promisc;   unsigned int promiscuity;   unsigned int allmulti;   struct vlan_info *vlan_info;   struct dsa_switch_tree *dsa_ptr;   struct tipc_bearer *tipc_ptr;   void *atalk_ptr;   struct in_device *ip_ptr;   struct dn_dev *dn_ptr;   struct inet6_dev *ip6_ptr;   void *ax25_ptr;   struct wireless_dev *ieee80211_ptr;   unsigned long last_rx;   unsigned char *dev_addr;   struct netdev_rx_queue *_rx;   unsigned int num_rx_queues;   unsigned int real_num_rx_queues;   rx_handler_func_t *rx_handler;   void *rx_handler_data;   struct netdev_queue *ingress_queue;   unsigned char broadcast[32U];   struct netdev_queue *_tx;   unsigned int num_tx_queues;   unsigned int real_num_tx_queues;   struct Qdisc *qdisc;   unsigned long tx_queue_len;   spinlock_t tx_global_lock;   struct xps_dev_maps *xps_maps;   struct cpu_rmap *rx_cpu_rmap;   unsigned long trans_start;   int watchdog_timeo;   struct timer_list watchdog_timer;   int *pcpu_refcnt;   struct list_head todo_list;   struct hlist_node index_hlist;   struct list_head link_watch_list;   unsigned char reg_state;   bool dismantle;   unsigned short rtnl_link_state;   void (*destructor)(struct net_device *);   struct netpoll_info *npinfo;   struct net *nd_net;   union __anonunion_ldv_42047_252 ldv_42047;   struct garp_port *garp_port;   struct mrp_port *mrp_port;   struct device dev;   const struct attribute_group *sysfs_groups[4U];   const struct attribute_group *sysfs_rx_queue_group;   const struct rtnl_link_ops *rtnl_link_ops;   unsigned int gso_max_size;   u16 gso_max_segs;   const struct dcbnl_rtnl_ops *dcbnl_ops;   u8 num_tc;   struct netdev_tc_txq tc_to_txq[16U];   u8 prio_tc_map[16U];   unsigned int fcoe_ddp_xid;   struct netprio_map *priomap;   struct phy_device *phydev;   struct lock_class_key *qdisc_tx_busylock;   int group;   struct pm_qos_request pm_qos_req; } ;  1806     struct pcpu_sw_netstats {   u64 rx_packets;   u64 rx_bytes;   u64 tx_packets;   u64 tx_bytes;   struct u64_stats_sync syncp; } ;  2548     enum skb_free_reason {   SKB_REASON_CONSUMED = 0,   SKB_REASON_DROPPED = 1 } ;    55     struct firmware {   size_t size;   const u8 *data;   struct page **pages;   void *priv; } ;    73     struct basic_ring {   u8 *ringBase;   u32 lastWrite; } ;    35     struct transmit_ring {   u8 *ringBase;   u32 lastWrite;   u32 lastRead;   int writeRegister; } ;    46     struct typhoon_indexes {   volatile __le32 rxHiCleared;   volatile __le32 rxLoCleared;   volatile __le32 rxBuffReady;   volatile __le32 respCleared;   volatile __le32 txLoCleared;   volatile __le32 txHiCleared;   volatile __le32 rxLoReady;   volatile __le32 rxBuffCleared;   volatile __le32 cmdCleared;   volatile __le32 respReady;   volatile __le32 rxHiReady; } ;    81     struct typhoon_interface {   __le32 ringIndex;   __le32 ringIndexHi;   __le32 txLoAddr;   __le32 txLoAddrHi;   __le32 txLoSize;   __le32 txHiAddr;   __le32 txHiAddrHi;   __le32 txHiSize;   __le32 rxLoAddr;   __le32 rxLoAddrHi;   __le32 rxLoSize;   __le32 rxBuffAddr;   __le32 rxBuffAddrHi;   __le32 rxBuffSize;   __le32 cmdAddr;   __le32 cmdAddrHi;   __le32 cmdSize;   __le32 respAddr;   __le32 respAddrHi;   __le32 respSize;   __le32 zeroAddr;   __le32 zeroAddrHi;   __le32 rxHiAddr;   __le32 rxHiAddrHi;   __le32 rxHiSize; } ;   129     struct __anonstruct_frag_258 {   __le32 addr;   __le32 addrHi; } ;   129     union __anonunion_ldv_44235_257 {   struct __anonstruct_frag_258 frag;   u64 tx_addr; } ;   129   
  struct tx_desc {   u8 flags;   u8 numDesc;   __le16 len;   union __anonunion_ldv_44235_257 ldv_44235;   __le32 processFlags; } ;   178     struct tcpopt_desc {   u8 flags;   u8 numDesc;   __le16 mss_flags;   __le32 respAddrLo;   __le32 bytesTx;   __le32 status; } ;   231     struct rx_desc {   u8 flags;   u8 numDesc;   __le16 frameLen;   u32 addr;   u32 addrHi;   __le32 rxStatus;   __le16 filterResults;   __le16 ipsecResults;   __be32 vlanTag; } ;   288     struct rx_free {   __le32 physAddr;   __le32 physAddrHi;   u32 virtAddr;   u32 virtAddrHi; } ;   305     struct cmd_desc {   u8 flags;   u8 numDesc;   __le16 cmd;   u16 seqNo;   __le16 parm1;   __le32 parm2;   __le32 parm3; } ;   351     struct resp_desc {   u8 flags;   u8 numDesc;   __le16 cmd;   __le16 seqNo;   __le16 parm1;   __le32 parm2;   __le32 parm3; } ;   363     struct stats_resp {   u8 flags;   u8 numDesc;   __le16 cmd;   __le16 seqNo;   __le16 unused;   __le32 txPackets;   __le64 txBytes;   __le32 txDeferred;   __le32 txLateCollisions;   __le32 txCollisions;   __le32 txCarrierLost;   __le32 txMultipleCollisions;   __le32 txExcessiveCollisions;   __le32 txFifoUnderruns;   __le32 txMulticastTxOverflows;   __le32 txFiltered;   __le32 rxPacketsGood;   __le64 rxBytesGood;   __le32 rxFifoOverruns;   __le32 BadSSD;   __le32 rxCrcErrors;   __le32 rxOversized;   __le32 rxBroadcast;   __le32 rxMulticast;   __le32 rxOverflow;   __le32 rxFiltered;   __le32 linkStatus;   __le32 unused2;   __le32 unused3; } ;   492     struct typhoon_file_header {   u8 tag[8U];   __le32 version;   __le32 numSections;   __le32 startAddr;   __le32 hmacDigest[5U]; } ;   522     struct typhoon_section_header {   __le32 len;   u16 checksum;   u16 reserved;   __le32 startAddr; } ;   249     struct typhoon_card_info {   const char *name;   const int capabilities; } ;   344     struct typhoon_shared {   struct typhoon_interface iface;   struct typhoon_indexes indexes;   struct tx_desc txLo[128U];   struct rx_desc rxLo[32U];   struct rx_desc rxHi[32U];   struct cmd_desc cmd[16U];   struct resp_desc resp[32U];   struct rx_free rxBuff[128U];   u32 zeroWord;   struct tx_desc txHi[2U]; } ;   362     struct rxbuff_ent {   struct sk_buff *skb;   dma_addr_t dma_addr; } ;   367     struct typhoon {   struct transmit_ring txLoRing;   struct pci_dev *tx_pdev;   void *tx_ioaddr;   u32 txlo_dma_addr;   void *ioaddr;   struct typhoon_indexes *indexes;   u8 awaiting_resp;   u8 duplex;   u8 speed;   u8 card_state;   struct basic_ring rxLoRing;   struct pci_dev *pdev;   struct net_device *dev;   struct napi_struct napi;   struct basic_ring rxHiRing;   struct basic_ring rxBuffRing;   struct rxbuff_ent rxbuffers[127U];   spinlock_t command_lock;   struct basic_ring cmdRing;   struct basic_ring respRing;   struct net_device_stats stats;   struct net_device_stats stats_saved;   struct typhoon_shared *shared;   dma_addr_t shared_dma;   __le16 xcvr_select;   __le16 wol_events;   __le32 offload;   int capabilities;   struct transmit_ring txHiRing; } ;  5467     typedef int ldv_func_ret_type___0;     1     void * __builtin_memcpy(void *, const void *, unsigned long);     1     long int __builtin_expect(long exp, long c);    33     extern struct module __this_module;   358     extern struct pv_irq_ops pv_irq_ops;    72     void set_bit(long nr, volatile unsigned long *addr);   110     void clear_bit(long nr, volatile unsigned long *addr);   204     int test_and_set_bit(long nr, volatile unsigned long *addr);   250     int test_and_clear_bit(long nr, volatile unsigned long *addr);   308  
   int constant_test_bit(long nr, const volatile unsigned long *addr);     7     __u32  __arch_swab32(__u32 val);    46     __u16  __fswab16(__u16 val);    57     __u32  __fswab32(__u32 val);   139     int printk(const char *, ...);   165     void __might_sleep(const char *, int, int);   391     int snprintf(char *, size_t , const char *, ...);    71     void warn_slowpath_null(const char *, const int);    23     unsigned long int __phys_addr(unsigned long);    34     void * __memcpy(void *, const void *, size_t );    55     void * memset(void *, int, size_t );    60     int memcmp(const void *, const void *, size_t );    26     size_t  strlcpy(char *, const char *, size_t );   802     unsigned long int arch_local_save_flags();   155     int arch_irqs_disabled_flags(unsigned long flags);    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    22     void _raw_spin_lock(raw_spinlock_t *);    37     int _raw_spin_trylock(raw_spinlock_t *);    39     void _raw_spin_unlock(raw_spinlock_t *);   290     raw_spinlock_t * spinlock_check(spinlock_t *lock);   301     void spin_lock(spinlock_t *lock);   365     void ldv_spin_lock_55(spinlock_t *lock);   387     int spin_trylock(spinlock_t *lock);   333     int ldv_spin_trylock_53(spinlock_t *lock);   349     void spin_unlock(spinlock_t *lock);   409     void ldv_spin_unlock_54(spinlock_t *lock);     5     void __ldv_spin_lock(spinlock_t *);     8     void ldv___ldv_spin_lock_7(spinlock_t *ldv_func_arg1);    12     void ldv___ldv_spin_lock_15(spinlock_t *ldv_func_arg1);    16     void ldv___ldv_spin_lock_17(spinlock_t *ldv_func_arg1);    20     void ldv___ldv_spin_lock_19(spinlock_t *ldv_func_arg1);    24     void ldv___ldv_spin_lock_21(spinlock_t *ldv_func_arg1);    28     void ldv___ldv_spin_lock_49(spinlock_t *ldv_func_arg1);    44     void ldv_spin_lock_addr_list_lock_of_net_device();    60     void ldv_spin_lock_command_lock_of_typhoon();    61     void ldv_spin_unlock_command_lock_of_typhoon();    62     int ldv_spin_trylock_command_lock_of_typhoon();    76     void ldv_spin_lock_lock();    84     void ldv_spin_lock_lock_of_NOT_ARG_SIGN();   100     void ldv_spin_lock_node_size_lock_of_pglist_data();   116     void ldv_spin_lock_siglock_of_sighand_struct();   155     int rx_copybreak = 200;   162     unsigned int use_mmio = 2U;   168     const int multicast_filter_limit = 32;    31     unsigned int ioread32(void *);    37     void iowrite32(u32 , void *);    72     void pci_iounmap(struct pci_dev *, void *);    17     void * pci_iomap(struct pci_dev *, int, unsigned long);    86     const char * kobject_name(const struct kobject *kobj);   380     long int schedule_timeout_uninterruptible(long);   123     int request_threaded_irq(unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   128     int request_irq(unsigned int irq, irqreturn_t  (*handler)(int, void *), unsigned long flags, const char *name, void *dev);   142     void free_irq(unsigned int, void *);   806     const char * dev_name(const struct device *dev);   837     void * dev_get_drvdata(const struct device *dev);   842     void dev_set_drvdata(struct device *dev, void *data);   924     int pci_enable_device(struct pci_dev *);   941     void pci_disable_device(struct pci_dev *);   944     void pci_set_master(struct pci_dev *);   950     int pci_set_mwi(struct pci_dev *);   952     void pci_clear_mwi(struct pci_dev *);   997     int pci_save_state(struct pci_dev *);   998     void 
pci_restore_state(struct pci_dev *);  1009     int pci_set_power_state(struct pci_dev *, pci_power_t );  1010     pci_power_t  pci_choose_state(struct pci_dev *, pm_message_t );  1013     int __pci_enable_wake(struct pci_dev *, pci_power_t , bool , bool );  1022     int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);  1062     int pci_request_regions(struct pci_dev *, const char *);  1064     void pci_release_regions(struct pci_dev *);  1107     int __pci_register_driver(struct pci_driver *, struct module *, const char *);  1116     void pci_unregister_driver(struct pci_driver *);   912     void * lowmem_page_address(const struct page *page);    69     int valid_dma_direction(int dma_direction);    76     int is_device_dma_capable(struct device *dev);   131     void kmemcheck_mark_initialized(void *address, unsigned int n);    37     void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );    44     void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );    53     void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);    56     void debug_dma_free_coherent(struct device *, size_t , void *, dma_addr_t );    59     void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int);    63     void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int);    27     extern struct device x86_dma_fallback_dev;    30     extern struct dma_map_ops *dma_ops;    32     struct dma_map_ops * get_dma_ops(struct device *dev);    10     dma_addr_t  dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);    29     void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);    97     void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);   109     void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);    61     int dma_set_mask(struct device *, u64 );   103     unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp);   115     gfp_t  dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp);   131     void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs);   160     void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs);    16     void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);    23     void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);    30     dma_addr_t  pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);    36     void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);    71     void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);    78     void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);   105     int pci_set_dma_mask(struct pci_dev *dev, u64 mask);  1426     void * pci_get_drvdata(struct pci_dev *pdev);  1431     void pci_set_drvdata(struct pci_dev *pdev, void *data);  1439     const char * pci_name(const struct pci_dev *pdev);    10     void __const_udelay(unsigned long);    46     void msleep(unsigned int);    22     
__sum16  csum_fold(__wsum sum);   145     __wsum  csum_partial_copy_nocheck(const void *, void *, int, __wsum );   188     unsigned int skb_frag_size(const skb_frag_t *frag);   717     void consume_skb(struct sk_buff *);   869     unsigned char * skb_end_pointer(const struct sk_buff *skb);  1445     unsigned int skb_headlen(const struct sk_buff *skb);  1565     unsigned char * skb_put(struct sk_buff *, unsigned int);  1666     void skb_reserve(struct sk_buff *skb, int len);  2016     struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );  2032     struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);  2129     struct page * skb_frag_page(const skb_frag_t *frag);  2187     void * skb_frag_address(const skb_frag_t *frag);  2609     void skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len);  3076     bool  skb_is_gso(const struct sk_buff *skb);  3118     void skb_checksum_none_assert(const struct sk_buff *skb);   113     void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed);   121     __u32  ethtool_cmd_speed(const struct ethtool_cmd *ep);    65     u32  ethtool_op_get_link(struct net_device *);   387     void __napi_schedule(struct napi_struct *);   389     bool  napi_disable_pending(struct napi_struct *n);   403     bool  napi_schedule_prep(struct napi_struct *n);   439     void napi_complete(struct napi_struct *);   474     void napi_disable(struct napi_struct *n);   490     void napi_enable(struct napi_struct *n);  1621     struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);  1687     void * netdev_priv(const struct net_device *dev);  1718     void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int);  1975     void free_netdev(struct net_device *);  2140     void __netif_schedule(struct Qdisc *);  2156     void netif_tx_start_queue(struct netdev_queue *dev_queue);  2167     void netif_start_queue(struct net_device *dev);  2182     void netif_tx_wake_queue(struct netdev_queue *dev_queue);  2195     void netif_wake_queue(struct net_device *dev);  2210     void netif_tx_stop_queue(struct netdev_queue *dev_queue);  2226     void netif_stop_queue(struct net_device *dev);  2241     bool  netif_tx_queue_stopped(const struct netdev_queue *dev_queue);  2252     bool  netif_queue_stopped(const struct net_device *dev);  2398     bool  netif_running(const struct net_device *dev);  2554     void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason );  2555     void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason );  2576     void dev_kfree_skb_irq(struct sk_buff *skb);  2586     void dev_kfree_skb_any(struct sk_buff *skb);  2598     int netif_receive_skb(struct sk_buff *);  2695     void netif_carrier_on(struct net_device *);  2697     void netif_carrier_off(struct net_device *);  2765     void netif_device_detach(struct net_device *);  2767     void netif_device_attach(struct net_device *);  2998     int register_netdev(struct net_device *);  2999     void unregister_netdev(struct net_device *);  3395     int netdev_err(const struct net_device *, const char *, ...);  3397     int netdev_warn(const struct net_device *, const char *, ...);  3401     int netdev_info(const struct net_device *, const char *, ...);    32     __be16  eth_type_trans(struct sk_buff *, struct net_device *);    45     int eth_mac_addr(struct net_device *, void *);    46     int eth_change_mtu(struct net_device *, int);    47     
int eth_validate_addr(struct net_device *);    49     struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int);    89     bool  is_zero_ether_addr(const u8 *addr);   107     bool  is_multicast_ether_addr(const u8 *addr);   160     bool  is_valid_ether_addr(const u8 *addr);   356     struct sk_buff * __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);    14     u32  bitrev32(u32 );    11     u32  crc32_le(u32 , const unsigned char *, size_t );    42     int request_firmware(const struct firmware **, const char *, struct device *);    49     void release_firmware(const struct firmware *);   279     struct typhoon_card_info typhoon_card_info[13U] = { { "3Com Typhoon (3C990-TX)", 0 }, { "3Com Typhoon (3CR990-TX-95)", 1 }, { "3Com Typhoon (3CR990-TX-97)", 3 }, { "3Com Typhoon (3C990SVR)", 0 }, { "3Com Typhoon (3CR990SVR95)", 1 }, { "3Com Typhoon (3CR990SVR97)", 3 }, { "3Com Typhoon2 (3C990B-TX-M)", 4 }, { "3Com Typhoon2 (3C990BSVR)", 4 }, { "3Com Typhoon (3CR990-FX-95)", 9 }, { "3Com Typhoon (3CR990-FX-97)", 11 }, { "3Com Typhoon (3CR990-FX-95 Server)", 9 }, { "3Com Typhoon (3CR990-FX-97 Server)", 11 }, { "3Com Typhoon2 (3C990B-FX-97)", 12 } };   314     const struct pci_device_id typhoon_pci_tbl[14U] = { { 4279U, 39168U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 4279U, 39170U, 4294967295U, 4294967295U, 0U, 0U, 1UL }, { 4279U, 39171U, 4294967295U, 4294967295U, 0U, 0U, 2UL }, { 4279U, 39172U, 4294967295U, 4096U, 0U, 0U, 6UL }, { 4279U, 39172U, 4294967295U, 4354U, 0U, 0U, 12UL }, { 4279U, 39172U, 4294967295U, 8192U, 0U, 0U, 7UL }, { 4279U, 39173U, 4294967295U, 4353U, 0U, 0U, 8UL }, { 4279U, 39173U, 4294967295U, 4354U, 0U, 0U, 9UL }, { 4279U, 39173U, 4294967295U, 8449U, 0U, 0U, 10UL }, { 4279U, 39173U, 4294967295U, 8450U, 0U, 0U, 11UL }, { 4279U, 39176U, 4294967295U, 4294967295U, 0U, 0U, 4UL }, { 4279U, 39177U, 4294967295U, 4294967295U, 0U, 0U, 5UL }, { 4279U, 39178U, 4294967295U, 4294967295U, 0U, 0U, 3UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };   343     const struct pci_device_id __mod_pci__typhoon_pci_tbl_device_table = {  };   444     void typhoon_inc_index(u32 *index, const int count, const int num_entries);   455     void typhoon_inc_cmd_index(u32 *index, const int count);   461     void typhoon_inc_resp_index(u32 *index, const int count);   467     void typhoon_inc_rxfree_index(u32 *index, const int count);   480     void typhoon_inc_rx_index(u32 *index, const int count);   488     int typhoon_reset(void *ioaddr, int wait_type);   543     int typhoon_wait_status(void *ioaddr, u32 wait_value);   560     void typhoon_media_status(struct net_device *dev, struct resp_desc *resp);   569     void typhoon_hello(struct typhoon *tp);   590     int typhoon_process_response(struct typhoon *tp, int resp_size, struct resp_desc *resp_save);   649     int typhoon_num_free(int lastWrite, int lastRead, int ringSize);   660     int typhoon_num_free_cmd(struct typhoon *tp);   669     int typhoon_num_free_resp(struct typhoon *tp);   678     int typhoon_num_free_tx(struct transmit_ring *ring);   685     int typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd, int num_resp, struct resp_desc *resp);   800     void typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing, u32 ring_dma);   821     netdev_tx_t  typhoon_start_tx(struct sk_buff *skb, struct net_device *dev);   968     void typhoon_set_rx_mode(struct net_device *dev);  1007     int typhoon_do_get_stats(struct typhoon *tp);  1059     struct net_device_stats * 
typhoon_get_stats(struct net_device *dev);  1078     void typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);  1107     int typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);  1165     int typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);  1215     void typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);  1229     int typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);  1246     void typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering);  1255     const struct ethtool_ops typhoon_ethtool_ops = { &typhoon_get_settings, &typhoon_set_settings, &typhoon_get_drvinfo, 0, 0, &typhoon_get_wol, &typhoon_set_wol, 0, 0, 0, &ethtool_op_get_link, 0, 0, 0, 0, 0, &typhoon_get_ringparam, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };  1266     int typhoon_wait_interrupt(void *ioaddr);  1287     void typhoon_init_interface(struct typhoon *tp);  1357     void typhoon_init_rings(struct typhoon *tp);  1373     const struct firmware *typhoon_fw = 0;  1376     int typhoon_request_firmware(struct typhoon *tp);  1434     int typhoon_download_firmware(struct typhoon *tp);  1575     int typhoon_boot_3XP(struct typhoon *tp, u32 initial_status);  1611     u32  typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, volatile __le32 *index);  1647     void typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, volatile __le32 *index);  1664     void typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx);  1691     int typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx);  1737     int typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 *ready, volatile __le32 *cleared, int budget);  1814     void typhoon_fill_free_ring(struct typhoon *tp);  1828     int typhoon_poll(struct napi_struct *napi, int budget);  1869     irqreturn_t  typhoon_interrupt(int irq, void *dev_instance);  1893     void typhoon_free_rx_rings(struct typhoon *tp);  1909     int typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events);  1946     int typhoon_wakeup(struct typhoon *tp, int wait_type);  1967     int typhoon_start_runtime(struct typhoon *tp);  2059     int typhoon_stop_runtime(struct typhoon *tp, int wait_type);  2120     void typhoon_tx_timeout(struct net_device *dev);  2148     int typhoon_open(struct net_device *dev);  2197     int typhoon_close(struct net_device *dev);  2224     int typhoon_resume(struct pci_dev *pdev);  2253     int typhoon_suspend(struct pci_dev *pdev, pm_message_t state);  2312     int typhoon_test_mmio(struct pci_dev *pdev);  2357     const struct net_device_ops typhoon_netdev_ops = { 0, 0, &typhoon_open, &typhoon_close, &typhoon_start_tx, 0, 0, &typhoon_set_rx_mode, &eth_mac_addr, &eth_validate_addr, 0, 0, &eth_change_mtu, 0, &typhoon_tx_timeout, 0, &typhoon_get_stats, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };  2370     int typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);  2621     void typhoon_remove_one(struct pci_dev *pdev);  2639     struct pci_driver typhoon_driver = { { 0, 0 }, "typhoon", (const struct pci_device_id *)(&typhoon_pci_tbl), &typhoon_init_one, &typhoon_remove_one, &typhoon_suspend, 0, 0, &typhoon_resume, 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0U } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } };  2651     int typhoon_init();  2657     
void typhoon_cleanup();  2682     void ldv_check_final_state();  2685     void ldv_check_return_value(int);  2688     void ldv_check_return_value_probe(int);  2691     void ldv_initialize();  2694     void ldv_handler_precall();  2697     int nondet_int();  2700     int LDV_IN_INTERRUPT = 0;  2703     void ldv_main0_sequence_infinite_withcheck_stateful();    10     void ldv_error();    25     int ldv_undef_int();    59     void __builtin_trap();     8     int ldv_spin__xmit_lock_of_netdev_queue = 0;    11     void ldv_spin_lock__xmit_lock_of_netdev_queue();    20     void ldv_spin_unlock__xmit_lock_of_netdev_queue();    29     int ldv_spin_trylock__xmit_lock_of_netdev_queue();    55     void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue();    62     int ldv_spin_is_locked__xmit_lock_of_netdev_queue();    83     int ldv_spin_can_lock__xmit_lock_of_netdev_queue();    90     int ldv_spin_is_contended__xmit_lock_of_netdev_queue();   111     int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue();   133     int ldv_spin_addr_list_lock_of_net_device = 0;   145     void ldv_spin_unlock_addr_list_lock_of_net_device();   154     int ldv_spin_trylock_addr_list_lock_of_net_device();   180     void ldv_spin_unlock_wait_addr_list_lock_of_net_device();   187     int ldv_spin_is_locked_addr_list_lock_of_net_device();   208     int ldv_spin_can_lock_addr_list_lock_of_net_device();   215     int ldv_spin_is_contended_addr_list_lock_of_net_device();   236     int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device();   258     int ldv_spin_alloc_lock_of_task_struct = 0;   261     void ldv_spin_lock_alloc_lock_of_task_struct();   270     void ldv_spin_unlock_alloc_lock_of_task_struct();   279     int ldv_spin_trylock_alloc_lock_of_task_struct();   305     void ldv_spin_unlock_wait_alloc_lock_of_task_struct();   312     int ldv_spin_is_locked_alloc_lock_of_task_struct();   333     int ldv_spin_can_lock_alloc_lock_of_task_struct();   340     int ldv_spin_is_contended_alloc_lock_of_task_struct();   361     int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct();   383     int ldv_spin_command_lock_of_typhoon = 0;   430     void ldv_spin_unlock_wait_command_lock_of_typhoon();   437     int ldv_spin_is_locked_command_lock_of_typhoon();   458     int ldv_spin_can_lock_command_lock_of_typhoon();   465     int ldv_spin_is_contended_command_lock_of_typhoon();   486     int ldv_atomic_dec_and_lock_command_lock_of_typhoon();   508     int ldv_spin_i_lock_of_inode = 0;   511     void ldv_spin_lock_i_lock_of_inode();   520     void ldv_spin_unlock_i_lock_of_inode();   529     int ldv_spin_trylock_i_lock_of_inode();   555     void ldv_spin_unlock_wait_i_lock_of_inode();   562     int ldv_spin_is_locked_i_lock_of_inode();   583     int ldv_spin_can_lock_i_lock_of_inode();   590     int ldv_spin_is_contended_i_lock_of_inode();   611     int ldv_atomic_dec_and_lock_i_lock_of_inode();   633     int ldv_spin_lock = 0;   645     void ldv_spin_unlock_lock();   654     int ldv_spin_trylock_lock();   680     void ldv_spin_unlock_wait_lock();   687     int ldv_spin_is_locked_lock();   708     int ldv_spin_can_lock_lock();   715     int ldv_spin_is_contended_lock();   736     int ldv_atomic_dec_and_lock_lock();   758     int ldv_spin_lock_of_NOT_ARG_SIGN = 0;   770     void ldv_spin_unlock_lock_of_NOT_ARG_SIGN();   779     int ldv_spin_trylock_lock_of_NOT_ARG_SIGN();   805     void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN();   812     int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN();   833     int 
ldv_spin_can_lock_lock_of_NOT_ARG_SIGN();   840     int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN();   861     int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN();   883     int ldv_spin_lru_lock_of_netns_frags = 0;   886     void ldv_spin_lock_lru_lock_of_netns_frags();   895     void ldv_spin_unlock_lru_lock_of_netns_frags();   904     int ldv_spin_trylock_lru_lock_of_netns_frags();   930     void ldv_spin_unlock_wait_lru_lock_of_netns_frags();   937     int ldv_spin_is_locked_lru_lock_of_netns_frags();   958     int ldv_spin_can_lock_lru_lock_of_netns_frags();   965     int ldv_spin_is_contended_lru_lock_of_netns_frags();   986     int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags();  1008     int ldv_spin_node_size_lock_of_pglist_data = 0;  1020     void ldv_spin_unlock_node_size_lock_of_pglist_data();  1029     int ldv_spin_trylock_node_size_lock_of_pglist_data();  1055     void ldv_spin_unlock_wait_node_size_lock_of_pglist_data();  1062     int ldv_spin_is_locked_node_size_lock_of_pglist_data();  1083     int ldv_spin_can_lock_node_size_lock_of_pglist_data();  1090     int ldv_spin_is_contended_node_size_lock_of_pglist_data();  1111     int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data();  1133     int ldv_spin_ptl = 0;  1136     void ldv_spin_lock_ptl();  1145     void ldv_spin_unlock_ptl();  1154     int ldv_spin_trylock_ptl();  1180     void ldv_spin_unlock_wait_ptl();  1187     int ldv_spin_is_locked_ptl();  1208     int ldv_spin_can_lock_ptl();  1215     int ldv_spin_is_contended_ptl();  1236     int ldv_atomic_dec_and_lock_ptl();  1258     int ldv_spin_siglock_of_sighand_struct = 0;  1270     void ldv_spin_unlock_siglock_of_sighand_struct();  1279     int ldv_spin_trylock_siglock_of_sighand_struct();  1305     void ldv_spin_unlock_wait_siglock_of_sighand_struct();  1312     int ldv_spin_is_locked_siglock_of_sighand_struct();  1333     int ldv_spin_can_lock_siglock_of_sighand_struct();  1340     int ldv_spin_is_contended_siglock_of_sighand_struct();  1361     int ldv_atomic_dec_and_lock_siglock_of_sighand_struct();  1383     int ldv_spin_tx_global_lock_of_net_device = 0;  1386     void ldv_spin_lock_tx_global_lock_of_net_device();  1395     void ldv_spin_unlock_tx_global_lock_of_net_device();  1404     int ldv_spin_trylock_tx_global_lock_of_net_device();  1430     void ldv_spin_unlock_wait_tx_global_lock_of_net_device();  1437     int ldv_spin_is_locked_tx_global_lock_of_net_device();  1458     int ldv_spin_can_lock_tx_global_lock_of_net_device();  1465     int ldv_spin_is_contended_tx_global_lock_of_net_device();  1486     int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device();           return ;         }        {      2705     struct net_device *var_group1;  2706     struct ethtool_cmd *var_group2;  2707     struct ethtool_drvinfo *var_group3;  2708     struct ethtool_wolinfo *var_group4;  2709     struct ethtool_ringparam *var_group5;  2710     int res_typhoon_open_47;  2711     int res_typhoon_close_48;  2712     struct sk_buff *var_group6;  2713     struct pci_dev *var_group7;  2714     const struct pci_device_id *var_typhoon_init_one_52_p1;  2715     int res_typhoon_init_one_52;  2716     pm_message_t var_typhoon_suspend_50_p1;  2717     int var_typhoon_interrupt_40_p0;  2718     void *var_typhoon_interrupt_40_p1;  2719     int ldv_s_typhoon_netdev_ops_net_device_ops;  2720     int ldv_s_typhoon_driver_pci_driver;  2721     int tmp;  2722     int tmp___0;  2723     int tmp___1;  3757     ldv_s_typhoon_netdev_ops_net_device_ops = 0;  3760     
ldv_s_typhoon_driver_pci_driver = 0;  3686     LDV_IN_INTERRUPT = 1;           {  1513       ldv_spin__xmit_lock_of_netdev_queue = 1;  1515       ldv_spin_addr_list_lock_of_net_device = 1;  1517       ldv_spin_alloc_lock_of_task_struct = 1;  1519       ldv_spin_command_lock_of_typhoon = 1;  1521       ldv_spin_i_lock_of_inode = 1;  1523       ldv_spin_lock = 1;  1525       ldv_spin_lock_of_NOT_ARG_SIGN = 1;  1527       ldv_spin_lru_lock_of_netns_frags = 1;  1529       ldv_spin_node_size_lock_of_pglist_data = 1;  1531       ldv_spin_ptl = 1;  1533       ldv_spin_siglock_of_sighand_struct = 1;  1535       ldv_spin_tx_global_lock_of_net_device = 1;           } 3752     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {  2653       int tmp;  2653       tmp = __pci_register_driver(&typhoon_driver, &__this_module, "typhoon") { /* Function call is skipped due to function is undefined */}           } 3766     goto ldv_45041;  3766     tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}  3770     goto ldv_45040;  3767     ldv_45040:;  3771     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  3771     switch (tmp___0) 4818     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {         } 2255       struct net_device *dev;  2256       void *tmp;  2257       struct typhoon *tp;  2258       void *tmp___0;  2259       struct cmd_desc xp_cmd;  2260       bool tmp___1;  2261       int tmp___2;  2262       int tmp___3;  2263       int tmp___4;  2264       struct cmd_desc *_ptr;  2265       __u16 tmp___5;  2266       __u32 tmp___6;  2267       int tmp___7;  2268       struct cmd_desc *_ptr___0;  2269       int tmp___8;  2270       pci_power_t tmp___9;  2271       int tmp___10;  2255       dev = (struct net_device *)tmp;  2256       tp = (struct typhoon *)tmp___0;             {  2400         int tmp;               {             }  310           return ((int)(((unsigned long)(*(addr + ((unsigned long)(nr >> 6))))) >> (((int)nr) & 63))) & 1;;               } 2261       tmp___2 = 0;  2265       int __CPAchecker_TMP_0 = (int)(tp->wol_events);  2268       netif_device_detach(dev) { /* Function call is skipped due to function is undefined */}             {           } 2061         struct typhoon_indexes *indexes;  2062         struct transmit_ring *txLo;  2063         void *ioaddr;  2064         struct cmd_desc xp_cmd;  2065         int i;  2066         struct cmd_desc *_ptr;  2067         struct cmd_desc *_ptr___0;  2068         size_t __len;  2069         void *__ret;  2070         struct cmd_desc *_ptr___1;  2071         int tmp;  2072         int tmp___0;  2061         indexes = tp->indexes;  2062         txLo = &(tp->txLoRing);  2063         ioaddr = tp->ioaddr;  2071         iowrite32(0U, ioaddr + 8UL) { /* Function call is skipped due to function is undefined */}  2073         _ptr = &xp_cmd;  2073         memset((void *)_ptr, 0, 16UL) { /* Function call is skipped due to function is undefined */}  2073         _ptr->flags = 130U;  2073         _ptr->cmd = 4U;               {             }  688           struct typhoon_indexes *indexes;   689           struct basic_ring *ring;   690           struct resp_desc local_resp;   691           int i;   692           int err;   693           int got_resp;   694           int freeCmd;   695           int freeResp;   696           int len;   697           int wrap_len;   698           long tmp;   699        
   size_t __len;   700           void *__ret;   701           struct cmd_desc *wrap_ptr;   702           size_t __len___0;   703           void *__ret___0;   704           long tmp___0;   705           long tmp___1;   688           indexes = tp->indexes;   689           ring = &(tp->cmdRing);   691           err = 0;                 {                   {                 }  303               _raw_spin_lock(&(lock->ldv_6306.rlock)) { /* Function call is skipped due to function is undefined */}                   }                {   662             int lastWrite;   663             int cmdCleared;   664             int tmp;   662             lastWrite = (int)(tp->cmdRing.lastWrite);   663             int __CPAchecker_TMP_0 = (int)(tp->indexes->cmdCleared);   663             cmdCleared = __CPAchecker_TMP_0;                   {                 }  654               lastWrite = (int)(((unsigned long)lastWrite) / 16UL);   655               lastRead = (int)(((unsigned long)lastRead) / 16UL);                   }                {   671             int respReady;   672             int respCleared;   673             int tmp;   671             int __CPAchecker_TMP_0 = (int)(tp->indexes->respReady);   671             respReady = __CPAchecker_TMP_0;   672             int __CPAchecker_TMP_1 = (int)(tp->indexes->respCleared);   672             respCleared = __CPAchecker_TMP_1;                   {                 }  654               lastWrite = (int)(((unsigned long)lastWrite) / 16UL);   655               lastRead = (int)(((unsigned long)lastRead) / 16UL);                   }  708           int __CPAchecker_TMP_1 = (int)(cmd->flags);   712           tp->awaiting_resp = 1U;   714           resp = &local_resp;   715           num_resp = 1;   719           wrap_len = 0;   720           len = (int)(((unsigned int)num_cmd) * 16U);   726           __len = (size_t )len;   726           void *__CPAchecker_TMP_2 = (void *)(ring->ringBase);   726           unsigned long __CPAchecker_TMP_3 = (unsigned long)(ring->lastWrite);   726           __ret = __builtin_memcpy(__CPAchecker_TMP_2 + __CPAchecker_TMP_3, (const void *)cmd, __len) { /* Function call is skipped due to function is undefined */}                 {   737           Ignored inline assembler code                  {                 }  450               *index = (*index) + (((u32 )((unsigned long)count)) * 16U);   451               *index = (u32 )(((unsigned long)(*index)) % (((unsigned long)num_entries) * 16UL));                   }  738           iowrite32(ring->lastWrite, (tp->ioaddr) + 40UL) { /* Function call is skipped due to function is undefined */}   739           ioread32((tp->ioaddr) + 52UL) { /* Function call is skipped due to function is undefined */}   741           int __CPAchecker_TMP_5 = (int)(cmd->flags);   760           got_resp = 0;   761           i = 0;   761           goto ldv_44570;   763           goto ldv_44569;   762           ldv_44569:;   762           unsigned int __CPAchecker_TMP_6 = (unsigned int)(indexes->respCleared);   762           unsigned int __CPAchecker_TMP_7 = (unsigned int)(indexes->respReady);                 {               }  592             struct typhoon_indexes *indexes;   593             struct resp_desc *resp;   594             u8 *base;   595             int count;   596             int len;   597             int wrap_len;   598             u32 cleared;   599             u32 ready;   600             long tmp;   601             size_t __len;   602             void *__ret;   603             size_t __len___0;   604 
            void *__ret___0;   605             long tmp___0;   593             indexes = tp->indexes;   595             base = tp->respRing.ringBase;   600             cleared = indexes->respCleared;   601             ready = indexes->respReady;   602             goto ldv_44524;   604             goto ldv_44523;   603             ldv_44523:;   603             resp = ((struct resp_desc *)base) + ((unsigned long)cleared);   604             int __CPAchecker_TMP_0 = (int)(resp->numDesc);   604             count = __CPAchecker_TMP_0 + 1;   605             unsigned int __CPAchecker_TMP_1 = (unsigned int)(resp->seqNo);   625             unsigned int __CPAchecker_TMP_2 = (unsigned int)(resp->cmd);   627             unsigned int __CPAchecker_TMP_3 = (unsigned int)(resp->cmd);                   {                 }  571               struct basic_ring *ring;   572               struct cmd_desc *cmd;   573               struct cmd_desc *_ptr;   574               int tmp;   571               ring = &(tp->cmdRing);                     {                   } 5467                 ldv_func_ret_type___0 ldv_func_res;  5468                 int tmp;  5469                 int tmp___0;                       {   389                   int tmp;   389                   tmp = _raw_spin_trylock(&(lock->ldv_6306.rlock)) { /* Function call is skipped due to function is undefined */}                       } 5469                 ldv_func_res = tmp;                     } |              Source code             1 #ifndef _ASM_X86_BITOPS_H
    2 #define _ASM_X86_BITOPS_H
    3 
    4 /*
    5  * Copyright 1992, Linus Torvalds.
    6  *
    7  * Note: inlines with more than a single statement should be marked
    8  * __always_inline to avoid problems with older gcc's inlining heuristics.
    9  */
   10 
   11 #ifndef _LINUX_BITOPS_H
   12 #error only <linux/bitops.h> can be included directly
   13 #endif
   14 
   15 #include <linux/compiler.h>
   16 #include <asm/alternative.h>
   17 #include <asm/rmwcc.h>
   18 #include <asm/barrier.h>
   19 
   20 #if BITS_PER_LONG == 32
   21 # define _BITOPS_LONG_SHIFT 5
   22 #elif BITS_PER_LONG == 64
   23 # define _BITOPS_LONG_SHIFT 6
   24 #else
   25 # error "Unexpected BITS_PER_LONG"
   26 #endif
   27 
   28 #define BIT_64(n)			(U64_C(1) << (n))
   29 
   30 /*
   31  * These have to be done with inline assembly: that way the bit-setting
   32  * is guaranteed to be atomic. All bit operations return 0 if the bit
   33  * was cleared before the operation and != 0 if it was not.
   34  *
   35  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
   36  */
   37 
   38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
   39 /* Technically wrong, but this avoids compilation errors on some gcc
   40    versions. */
   41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
   42 #else
   43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
   44 #endif
   45 
   46 #define ADDR				BITOP_ADDR(addr)
   47 
   48 /*
   49  * We do the locked ops that don't return the old value as
   50  * a mask operation on a byte.
   51  */
   52 #define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
   53 #define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
   54 #define CONST_MASK(nr)			(1 << ((nr) & 7))
   55 
   56 /**
   57  * set_bit - Atomically set a bit in memory
   58  * @nr: the bit to set
   59  * @addr: the address to start counting from
   60  *
   61  * This function is atomic and may not be reordered.  See __set_bit()
   62  * if you do not require the atomic guarantees.
   63  *
   64  * Note: there are no guarantees that this function will not be reordered
    65  * on non-x86 architectures, so if you are writing portable code,
   66  * make sure not to rely on its reordering guarantees.
   67  *
   68  * Note that @nr may be almost arbitrarily large; this function is not
   69  * restricted to acting on a single-word quantity.
   70  */
   71 static __always_inline void
   72 set_bit(long nr, volatile unsigned long *addr)
   73 {
   74 	if (IS_IMMEDIATE(nr)) {
   75 		asm volatile(LOCK_PREFIX "orb %1,%0"
   76 			: CONST_MASK_ADDR(nr, addr)
   77 			: "iq" ((u8)CONST_MASK(nr))
   78 			: "memory");
   79 	} else {
   80 		asm volatile(LOCK_PREFIX "bts %1,%0"
   81 			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
   82 	}
   83 }
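For readers following the trace: when nr is a compile-time constant, set_bit() reduces to or-ing a one-byte mask into the byte that holds the bit. A minimal non-atomic C sketch of that address/mask arithmetic (illustrative only; it omits the LOCK-prefixed asm that makes the real helper atomic):

	/* Sketch of the IS_IMMEDIATE(nr) path: the byte/mask arithmetic of
	 * CONST_MASK_ADDR()/CONST_MASK(), without atomicity. */
	static void set_bit_sketch(long nr, volatile unsigned long *addr)
	{
		volatile unsigned char *byte = (volatile unsigned char *)addr + (nr >> 3);
		unsigned char mask = 1u << (nr & 7);	/* CONST_MASK(nr) */

		*byte |= mask;				/* "orb mask, *byte" minus LOCK */
	}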
   84 
   85 /**
   86  * __set_bit - Set a bit in memory
   87  * @nr: the bit to set
   88  * @addr: the address to start counting from
   89  *
   90  * Unlike set_bit(), this function is non-atomic and may be reordered.
   91  * If it's called on the same region of memory simultaneously, the effect
   92  * may be that only one operation succeeds.
   93  */
   94 static inline void __set_bit(long nr, volatile unsigned long *addr)
   95 {
   96 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
   97 }
   98 
   99 /**
  100  * clear_bit - Clears a bit in memory
  101  * @nr: Bit to clear
  102  * @addr: Address to start counting from
  103  *
  104  * clear_bit() is atomic and may not be reordered.  However, it does
  105  * not contain a memory barrier, so if it is used for locking purposes,
  106  * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  107  * in order to ensure changes are visible on other processors.
  108  */
  109 static __always_inline void
  110 clear_bit(long nr, volatile unsigned long *addr)
  111 {
  112 	if (IS_IMMEDIATE(nr)) {
  113 		asm volatile(LOCK_PREFIX "andb %1,%0"
  114 			: CONST_MASK_ADDR(nr, addr)
  115 			: "iq" ((u8)~CONST_MASK(nr)));
  116 	} else {
  117 		asm volatile(LOCK_PREFIX "btr %1,%0"
  118 			: BITOP_ADDR(addr)
  119 			: "Ir" (nr));
  120 	}
  121 }
  122 
  123 /*
  124  * clear_bit_unlock - Clears a bit in memory
  125  * @nr: Bit to clear
  126  * @addr: Address to start counting from
  127  *
  128  * clear_bit() is atomic and implies release semantics before the memory
  129  * operation. It can be used for an unlock.
  130  */
  131 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
  132 {
  133 	barrier();
  134 	clear_bit(nr, addr);
  135 }
  136 
  137 static inline void __clear_bit(long nr, volatile unsigned long *addr)
  138 {
  139 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
  140 }
  141 
  142 /*
  143  * __clear_bit_unlock - Clears a bit in memory
  144  * @nr: Bit to clear
  145  * @addr: Address to start counting from
  146  *
  147  * __clear_bit() is non-atomic and implies release semantics before the memory
  148  * operation. It can be used for an unlock if no other CPUs can concurrently
  149  * modify other bits in the word.
  150  *
  151  * No memory barrier is required here, because x86 cannot reorder stores past
  152  * older loads. Same principle as spin_unlock.
  153  */
  154 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
  155 {
  156 	barrier();
  157 	__clear_bit(nr, addr);
  158 }
  159 
  160 /**
  161  * __change_bit - Toggle a bit in memory
  162  * @nr: the bit to change
  163  * @addr: the address to start counting from
  164  *
  165  * Unlike change_bit(), this function is non-atomic and may be reordered.
  166  * If it's called on the same region of memory simultaneously, the effect
  167  * may be that only one operation succeeds.
  168  */
  169 static inline void __change_bit(long nr, volatile unsigned long *addr)
  170 {
  171 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
  172 }
  173 
  174 /**
  175  * change_bit - Toggle a bit in memory
  176  * @nr: Bit to change
  177  * @addr: Address to start counting from
  178  *
  179  * change_bit() is atomic and may not be reordered.
  180  * Note that @nr may be almost arbitrarily large; this function is not
  181  * restricted to acting on a single-word quantity.
  182  */
  183 static inline void change_bit(long nr, volatile unsigned long *addr)
  184 {
  185 	if (IS_IMMEDIATE(nr)) {
  186 		asm volatile(LOCK_PREFIX "xorb %1,%0"
  187 			: CONST_MASK_ADDR(nr, addr)
  188 			: "iq" ((u8)CONST_MASK(nr)));
  189 	} else {
  190 		asm volatile(LOCK_PREFIX "btc %1,%0"
  191 			: BITOP_ADDR(addr)
  192 			: "Ir" (nr));
  193 	}
  194 }
  195 
  196 /**
  197  * test_and_set_bit - Set a bit and return its old value
  198  * @nr: Bit to set
  199  * @addr: Address to count from
  200  *
  201  * This operation is atomic and cannot be reordered.
  202  * It also implies a memory barrier.
  203  */
  204 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
  205 {
  206 	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
  207 }
  208 
  209 /**
  210  * test_and_set_bit_lock - Set a bit and return its old value for lock
  211  * @nr: Bit to set
  212  * @addr: Address to count from
  213  *
  214  * This is the same as test_and_set_bit on x86.
  215  */
  216 static __always_inline int
  217 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
  218 {
  219 	return test_and_set_bit(nr, addr);
  220 }
  221 
  222 /**
  223  * __test_and_set_bit - Set a bit and return its old value
  224  * @nr: Bit to set
  225  * @addr: Address to count from
  226  *
  227  * This operation is non-atomic and can be reordered.
  228  * If two examples of this operation race, one can appear to succeed
  229  * but actually fail.  You must protect multiple accesses with a lock.
  230  */
  231 static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
  232 {
  233 	int oldbit;
  234 
  235 	asm("bts %2,%1\n\t"
  236 	    "sbb %0,%0"
  237 	    : "=r" (oldbit), ADDR
  238 	    : "Ir" (nr));
  239 	return oldbit;
  240 }
  241 
  242 /**
  243  * test_and_clear_bit - Clear a bit and return its old value
  244  * @nr: Bit to clear
  245  * @addr: Address to count from
  246  *
  247  * This operation is atomic and cannot be reordered.
  248  * It also implies a memory barrier.
  249  */
  250 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
  251 {
  252 	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
  253 }
  254 
  255 /**
  256  * __test_and_clear_bit - Clear a bit and return its old value
  257  * @nr: Bit to clear
  258  * @addr: Address to count from
  259  *
  260  * This operation is non-atomic and can be reordered.
  261  * If two examples of this operation race, one can appear to succeed
  262  * but actually fail.  You must protect multiple accesses with a lock.
  263  *
  264  * Note: the operation is performed atomically with respect to
  265  * the local CPU, but not other CPUs. Portable code should not
  266  * rely on this behaviour.
  267  * KVM relies on this behaviour on x86 for modifying memory that is also
  268  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  269  * this without also updating arch/x86/kernel/kvm.c
  270  */
  271 static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
  272 {
  273 	int oldbit;
  274 
  275 	asm volatile("btr %2,%1\n\t"
  276 		     "sbb %0,%0"
  277 		     : "=r" (oldbit), ADDR
  278 		     : "Ir" (nr));
  279 	return oldbit;
  280 }
  281 
  282 /* WARNING: non atomic and it can be reordered! */
  283 static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
  284 {
  285 	int oldbit;
  286 
  287 	asm volatile("btc %2,%1\n\t"
  288 		     "sbb %0,%0"
  289 		     : "=r" (oldbit), ADDR
  290 		     : "Ir" (nr) : "memory");
  291 
  292 	return oldbit;
  293 }
  294 
  295 /**
  296  * test_and_change_bit - Change a bit and return its old value
  297  * @nr: Bit to change
  298  * @addr: Address to count from
  299  *
  300  * This operation is atomic and cannot be reordered.
  301  * It also implies a memory barrier.
  302  */
  303 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
  304 {
  305 	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
  306 }
  307 
  308 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
  309 {
  310 	return ((1UL << (nr & (BITS_PER_LONG-1))) &
  311 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
  312 }
  313 
  314 static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
  315 {
  316 	int oldbit;
  317 
  318 	asm volatile("bt %2,%1\n\t"
  319 		     "sbb %0,%0"
  320 		     : "=r" (oldbit)
  321 		     : "m" (*(unsigned long *)addr), "Ir" (nr));
  322 
  323 	return oldbit;
  324 }
  325 
  326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
  327 /**
  328  * test_bit - Determine whether a bit is set
  329  * @nr: bit number to test
  330  * @addr: Address to start counting from
  331  */
  332 static int test_bit(int nr, const volatile unsigned long *addr);
  333 #endif
  334 
  335 #define test_bit(nr, addr)			\
  336 	(__builtin_constant_p((nr))		\
  337 	 ? constant_test_bit((nr), (addr))	\
  338 	 : variable_test_bit((nr), (addr)))
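The expression the error trace expands at source line 310 is constant_test_bit() on a 64-bit build, where _BITOPS_LONG_SHIFT is 6: the word index is nr >> 6 and the bit index is nr & 63. A small worked example with a hypothetical two-word bitmap:

	unsigned long bits[2] = { 0, 0x10 };	/* hypothetical bitmap */
	/* nr == 68: word 68 >> 6 == 1, bit 68 & 63 == 4 */
	int set = (int)((bits[68 >> 6] >> (68 & 63)) & 1);	/* -> 1 */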
  339 
  340 /**
  341  * __ffs - find first set bit in word
  342  * @word: The word to search
  343  *
  344  * Undefined if no bit exists, so code should check against 0 first.
  345  */
  346 static inline unsigned long __ffs(unsigned long word)
  347 {
  348 	asm("rep; bsf %1,%0"
  349 		: "=r" (word)
  350 		: "rm" (word));
  351 	return word;
  352 }
  353 
  354 /**
  355  * ffz - find first zero bit in word
  356  * @word: The word to search
  357  *
  358  * Undefined if no zero exists, so code should check against ~0UL first.
  359  */
  360 static inline unsigned long ffz(unsigned long word)
  361 {
  362 	asm("rep; bsf %1,%0"
  363 		: "=r" (word)
  364 		: "r" (~word));
  365 	return word;
  366 }
  367 
  368 /*
  369  * __fls: find last set bit in word
  370  * @word: The word to search
  371  *
  372  * Undefined if no set bit exists, so code should check against 0 first.
  373  */
  374 static inline unsigned long __fls(unsigned long word)
  375 {
  376 	asm("bsr %1,%0"
  377 	    : "=r" (word)
  378 	    : "rm" (word));
  379 	return word;
  380 }
  381 
  382 #undef ADDR
  383 
  384 #ifdef __KERNEL__
  385 /**
  386  * ffs - find first set bit in word
  387  * @x: the word to search
  388  *
  389  * This is defined the same way as the libc and compiler builtin ffs
  390  * routines, therefore differs in spirit from the other bitops.
  391  *
  392  * ffs(value) returns 0 if value is 0 or the position of the first
  393  * set bit if value is nonzero. The first (least significant) bit
  394  * is at position 1.
  395  */
  396 static inline int ffs(int x)
  397 {
  398 	int r;
  399 
  400 #ifdef CONFIG_X86_64
  401 	/*
  402 	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
  403 	 * dest reg is undefined if x==0, but their CPU architect says its
  404 	 * value is written to set it to the same as before, except that the
  405 	 * top 32 bits will be cleared.
  406 	 *
  407 	 * We cannot do this on 32 bits because at the very least some
  408 	 * 486 CPUs did not behave this way.
  409 	 */
  410 	asm("bsfl %1,%0"
  411 	    : "=r" (r)
  412 	    : "rm" (x), "0" (-1));
  413 #elif defined(CONFIG_X86_CMOV)
  414 	asm("bsfl %1,%0\n\t"
  415 	    "cmovzl %2,%0"
  416 	    : "=&r" (r) : "rm" (x), "r" (-1));
  417 #else
  418 	asm("bsfl %1,%0\n\t"
  419 	    "jnz 1f\n\t"
  420 	    "movl $-1,%0\n"
  421 	    "1:" : "=r" (r) : "rm" (x));
  422 #endif
  423 	return r + 1;
  424 }
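A hedged, portable sketch of the same return convention (ffs(0) == 0, otherwise the 1-based position of the least significant set bit), without the inline asm above:

	static int ffs_sketch(int x)
	{
		int pos = 1;

		if (x == 0)
			return 0;
		while (!(x & 1)) {
			x = (int)((unsigned int)x >> 1);
			pos++;
		}
		return pos;	/* ffs_sketch(0x10) == 5, ffs_sketch(0x8000) == 16 */
	}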
  425 
  426 /**
  427  * fls - find last set bit in word
  428  * @x: the word to search
  429  *
  430  * This is defined in a similar way as the libc and compiler builtin
  431  * ffs, but returns the position of the most significant set bit.
  432  *
  433  * fls(value) returns 0 if value is 0 or the position of the last
  434  * set bit if value is nonzero. The last (most significant) bit is
  435  * at position 32.
  436  */
  437 static inline int fls(int x)
  438 {
  439 	int r;
  440 
  441 #ifdef CONFIG_X86_64
  442 	/*
  443 	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
  444 	 * dest reg is undefined if x==0, but their CPU architect says its
  445 	 * value is written to set it to the same as before, except that the
  446 	 * top 32 bits will be cleared.
  447 	 *
  448 	 * We cannot do this on 32 bits because at the very least some
  449 	 * 486 CPUs did not behave this way.
  450 	 */
  451 	asm("bsrl %1,%0"
  452 	    : "=r" (r)
  453 	    : "rm" (x), "0" (-1));
  454 #elif defined(CONFIG_X86_CMOV)
  455 	asm("bsrl %1,%0\n\t"
  456 	    "cmovzl %2,%0"
  457 	    : "=&r" (r) : "rm" (x), "rm" (-1));
  458 #else
  459 	asm("bsrl %1,%0\n\t"
  460 	    "jnz 1f\n\t"
  461 	    "movl $-1,%0\n"
  462 	    "1:" : "=r" (r) : "rm" (x));
  463 #endif
  464 	return r + 1;
  465 }
  466 
  467 /**
  468  * fls64 - find last set bit in a 64-bit word
  469  * @x: the word to search
  470  *
  471  * This is defined in a similar way as the libc and compiler builtin
  472  * ffsll, but returns the position of the most significant set bit.
  473  *
  474  * fls64(value) returns 0 if value is 0 or the position of the last
  475  * set bit if value is nonzero. The last (most significant) bit is
  476  * at position 64.
  477  */
  478 #ifdef CONFIG_X86_64
  479 static __always_inline int fls64(__u64 x)
  480 {
  481 	int bitpos = -1;
  482 	/*
  483 	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
  484 	 * dest reg is undefined if x==0, but their CPU architect says its
  485 	 * value is written to set it to the same as before.
  486 	 */
  487 	asm("bsrq %1,%q0"
  488 	    : "+r" (bitpos)
  489 	    : "rm" (x));
  490 	return bitpos + 1;
  491 }
  492 #else
  493 #include <asm-generic/bitops/fls64.h>
  494 #endif
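Examples of the fls64() convention, assuming the "won't clobber the dest reg if x == 0" behaviour described in the comment above: fls64(0) == 0 (bitpos stays -1), fls64(1) == 1, and fls64(1ULL << 63) == 64.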
  495 
  496 #include <asm-generic/bitops/find.h>
  497 
  498 #include <asm-generic/bitops/sched.h>
  499 
  500 #define ARCH_HAS_FAST_MULTIPLIER 1
  501 
  502 #include <asm/arch_hweight.h>
  503 
  504 #include <asm-generic/bitops/const_hweight.h>
  505 
  506 #include <asm-generic/bitops/le.h>
  507 
  508 #include <asm-generic/bitops/ext2-atomic-setbit.h>
  509 
  510 #endif /* __KERNEL__ */
  511 #endif /* _ASM_X86_BITOPS_H */
    1 
    2 #include <linux/kernel.h>
    3 #include <linux/spinlock.h>
    4 
    5 extern void __ldv_spin_lock(spinlock_t *lock);
    6 extern void __ldv_spin_unlock(spinlock_t *lock);
    7 extern int __ldv_spin_trylock(spinlock_t *lock);
    8 extern void __ldv_spin_unlock_wait(spinlock_t *lock);
    9 extern void __ldv_spin_can_lock(spinlock_t *lock);
   10 extern int __ldv_atomic_dec_and_lock(spinlock_t *lock);
   11 
   12 extern void ldv_spin_lock__xmit_lock_of_netdev_queue(void);
   13 extern void ldv_spin_unlock__xmit_lock_of_netdev_queue(void);
   14 extern int ldv_spin_trylock__xmit_lock_of_netdev_queue(void);
   15 extern void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(void);
   16 extern int ldv_spin_is_locked__xmit_lock_of_netdev_queue(void);
   17 extern int ldv_spin_can_lock__xmit_lock_of_netdev_queue(void);
   18 extern int ldv_spin_is_contended__xmit_lock_of_netdev_queue(void);
   19 extern int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(void);
   20 extern void ldv_spin_lock_addr_list_lock_of_net_device(void);
   21 extern void ldv_spin_unlock_addr_list_lock_of_net_device(void);
   22 extern int ldv_spin_trylock_addr_list_lock_of_net_device(void);
   23 extern void ldv_spin_unlock_wait_addr_list_lock_of_net_device(void);
   24 extern int ldv_spin_is_locked_addr_list_lock_of_net_device(void);
   25 extern int ldv_spin_can_lock_addr_list_lock_of_net_device(void);
   26 extern int ldv_spin_is_contended_addr_list_lock_of_net_device(void);
   27 extern int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(void);
   28 extern void ldv_spin_lock_alloc_lock_of_task_struct(void);
   29 extern void ldv_spin_unlock_alloc_lock_of_task_struct(void);
   30 extern int ldv_spin_trylock_alloc_lock_of_task_struct(void);
   31 extern void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void);
   32 extern int ldv_spin_is_locked_alloc_lock_of_task_struct(void);
   33 extern int ldv_spin_can_lock_alloc_lock_of_task_struct(void);
   34 extern int ldv_spin_is_contended_alloc_lock_of_task_struct(void);
   35 extern int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void);
   36 extern void ldv_spin_lock_command_lock_of_typhoon(void);
   37 extern void ldv_spin_unlock_command_lock_of_typhoon(void);
   38 extern int ldv_spin_trylock_command_lock_of_typhoon(void);
   39 extern void ldv_spin_unlock_wait_command_lock_of_typhoon(void);
   40 extern int ldv_spin_is_locked_command_lock_of_typhoon(void);
   41 extern int ldv_spin_can_lock_command_lock_of_typhoon(void);
   42 extern int ldv_spin_is_contended_command_lock_of_typhoon(void);
   43 extern int ldv_atomic_dec_and_lock_command_lock_of_typhoon(void);
   44 extern void ldv_spin_lock_i_lock_of_inode(void);
   45 extern void ldv_spin_unlock_i_lock_of_inode(void);
   46 extern int ldv_spin_trylock_i_lock_of_inode(void);
   47 extern void ldv_spin_unlock_wait_i_lock_of_inode(void);
   48 extern int ldv_spin_is_locked_i_lock_of_inode(void);
   49 extern int ldv_spin_can_lock_i_lock_of_inode(void);
   50 extern int ldv_spin_is_contended_i_lock_of_inode(void);
   51 extern int ldv_atomic_dec_and_lock_i_lock_of_inode(void);
   52 extern void ldv_spin_lock_lock(void);
   53 extern void ldv_spin_unlock_lock(void);
   54 extern int ldv_spin_trylock_lock(void);
   55 extern void ldv_spin_unlock_wait_lock(void);
   56 extern int ldv_spin_is_locked_lock(void);
   57 extern int ldv_spin_can_lock_lock(void);
   58 extern int ldv_spin_is_contended_lock(void);
   59 extern int ldv_atomic_dec_and_lock_lock(void);
   60 extern void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void);
   61 extern void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void);
   62 extern int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void);
   63 extern void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void);
   64 extern int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void);
   65 extern int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void);
   66 extern int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void);
   67 extern int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void);
   68 extern void ldv_spin_lock_lru_lock_of_netns_frags(void);
   69 extern void ldv_spin_unlock_lru_lock_of_netns_frags(void);
   70 extern int ldv_spin_trylock_lru_lock_of_netns_frags(void);
   71 extern void ldv_spin_unlock_wait_lru_lock_of_netns_frags(void);
   72 extern int ldv_spin_is_locked_lru_lock_of_netns_frags(void);
   73 extern int ldv_spin_can_lock_lru_lock_of_netns_frags(void);
   74 extern int ldv_spin_is_contended_lru_lock_of_netns_frags(void);
   75 extern int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags(void);
   76 extern void ldv_spin_lock_node_size_lock_of_pglist_data(void);
   77 extern void ldv_spin_unlock_node_size_lock_of_pglist_data(void);
   78 extern int ldv_spin_trylock_node_size_lock_of_pglist_data(void);
   79 extern void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void);
   80 extern int ldv_spin_is_locked_node_size_lock_of_pglist_data(void);
   81 extern int ldv_spin_can_lock_node_size_lock_of_pglist_data(void);
   82 extern int ldv_spin_is_contended_node_size_lock_of_pglist_data(void);
   83 extern int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void);
   84 extern void ldv_spin_lock_ptl(void);
   85 extern void ldv_spin_unlock_ptl(void);
   86 extern int ldv_spin_trylock_ptl(void);
   87 extern void ldv_spin_unlock_wait_ptl(void);
   88 extern int ldv_spin_is_locked_ptl(void);
   89 extern int ldv_spin_can_lock_ptl(void);
   90 extern int ldv_spin_is_contended_ptl(void);
   91 extern int ldv_atomic_dec_and_lock_ptl(void);
   92 extern void ldv_spin_lock_siglock_of_sighand_struct(void);
   93 extern void ldv_spin_unlock_siglock_of_sighand_struct(void);
   94 extern int ldv_spin_trylock_siglock_of_sighand_struct(void);
   95 extern void ldv_spin_unlock_wait_siglock_of_sighand_struct(void);
   96 extern int ldv_spin_is_locked_siglock_of_sighand_struct(void);
   97 extern int ldv_spin_can_lock_siglock_of_sighand_struct(void);
   98 extern int ldv_spin_is_contended_siglock_of_sighand_struct(void);
   99 extern int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void);
  100 extern void ldv_spin_lock_tx_global_lock_of_net_device(void);
  101 extern void ldv_spin_unlock_tx_global_lock_of_net_device(void);
  102 extern int ldv_spin_trylock_tx_global_lock_of_net_device(void);
  103 extern void ldv_spin_unlock_wait_tx_global_lock_of_net_device(void);
  104 extern int ldv_spin_is_locked_tx_global_lock_of_net_device(void);
  105 extern int ldv_spin_can_lock_tx_global_lock_of_net_device(void);
  106 extern int ldv_spin_is_contended_tx_global_lock_of_net_device(void);
  107 extern int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device(void);
  108 
  109 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
  110 /*
  111 	Written 2002-2004 by David Dillow <dave@thedillows.org>
  112 	Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
  113 	Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
  114 
  115 	This software may be used and distributed according to the terms of
  116 	the GNU General Public License (GPL), incorporated herein by reference.
  117 	Drivers based on or derived from this code fall under the GPL and must
  118 	retain the authorship, copyright and license notice.  This file is not
  119 	a complete program and may only be used when the entire operating
  120 	system is licensed under the GPL.
  121 
  122 	This software is available on a public web site. It may enable
  123 	cryptographic capabilities of the 3Com hardware, and may be
  124 	exported from the United States under License Exception "TSU"
  125 	pursuant to 15 C.F.R. Section 740.13(e).
  126 
  127 	This work was funded by the National Library of Medicine under
  128 	the Department of Energy project number 0274DD06D1 and NLM project
  129 	number Y1-LM-2015-01.
  130 
  131 	This driver is designed for the 3Com 3CR990 Family of cards with the
  132 	3XP Processor. It has been tested on x86 and sparc64.
  133 
  134 	KNOWN ISSUES:
  135 	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
  136 		issue. Hopefully 3Com will fix it.
  137 	*) Waiting for a command response takes 8ms due to non-preemptable
  138 		polling. Only significant for getting stats and creating
  139 		SAs, but an ugly wart nevertheless.
  140 
  141 	TODO:
  142 	*) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
  143 	*) Add more support for ethtool (especially for NIC stats)
  144 	*) Allow disabling of RX checksum offloading
  145 	*) Fix MAC changing to work while the interface is up
  146 		(Need to put commands on the TX ring, which changes
  147 		the locking)
  148 	*) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
  149 		http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
  150 */
  151 
  152 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  153  * Setting to > 1518 effectively disables this feature.
  154  */
  155 static int rx_copybreak = 200;
  156 
  157 /* Should we use MMIO or Port IO?
  158  * 0: Port IO
  159  * 1: MMIO
  160  * 2: Try MMIO, fallback to Port IO
  161  */
  162 static unsigned int use_mmio = 2;
  163 
  164 /* end user-configurable values */
  165 
  166 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  167  */
  168 static const int multicast_filter_limit = 32;
  169 
  170 /* Operational parameters that are set at compile time. */
  171 
  172 /* Keep the ring sizes a power of two for compile efficiency.
  173  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  174  * Making the Tx ring too large decreases the effectiveness of channel
  175  * bonding and packet priority.
  176  * There are no ill effects from too-large receive rings.
  177  *
  178  * We don't currently use the Hi Tx ring, so don't make it very big.
  179  *
  180  * Beware that if we start using the Hi Tx ring, we will need to change
  181  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
  182  */
  183 #define TXHI_ENTRIES		2
  184 #define TXLO_ENTRIES		128
  185 #define RX_ENTRIES		32
  186 #define COMMAND_ENTRIES		16
  187 #define RESPONSE_ENTRIES	32
  188 
  189 #define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
  190 #define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
  191 
  192 /* The 3XP will preload and remove 64 entries from the free buffer
  193  * list, and we need one entry to keep the ring from wrapping, so
  194  * to keep this a power of two, we use 128 entries.
  195  */
  196 #define RXFREE_ENTRIES		128
  197 #define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
  198 
  199 /* Operational parameters that usually are not changed. */
  200 
  201 /* Time in jiffies before concluding the transmitter is hung. */
  202 #define TX_TIMEOUT  (2*HZ)
  203 
  204 #define PKT_BUF_SZ		1536
  205 #define FIRMWARE_NAME		"3com/typhoon.bin"
  206 
  207 #define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
  208 
  209 #include <linux/module.h>
  210 #include <linux/kernel.h>
  211 #include <linux/sched.h>
  212 #include <linux/string.h>
  213 #include <linux/timer.h>
  214 #include <linux/errno.h>
  215 #include <linux/ioport.h>
  216 #include <linux/interrupt.h>
  217 #include <linux/pci.h>
  218 #include <linux/netdevice.h>
  219 #include <linux/etherdevice.h>
  220 #include <linux/skbuff.h>
  221 #include <linux/mm.h>
  222 #include <linux/init.h>
  223 #include <linux/delay.h>
  224 #include <linux/ethtool.h>
  225 #include <linux/if_vlan.h>
  226 #include <linux/crc32.h>
  227 #include <linux/bitops.h>
  228 #include <asm/processor.h>
  229 #include <asm/io.h>
  230 #include <asm/uaccess.h>
  231 #include <linux/in6.h>
  232 #include <linux/dma-mapping.h>
  233 #include <linux/firmware.h>
  234 
  235 #include "typhoon.h"
  236 
  237 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
  238 MODULE_VERSION("1.0");
  239 MODULE_LICENSE("GPL");
  240 MODULE_FIRMWARE(FIRMWARE_NAME);
  241 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
  242 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
  243 			       "the buffer given back to the NIC. Default "
  244 			       "is 200.");
  245 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
  246 			   "Default is to try MMIO and fallback to PIO.");
  247 module_param(rx_copybreak, int, 0);
  248 module_param(use_mmio, int, 0);
  249 
  250 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
  251 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
  252 #undef NETIF_F_TSO
  253 #endif
  254 
  255 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
  256 #error TX ring too small!
  257 #endif
  258 
  259 struct typhoon_card_info {
  260 	const char *name;
  261 	const int capabilities;
  262 };
  263 
  264 #define TYPHOON_CRYPTO_NONE		0x00
  265 #define TYPHOON_CRYPTO_DES		0x01
  266 #define TYPHOON_CRYPTO_3DES		0x02
  267 #define	TYPHOON_CRYPTO_VARIABLE		0x04
  268 #define TYPHOON_FIBER			0x08
  269 #define TYPHOON_WAKEUP_NEEDS_RESET	0x10
  270 
  271 enum typhoon_cards {
  272 	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
  273 	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
  274 	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
  275 	TYPHOON_FXM,
  276 };
  277 
  278 /* directly indexed by enum typhoon_cards, above */
  279 static struct typhoon_card_info typhoon_card_info[] = {
  280 	{ "3Com Typhoon (3C990-TX)",
  281 		TYPHOON_CRYPTO_NONE},
  282 	{ "3Com Typhoon (3CR990-TX-95)",
  283 		TYPHOON_CRYPTO_DES},
  284 	{ "3Com Typhoon (3CR990-TX-97)",
  285 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
  286 	{ "3Com Typhoon (3C990SVR)",
  287 		TYPHOON_CRYPTO_NONE},
  288 	{ "3Com Typhoon (3CR990SVR95)",
  289 		TYPHOON_CRYPTO_DES},
  290 	{ "3Com Typhoon (3CR990SVR97)",
  291 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
  292 	{ "3Com Typhoon2 (3C990B-TX-M)",
  293 		TYPHOON_CRYPTO_VARIABLE},
  294 	{ "3Com Typhoon2 (3C990BSVR)",
  295 		TYPHOON_CRYPTO_VARIABLE},
  296 	{ "3Com Typhoon (3CR990-FX-95)",
  297 		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
  298 	{ "3Com Typhoon (3CR990-FX-97)",
  299 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
  300 	{ "3Com Typhoon (3CR990-FX-95 Server)",
  301 	 	TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
  302 	{ "3Com Typhoon (3CR990-FX-97 Server)",
  303 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
  304 	{ "3Com Typhoon2 (3C990B-FX-97)",
  305 		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
  306 };
  307 
  308 /* Notes on the new subsystem numbering scheme:
  309  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
  310  * bit 4 indicates if this card has secured firmware (we don't support it)
  311  * bit 8 indicates if this is a (0) copper or (1) fiber card
  312  * bits 12-16 indicate card type: (0) client and (1) server
  313  */
  314 static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
  315 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
  316 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
  317 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
  318 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
  319 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
  320 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
  321 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
  322 	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
  323 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
  324 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
  325 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
  326 	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
  327 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
  328 	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
  329 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
  330 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
  331 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
  332 	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
  333 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
  334 	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
  335 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
  336 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
  337 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
  338 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
  339 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
  340 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
  341 	{ 0, }
  342 };
  343 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
  344 
  345 /* Define the shared memory area
  346  * Align everything the 3XP will normally be using.
  347  * We'll need to move/align txHi if we start using that ring.
  348  */
  349 #define __3xp_aligned	____cacheline_aligned
  350 struct typhoon_shared {
  351 	struct typhoon_interface	iface;
  352 	struct typhoon_indexes		indexes			__3xp_aligned;
  353 	struct tx_desc			txLo[TXLO_ENTRIES] 	__3xp_aligned;
  354 	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
  355 	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
  356 	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
  357 	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
  358 	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
  359 	u32				zeroWord;
  360 	struct tx_desc			txHi[TXHI_ENTRIES];
  361 } __packed;
  362 
  363 struct rxbuff_ent {
  364 	struct sk_buff *skb;
  365 	dma_addr_t	dma_addr;
  366 };
  367 
  368 struct typhoon {
  369 	/* Tx cache line section */
  370 	struct transmit_ring 	txLoRing	____cacheline_aligned;
  371 	struct pci_dev *	tx_pdev;
  372 	void __iomem		*tx_ioaddr;
  373 	u32			txlo_dma_addr;
  374 
  375 	/* Irq/Rx cache line section */
  376 	void __iomem		*ioaddr		____cacheline_aligned;
  377 	struct typhoon_indexes *indexes;
  378 	u8			awaiting_resp;
  379 	u8			duplex;
  380 	u8			speed;
  381 	u8			card_state;
  382 	struct basic_ring	rxLoRing;
  383 	struct pci_dev *	pdev;
  384 	struct net_device *	dev;
  385 	struct napi_struct	napi;
  386 	struct basic_ring	rxHiRing;
  387 	struct basic_ring	rxBuffRing;
  388 	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];
  389 
  390 	/* general section */
  391 	spinlock_t		command_lock	____cacheline_aligned;
  392 	struct basic_ring	cmdRing;
  393 	struct basic_ring	respRing;
  394 	struct net_device_stats	stats;
  395 	struct net_device_stats	stats_saved;
  396 	struct typhoon_shared *	shared;
  397 	dma_addr_t		shared_dma;
  398 	__le16			xcvr_select;
  399 	__le16			wol_events;
  400 	__le32			offload;
  401 
  402 	/* unused stuff (future use) */
  403 	int			capabilities;
  404 	struct transmit_ring 	txHiRing;
  405 };
  406 
  407 enum completion_wait_values {
  408 	NoWait = 0, WaitNoSleep, WaitSleep,
  409 };
  410 
  411 /* These are the values for the typhoon.card_state variable.
  412  * These determine where the statistics will come from in get_stats().
  413  * The sleep image does not support the statistics we need.
  414  */
  415 enum state_values {
  416 	Sleeping = 0, Running,
  417 };
  418 
  419 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
  420  * cannot pass a read, so this forces current writes to post.
  421  */
  422 #define typhoon_post_pci_writes(x) \
  423 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
  424 
  425 /* We'll wait up to six seconds for a reset, and half a second normally.
  426  */
  427 #define TYPHOON_UDELAY			50
  428 #define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
  429 #define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
  430 #define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
  431 
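Worked arithmetic for the timeouts above (what the macros evaluate to, with TYPHOON_UDELAY meaning 50 microseconds per poll):

	/* TYPHOON_RESET_TIMEOUT_NOSLEEP = 6,000,000 / 50 = 120,000 polls * 50 us = 6 s
	 * TYPHOON_WAIT_TIMEOUT          =   500,000 / 50 =  10,000 polls * 50 us = 0.5 s
	 * matching "six seconds for a reset, and half a second normally" above. */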
  432 #if defined(NETIF_F_TSO)
  433 #define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
  434 #define TSO_NUM_DESCRIPTORS	2
  435 #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
  436 #else
  437 #define NETIF_F_TSO 		0
  438 #define skb_tso_size(x) 	0
  439 #define TSO_NUM_DESCRIPTORS	0
  440 #define TSO_OFFLOAD_ON		0
  441 #endif
  442 
  443 static inline void
  444 typhoon_inc_index(u32 *index, const int count, const int num_entries)
  445 {
   446 	/* Increment a ring index -- we can use this for all rings except
   447 	 * the Rx rings, as they use different size descriptors;
   448 	 * otherwise, everything is the same size as a cmd_desc
  449 	 */
  450 	*index += count * sizeof(struct cmd_desc);
  451 	*index %= num_entries * sizeof(struct cmd_desc);
  452 }
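A worked example of the wrap, assuming sizeof(struct cmd_desc) == 16 as the trace's expansion ("* 16U", "% ... * 16UL") indicates; with COMMAND_ENTRIES == 16 the command ring index space is 256 bytes, and typhoon_inc_cmd_index() just below forwards here:

	u32 idx = 15 * 16;		/* lastWrite at the final slot (byte 240) */
	typhoon_inc_cmd_index(&idx, 1);	/* idx += 16; idx %= 256; -> wraps to 0 */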
  453 
  454 static inline void
  455 typhoon_inc_cmd_index(u32 *index, const int count)
  456 {
  457 	typhoon_inc_index(index, count, COMMAND_ENTRIES);
  458 }
  459 
  460 static inline void
  461 typhoon_inc_resp_index(u32 *index, const int count)
  462 {
  463 	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
  464 }
  465 
  466 static inline void
  467 typhoon_inc_rxfree_index(u32 *index, const int count)
  468 {
  469 	typhoon_inc_index(index, count, RXFREE_ENTRIES);
  470 }
  471 
  472 static inline void
  473 typhoon_inc_tx_index(u32 *index, const int count)
  474 {
  475 	/* if we start using the Hi Tx ring, this needs updating */
  476 	typhoon_inc_index(index, count, TXLO_ENTRIES);
  477 }
  478 
  479 static inline void
  480 typhoon_inc_rx_index(u32 *index, const int count)
  481 {
  482 	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
  483 	*index += count * sizeof(struct rx_desc);
  484 	*index %= RX_ENTRIES * sizeof(struct rx_desc);
  485 }
  486 
  487 static int
  488 typhoon_reset(void __iomem *ioaddr, int wait_type)
  489 {
  490 	int i, err = 0;
  491 	int timeout;
  492 
  493 	if(wait_type == WaitNoSleep)
  494 		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
  495 	else
  496 		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
  497 
  498 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
  499 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
  500 
  501 	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
  502 	typhoon_post_pci_writes(ioaddr);
  503 	udelay(1);
  504 	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
  505 
  506 	if(wait_type != NoWait) {
  507 		for(i = 0; i < timeout; i++) {
  508 			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
  509 			   TYPHOON_STATUS_WAITING_FOR_HOST)
  510 				goto out;
  511 
  512 			if(wait_type == WaitSleep)
  513 				schedule_timeout_uninterruptible(1);
  514 			else
  515 				udelay(TYPHOON_UDELAY);
  516 		}
  517 
  518 		err = -ETIMEDOUT;
  519 	}
  520 
  521 out:
  522 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
  523 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
  524 
  525 	/* The 3XP seems to need a little extra time to complete the load
  526 	 * of the sleep image before we can reliably boot it. Failure to
  527 	 * do this occasionally results in a hung adapter after boot in
  528 	 * typhoon_init_one() while trying to read the MAC address or
  529 	 * putting the card to sleep. 3Com's driver waits 5ms, but
  530 	 * that seems to be overkill. However, if we can sleep, we might
  531 	 * as well give it that much time. Otherwise, we'll give it 500us,
   532 	 * which should be enough (I've seen it work well at 100us, but still
  533 	 * saw occasional problems.)
  534 	 */
  535 	if(wait_type == WaitSleep)
  536 		msleep(5);
  537 	else
  538 		udelay(500);
  539 	return err;
  540 }
  541 
  542 static int
  543 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
  544 {
  545 	int i, err = 0;
  546 
  547 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
  548 		if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
  549 			goto out;
  550 		udelay(TYPHOON_UDELAY);
  551 	}
  552 
  553 	err = -ETIMEDOUT;
  554 
  555 out:
  556 	return err;
  557 }
  558 
  559 static inline void
  560 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
  561 {
  562 	if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
  563 		netif_carrier_off(dev);
  564 	else
  565 		netif_carrier_on(dev);
  566 }
  567 
  568 static inline void
  569 typhoon_hello(struct typhoon *tp)
  570 {
  571 	struct basic_ring *ring = &tp->cmdRing;
  572 	struct cmd_desc *cmd;
  573 
  574 	/* We only get a hello request if we've not sent anything to the
  575 	 * card in a long while. If the lock is held, then we're in the
  576 	 * process of issuing a command, so we don't need to respond.
  577 	 */
  578 	if(spin_trylock(&tp->command_lock)) {
  579 		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
  580 		typhoon_inc_cmd_index(&ring->lastWrite, 1);
  581 
  582 		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
  583 		wmb();
  584 		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
  585 		spin_unlock(&tp->command_lock);
  586 	}
  587 }
  588 
  589 static int
  590 typhoon_process_response(struct typhoon *tp, int resp_size,
  591 				struct resp_desc *resp_save)
  592 {
  593 	struct typhoon_indexes *indexes = tp->indexes;
  594 	struct resp_desc *resp;
  595 	u8 *base = tp->respRing.ringBase;
  596 	int count, len, wrap_len;
  597 	u32 cleared;
  598 	u32 ready;
  599 
  600 	cleared = le32_to_cpu(indexes->respCleared);
  601 	ready = le32_to_cpu(indexes->respReady);
  602 	while(cleared != ready) {
  603 		resp = (struct resp_desc *)(base + cleared);
  604 		count = resp->numDesc + 1;
  605 		if(resp_save && resp->seqNo) {
  606 			if(count > resp_size) {
  607 				resp_save->flags = TYPHOON_RESP_ERROR;
  608 				goto cleanup;
  609 			}
  610 
  611 			wrap_len = 0;
  612 			len = count * sizeof(*resp);
  613 			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
  614 				wrap_len = cleared + len - RESPONSE_RING_SIZE;
  615 				len = RESPONSE_RING_SIZE - cleared;
  616 			}
  617 
  618 			memcpy(resp_save, resp, len);
  619 			if(unlikely(wrap_len)) {
  620 				resp_save += len / sizeof(*resp);
  621 				memcpy(resp_save, base, wrap_len);
  622 			}
  623 
  624 			resp_save = NULL;
  625 		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
  626 			typhoon_media_status(tp->dev, resp);
  627 		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
  628 			typhoon_hello(tp);
  629 		} else {
  630 			netdev_err(tp->dev,
  631 				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
  632 				   le16_to_cpu(resp->cmd),
  633 				   resp->numDesc, resp->flags,
  634 				   le16_to_cpu(resp->parm1),
  635 				   le32_to_cpu(resp->parm2),
  636 				   le32_to_cpu(resp->parm3));
  637 		}
  638 
  639 cleanup:
  640 		typhoon_inc_resp_index(&cleared, count);
  641 	}
  642 
  643 	indexes->respCleared = cpu_to_le32(cleared);
  644 	wmb();
  645 	return resp_save == NULL;
  646 }
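A worked example of the wrap split above: if cleared sits at the last descriptor slot, i.e. RESPONSE_RING_SIZE - sizeof(*resp), and count == 2 (so len starts as 2 * sizeof(*resp)), the branch computes, in code order:

	wrap_len = cleared + len - RESPONSE_RING_SIZE;	/* == sizeof(*resp), copied from the ring base */
	len      = RESPONSE_RING_SIZE - cleared;	/* == sizeof(*resp), copied from the ring tail */

so the two descriptors land contiguously in resp_save.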
  647 
  648 static inline int
  649 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
  650 {
  651 	/* this works for all descriptors but rx_desc, as they are a
  652 	 * different size than the cmd_desc -- everyone else is the same
  653 	 */
  654 	lastWrite /= sizeof(struct cmd_desc);
  655 	lastRead /= sizeof(struct cmd_desc);
  656 	return (ringSize + lastRead - lastWrite - 1) % ringSize;
  657 }
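A worked example of the free-count formula, with COMMAND_ENTRIES == 16 and, as above, assuming 16-byte command descriptors (the indices passed in are byte offsets and get scaled down to slots first):

	/* lastWrite at slot 3, lastRead (cmdCleared) at slot 14:
	 * (16 + 14 - 3 - 1) % 16 == 10 free slots; one slot is always kept
	 * unused so a full ring never looks empty. */
	int free_slots = typhoon_num_free(3 * 16, 14 * 16, COMMAND_ENTRIES);	/* -> 10 */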
  658 
  659 static inline int
  660 typhoon_num_free_cmd(struct typhoon *tp)
  661 {
  662 	int lastWrite = tp->cmdRing.lastWrite;
  663 	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
  664 
  665 	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
  666 }
  667 
  668 static inline int
  669 typhoon_num_free_resp(struct typhoon *tp)
  670 {
  671 	int respReady = le32_to_cpu(tp->indexes->respReady);
  672 	int respCleared = le32_to_cpu(tp->indexes->respCleared);
  673 
  674 	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
  675 }
  676 
  677 static inline int
  678 typhoon_num_free_tx(struct transmit_ring *ring)
  679 {
  680 	/* if we start using the Hi Tx ring, this needs updating */
  681 	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
  682 }
  683 
  684 static int
  685 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
  686 		      int num_resp, struct resp_desc *resp)
  687 {
  688 	struct typhoon_indexes *indexes = tp->indexes;
  689 	struct basic_ring *ring = &tp->cmdRing;
  690 	struct resp_desc local_resp;
  691 	int i, err = 0;
  692 	int got_resp;
  693 	int freeCmd, freeResp;
  694 	int len, wrap_len;
  695 
  696 	spin_lock(&tp->command_lock);
  697 
  698 	freeCmd = typhoon_num_free_cmd(tp);
  699 	freeResp = typhoon_num_free_resp(tp);
  700 
  701 	if(freeCmd < num_cmd || freeResp < num_resp) {
  702 		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
  703 			   freeCmd, num_cmd, freeResp, num_resp);
  704 		err = -ENOMEM;
  705 		goto out;
  706 	}
  707 
  708 	if(cmd->flags & TYPHOON_CMD_RESPOND) {
  709 		/* If we're expecting a response, but the caller hasn't given
  710 		 * us a place to put it, we'll provide one.
  711 		 */
  712 		tp->awaiting_resp = 1;
  713 		if(resp == NULL) {
  714 			resp = &local_resp;
  715 			num_resp = 1;
  716 		}
  717 	}
  718 
  719 	wrap_len = 0;
  720 	len = num_cmd * sizeof(*cmd);
  721 	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
  722 		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
  723 		len = COMMAND_RING_SIZE - ring->lastWrite;
  724 	}
  725 
  726 	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
  727 	if(unlikely(wrap_len)) {
  728 		struct cmd_desc *wrap_ptr = cmd;
  729 		wrap_ptr += len / sizeof(*cmd);
  730 		memcpy(ring->ringBase, wrap_ptr, wrap_len);
  731 	}
  732 
  733 	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
  734 
  735 	/* "I feel a presence... another warrior is on the mesa."
  736 	 */
  737 	wmb();
  738 	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
  739 	typhoon_post_pci_writes(tp->ioaddr);
  740 
  741 	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
  742 		goto out;
  743 
  744 	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
  745 	 * preempt or do anything other than take interrupts. So, don't
  746 	 * wait for a response unless you have to.
  747 	 *
  748 	 * I've thought about trying to sleep here, but we're called
  749 	 * from many contexts that don't allow that. Also, given the way
  750 	 * 3Com has implemented irq coalescing, we would likely timeout --
  751 	 * this has been observed in real life!
  752 	 *
  753 	 * The big killer is we have to wait to get stats from the card,
  754 	 * though we could go to a periodic refresh of those if we don't
  755 	 * mind them getting somewhat stale. The rest of the waiting
  756 	 * commands occur during open/close/suspend/resume, so they aren't
  757 	 * time critical. Creating SAs in the future will also have to
  758 	 * wait here.
  759 	 */
  760 	got_resp = 0;
  761 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
  762 		if(indexes->respCleared != indexes->respReady)
  763 			got_resp = typhoon_process_response(tp, num_resp,
  764 								resp);
  765 		udelay(TYPHOON_UDELAY);
  766 	}
  767 
  768 	if(!got_resp) {
  769 		err = -ETIMEDOUT;
  770 		goto out;
  771 	}
  772 
  773 	/* Collect the error response even if we don't care about the
  774 	 * rest of the response
  775 	 */
  776 	if(resp->flags & TYPHOON_RESP_ERROR)
  777 		err = -EIO;
  778 
  779 out:
  780 	if(tp->awaiting_resp) {
  781 		tp->awaiting_resp = 0;
  782 		smp_wmb();
  783 
  784 		/* Ugh. If a response was added to the ring between
  785 		 * the call to typhoon_process_response() and the clearing
  786 		 * of tp->awaiting_resp, we could have missed the interrupt
  787 		 * and it could hang in the ring an indeterminate amount of
  788 		 * time. So, check for it, and interrupt ourselves if this
  789 		 * is the case.
  790 		 */
  791 		if(indexes->respCleared != indexes->respReady)
  792 			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
  793 	}
  794 
  795 	spin_unlock(&tp->command_lock);
  796 	return err;
  797 }
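
To make the wrap handling above (source lines 719-731) easier to follow, here is a small standalone sketch of the same split-copy pattern on a byte ring; the buffer, its size, and the helper name are hypothetical and not part of the driver.

	#include <stdint.h>
	#include <string.h>

	#define DEMO_RING_SIZE 256u

	static uint8_t demo_ring[DEMO_RING_SIZE];

	/* Copy len bytes (len <= DEMO_RING_SIZE) into the ring at *last_write,
	 * splitting the copy when it would run past the end of the ring.
	 */
	static void demo_ring_write(uint32_t *last_write, const void *src, uint32_t len)
	{
		uint32_t tail = DEMO_RING_SIZE - *last_write;	/* room before the wrap point */
		uint32_t first = len < tail ? len : tail;

		memcpy(demo_ring + *last_write, src, first);
		if (len > first)				/* wrapped remainder, if any */
			memcpy(demo_ring, (const uint8_t *)src + first, len - first);

		*last_write = (*last_write + len) % DEMO_RING_SIZE;
	}
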
  798 
  799 static inline void
  800 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
  801 			u32 ring_dma)
  802 {
  803 	struct tcpopt_desc *tcpd;
  804 	u32 tcpd_offset = ring_dma;
  805 
  806 	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
  807 	tcpd_offset += txRing->lastWrite;
  808 	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
  809 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
  810 
  811 	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
  812 	tcpd->numDesc = 1;
  813 	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
  814 	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
  815 	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
  816 	tcpd->bytesTx = cpu_to_le32(skb->len);
  817 	tcpd->status = 0;
  818 }
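
The respAddrLo computed in typhoon_tso_fill() above points the card back at the bytesTx field of the very descriptor being filled: ring bus address + descriptor offset within the ring + field offset. A minimal sketch of that arithmetic, using a simplified stand-in for the descriptor layout (not the driver's actual struct):

	#include <stddef.h>
	#include <stdint.h>

	struct demo_tcpopt_desc {
		uint8_t  flags;
		uint8_t  numDesc;
		uint16_t mss_flags;
		uint32_t respAddrLo;
		uint32_t bytesTx;
		uint32_t status;
	};

	/* Bus address the card should write its completion to. */
	static uint32_t demo_resp_addr(uint32_t ring_dma, uint32_t desc_offset)
	{
		return ring_dma + desc_offset +
		       (uint32_t)offsetof(struct demo_tcpopt_desc, bytesTx);
	}
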
  819 
  820 static netdev_tx_t
  821 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
  822 {
  823 	struct typhoon *tp = netdev_priv(dev);
  824 	struct transmit_ring *txRing;
  825 	struct tx_desc *txd, *first_txd;
  826 	dma_addr_t skb_dma;
  827 	int numDesc;
  828 
  829 	/* we have two rings to choose from, but we only use txLo for now
  830 	 * If we start using the Hi ring as well, we'll need to update
  831 	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
  832 	 * and TXHI_ENTRIES to match, as well as update the TSO code below
  833 	 * to get the right DMA address
  834 	 */
  835 	txRing = &tp->txLoRing;
  836 
  837 	/* We need one descriptor for each fragment of the sk_buff, plus the
  838 	 * one for the ->data area of it.
  839 	 *
  840 	 * The docs say a maximum of 16 fragment descriptors per TCP option
  841 	 * descriptor, then make a new packet descriptor and option descriptor
  842 	 * for the next 16 fragments. The engineers say just an option
  843 	 * descriptor is needed. I've tested up to 26 fragments with a single
  844 	 * packet descriptor/option descriptor combo, so I use that for now.
  845 	 *
  846 	 * If problems develop with TSO, check this first.
  847 	 */
  848 	numDesc = skb_shinfo(skb)->nr_frags + 1;
  849 	if (skb_is_gso(skb))
  850 		numDesc++;
  851 
  852 	/* When checking for free space in the ring, we need to also
  853 	 * account for the initial Tx descriptor, and we always must leave
  854 	 * at least one descriptor unused in the ring so that it doesn't
  855 	 * wrap and look empty.
  856 	 *
  857 	 * The only time we should loop here is when we hit the race
  858 	 * between marking the queue awake and updating the cleared index.
  859 	 * Just loop and it will appear. This comes from the acenic driver.
  860 	 */
  861 	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
  862 		smp_rmb();
  863 
  864 	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
  865 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
  866 
  867 	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
  868 	first_txd->numDesc = 0;
  869 	first_txd->len = 0;
  870 	first_txd->tx_addr = (u64)((unsigned long) skb);
  871 	first_txd->processFlags = 0;
  872 
  873 	if(skb->ip_summed == CHECKSUM_PARTIAL) {
  874 		/* The 3XP will figure out if this is UDP/TCP */
  875 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
  876 		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
  877 		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
  878 	}
  879 
  880 	if(vlan_tx_tag_present(skb)) {
  881 		first_txd->processFlags |=
  882 		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
  883 		first_txd->processFlags |=
  884 		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
  885 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
  886 	}
  887 
  888 	if (skb_is_gso(skb)) {
  889 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
  890 		first_txd->numDesc++;
  891 
  892 		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
  893 	}
  894 
  895 	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
  896 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
  897 
  898 	/* No need to worry about padding packet -- the firmware pads
  899 	 * it with zeros to ETH_ZLEN for us.
  900 	 */
  901 	if(skb_shinfo(skb)->nr_frags == 0) {
  902 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
  903 				       PCI_DMA_TODEVICE);
  904 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
  905 		txd->len = cpu_to_le16(skb->len);
  906 		txd->frag.addr = cpu_to_le32(skb_dma);
  907 		txd->frag.addrHi = 0;
  908 		first_txd->numDesc++;
  909 	} else {
  910 		int i, len;
  911 
  912 		len = skb_headlen(skb);
  913 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
  914 				         PCI_DMA_TODEVICE);
  915 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
  916 		txd->len = cpu_to_le16(len);
  917 		txd->frag.addr = cpu_to_le32(skb_dma);
  918 		txd->frag.addrHi = 0;
  919 		first_txd->numDesc++;
  920 
  921 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  922 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  923 			void *frag_addr;
  924 
  925 			txd = (struct tx_desc *) (txRing->ringBase +
  926 						txRing->lastWrite);
  927 			typhoon_inc_tx_index(&txRing->lastWrite, 1);
  928 
  929 			len = skb_frag_size(frag);
  930 			frag_addr = skb_frag_address(frag);
  931 			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
  932 					 PCI_DMA_TODEVICE);
  933 			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
  934 			txd->len = cpu_to_le16(len);
  935 			txd->frag.addr = cpu_to_le32(skb_dma);
  936 			txd->frag.addrHi = 0;
  937 			first_txd->numDesc++;
  938 		}
  939 	}
  940 
  941 	/* Kick the 3XP
  942 	 */
  943 	wmb();
  944 	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
  945 
  946 	/* If we don't have room to put the worst case packet on the
  947 	 * queue, then we must stop the queue. We need 2 extra
  948 	 * descriptors -- one to prevent ring wrap, and one for the
  949 	 * Tx header.
  950 	 */
  951 	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
  952 
  953 	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
  954 		netif_stop_queue(dev);
  955 
  956 		/* A Tx complete IRQ could have gotten between, making
  957 		 * the ring free again. Only need to recheck here, since
  958 		 * Tx is serialized.
  959 		 */
  960 		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
  961 			netif_wake_queue(dev);
  962 	}
  963 
  964 	return NETDEV_TX_OK;
  965 }
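
The stop-queue test above budgets for the worst-case packet: MAX_SKB_FRAGS fragment descriptors, one descriptor for skb->data, the TSO option descriptor(s), the Tx header descriptor, and the one slot that must always stay empty. A sketch of that arithmetic (the helper is hypothetical; the arguments mirror MAX_SKB_FRAGS and TSO_NUM_DESCRIPTORS):

	static int demo_worst_case_tx_descs(int max_frags, int tso_descs)
	{
		/* fragments + skb->data + TSO option(s) + Tx header descriptor
		 * + one slot left unused so "full" is never mistaken for "empty"
		 */
		return max_frags + 1 + tso_descs + 1 + 1;
	}
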
  966 
  967 static void
  968 typhoon_set_rx_mode(struct net_device *dev)
  969 {
  970 	struct typhoon *tp = netdev_priv(dev);
  971 	struct cmd_desc xp_cmd;
  972 	u32 mc_filter[2];
  973 	__le16 filter;
  974 
  975 	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
  976 	if(dev->flags & IFF_PROMISC) {
  977 		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
  978 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
  979 		  (dev->flags & IFF_ALLMULTI)) {
  980 		/* Too many to match, or accept all multicasts. */
  981 		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
  982 	} else if (!netdev_mc_empty(dev)) {
  983 		struct netdev_hw_addr *ha;
  984 
  985 		memset(mc_filter, 0, sizeof(mc_filter));
  986 		netdev_for_each_mc_addr(ha, dev) {
  987 			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
  988 			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
  989 		}
  990 
  991 		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
  992 					 TYPHOON_CMD_SET_MULTICAST_HASH);
  993 		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
  994 		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
  995 		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
  996 		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
  997 
  998 		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
  999 	}
 1000 
 1001 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
 1002 	xp_cmd.parm1 = filter;
 1003 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 1004 }
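
The multicast hash above uses the low six bits of the Ethernet CRC of each address to select one bit out of a 64-bit filter split across mc_filter[0] and mc_filter[1]. A standalone sketch of that bit selection (the CRC value is taken as a parameter here; the driver computes it with ether_crc()):

	#include <stdint.h>

	static void demo_mc_hash_set(uint32_t mc_filter[2], uint32_t crc)
	{
		int bit = crc & 0x3f;				/* low 6 bits: 0..63 */

		mc_filter[bit >> 5] |= 1u << (bit & 0x1f);	/* pick the word, then the bit */
	}
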
 1005 
 1006 static int
 1007 typhoon_do_get_stats(struct typhoon *tp)
 1008 {
 1009 	struct net_device_stats *stats = &tp->stats;
 1010 	struct net_device_stats *saved = &tp->stats_saved;
 1011 	struct cmd_desc xp_cmd;
 1012 	struct resp_desc xp_resp[7];
 1013 	struct stats_resp *s = (struct stats_resp *) xp_resp;
 1014 	int err;
 1015 
 1016 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
 1017 	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
 1018 	if(err < 0)
 1019 		return err;
 1020 
  1021 	/* 3Com's Linux driver uses txMultipleCollisions as its
 1022 	 * collisions value, but there is some other collision info as well...
 1023 	 *
 1024 	 * The extra status reported would be a good candidate for
 1025 	 * ethtool_ops->get_{strings,stats}()
 1026 	 */
 1027 	stats->tx_packets = le32_to_cpu(s->txPackets) +
 1028 			saved->tx_packets;
 1029 	stats->tx_bytes = le64_to_cpu(s->txBytes) +
 1030 			saved->tx_bytes;
 1031 	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
 1032 			saved->tx_errors;
 1033 	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
 1034 			saved->tx_carrier_errors;
 1035 	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
 1036 			saved->collisions;
 1037 	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
 1038 			saved->rx_packets;
 1039 	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
 1040 			saved->rx_bytes;
 1041 	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
 1042 			saved->rx_fifo_errors;
 1043 	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
 1044 			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
 1045 			saved->rx_errors;
 1046 	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
 1047 			saved->rx_crc_errors;
 1048 	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
 1049 			saved->rx_length_errors;
 1050 	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
 1051 			SPEED_100 : SPEED_10;
 1052 	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
 1053 			DUPLEX_FULL : DUPLEX_HALF;
 1054 
 1055 	return 0;
 1056 }
 1057 
 1058 static struct net_device_stats *
 1059 typhoon_get_stats(struct net_device *dev)
 1060 {
 1061 	struct typhoon *tp = netdev_priv(dev);
 1062 	struct net_device_stats *stats = &tp->stats;
 1063 	struct net_device_stats *saved = &tp->stats_saved;
 1064 
 1065 	smp_rmb();
 1066 	if(tp->card_state == Sleeping)
 1067 		return saved;
 1068 
 1069 	if(typhoon_do_get_stats(tp) < 0) {
 1070 		netdev_err(dev, "error getting stats\n");
 1071 		return saved;
 1072 	}
 1073 
 1074 	return stats;
 1075 }
 1076 
 1077 static void
 1078 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 1079 {
 1080 	struct typhoon *tp = netdev_priv(dev);
 1081 	struct pci_dev *pci_dev = tp->pdev;
 1082 	struct cmd_desc xp_cmd;
 1083 	struct resp_desc xp_resp[3];
 1084 
 1085 	smp_rmb();
 1086 	if(tp->card_state == Sleeping) {
 1087 		strlcpy(info->fw_version, "Sleep image",
 1088 			sizeof(info->fw_version));
 1089 	} else {
 1090 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
 1091 		if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
 1092 			strlcpy(info->fw_version, "Unknown runtime",
 1093 				sizeof(info->fw_version));
 1094 		} else {
 1095 			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
 1096 			snprintf(info->fw_version, sizeof(info->fw_version),
 1097 				"%02x.%03x.%03x", sleep_ver >> 24,
 1098 				(sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
 1099 		}
 1100 	}
 1101 
 1102 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
 1103 	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 1104 }
 1105 
 1106 static int
 1107 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 1108 {
 1109 	struct typhoon *tp = netdev_priv(dev);
 1110 
 1111 	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
 1112 				SUPPORTED_Autoneg;
 1113 
 1114 	switch (tp->xcvr_select) {
 1115 	case TYPHOON_XCVR_10HALF:
 1116 		cmd->advertising = ADVERTISED_10baseT_Half;
 1117 		break;
 1118 	case TYPHOON_XCVR_10FULL:
 1119 		cmd->advertising = ADVERTISED_10baseT_Full;
 1120 		break;
 1121 	case TYPHOON_XCVR_100HALF:
 1122 		cmd->advertising = ADVERTISED_100baseT_Half;
 1123 		break;
 1124 	case TYPHOON_XCVR_100FULL:
 1125 		cmd->advertising = ADVERTISED_100baseT_Full;
 1126 		break;
 1127 	case TYPHOON_XCVR_AUTONEG:
 1128 		cmd->advertising = ADVERTISED_10baseT_Half |
 1129 					    ADVERTISED_10baseT_Full |
 1130 					    ADVERTISED_100baseT_Half |
 1131 					    ADVERTISED_100baseT_Full |
 1132 					    ADVERTISED_Autoneg;
 1133 		break;
 1134 	}
 1135 
 1136 	if(tp->capabilities & TYPHOON_FIBER) {
 1137 		cmd->supported |= SUPPORTED_FIBRE;
 1138 		cmd->advertising |= ADVERTISED_FIBRE;
 1139 		cmd->port = PORT_FIBRE;
 1140 	} else {
 1141 		cmd->supported |= SUPPORTED_10baseT_Half |
 1142 		    			SUPPORTED_10baseT_Full |
 1143 					SUPPORTED_TP;
 1144 		cmd->advertising |= ADVERTISED_TP;
 1145 		cmd->port = PORT_TP;
 1146 	}
 1147 
 1148 	/* need to get stats to make these link speed/duplex valid */
 1149 	typhoon_do_get_stats(tp);
 1150 	ethtool_cmd_speed_set(cmd, tp->speed);
 1151 	cmd->duplex = tp->duplex;
 1152 	cmd->phy_address = 0;
 1153 	cmd->transceiver = XCVR_INTERNAL;
 1154 	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
 1155 		cmd->autoneg = AUTONEG_ENABLE;
 1156 	else
 1157 		cmd->autoneg = AUTONEG_DISABLE;
 1158 	cmd->maxtxpkt = 1;
 1159 	cmd->maxrxpkt = 1;
 1160 
 1161 	return 0;
 1162 }
 1163 
 1164 static int
 1165 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 1166 {
 1167 	struct typhoon *tp = netdev_priv(dev);
 1168 	u32 speed = ethtool_cmd_speed(cmd);
 1169 	struct cmd_desc xp_cmd;
 1170 	__le16 xcvr;
 1171 	int err;
 1172 
 1173 	err = -EINVAL;
 1174 	if (cmd->autoneg == AUTONEG_ENABLE) {
 1175 		xcvr = TYPHOON_XCVR_AUTONEG;
 1176 	} else {
 1177 		if (cmd->duplex == DUPLEX_HALF) {
 1178 			if (speed == SPEED_10)
 1179 				xcvr = TYPHOON_XCVR_10HALF;
 1180 			else if (speed == SPEED_100)
 1181 				xcvr = TYPHOON_XCVR_100HALF;
 1182 			else
 1183 				goto out;
 1184 		} else if (cmd->duplex == DUPLEX_FULL) {
 1185 			if (speed == SPEED_10)
 1186 				xcvr = TYPHOON_XCVR_10FULL;
 1187 			else if (speed == SPEED_100)
 1188 				xcvr = TYPHOON_XCVR_100FULL;
 1189 			else
 1190 				goto out;
 1191 		} else
 1192 			goto out;
 1193 	}
 1194 
 1195 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
 1196 	xp_cmd.parm1 = xcvr;
 1197 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 1198 	if(err < 0)
 1199 		goto out;
 1200 
 1201 	tp->xcvr_select = xcvr;
 1202 	if(cmd->autoneg == AUTONEG_ENABLE) {
 1203 		tp->speed = 0xff;	/* invalid */
 1204 		tp->duplex = 0xff;	/* invalid */
 1205 	} else {
 1206 		tp->speed = speed;
 1207 		tp->duplex = cmd->duplex;
 1208 	}
 1209 
 1210 out:
 1211 	return err;
 1212 }
 1213 
 1214 static void
 1215 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 1216 {
 1217 	struct typhoon *tp = netdev_priv(dev);
 1218 
 1219 	wol->supported = WAKE_PHY | WAKE_MAGIC;
 1220 	wol->wolopts = 0;
 1221 	if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
 1222 		wol->wolopts |= WAKE_PHY;
 1223 	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
 1224 		wol->wolopts |= WAKE_MAGIC;
 1225 	memset(&wol->sopass, 0, sizeof(wol->sopass));
 1226 }
 1227 
 1228 static int
 1229 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 1230 {
 1231 	struct typhoon *tp = netdev_priv(dev);
 1232 
 1233 	if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
 1234 		return -EINVAL;
 1235 
 1236 	tp->wol_events = 0;
 1237 	if(wol->wolopts & WAKE_PHY)
 1238 		tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
 1239 	if(wol->wolopts & WAKE_MAGIC)
 1240 		tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
 1241 
 1242 	return 0;
 1243 }
 1244 
 1245 static void
 1246 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 1247 {
 1248 	ering->rx_max_pending = RXENT_ENTRIES;
 1249 	ering->tx_max_pending = TXLO_ENTRIES - 1;
 1250 
 1251 	ering->rx_pending = RXENT_ENTRIES;
 1252 	ering->tx_pending = TXLO_ENTRIES - 1;
 1253 }
 1254 
 1255 static const struct ethtool_ops typhoon_ethtool_ops = {
 1256 	.get_settings		= typhoon_get_settings,
 1257 	.set_settings		= typhoon_set_settings,
 1258 	.get_drvinfo		= typhoon_get_drvinfo,
 1259 	.get_wol		= typhoon_get_wol,
 1260 	.set_wol		= typhoon_set_wol,
 1261 	.get_link		= ethtool_op_get_link,
 1262 	.get_ringparam		= typhoon_get_ringparam,
 1263 };
 1264 
 1265 static int
 1266 typhoon_wait_interrupt(void __iomem *ioaddr)
 1267 {
 1268 	int i, err = 0;
 1269 
 1270 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
 1271 		if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
 1272 		   TYPHOON_INTR_BOOTCMD)
 1273 			goto out;
 1274 		udelay(TYPHOON_UDELAY);
 1275 	}
 1276 
 1277 	err = -ETIMEDOUT;
 1278 
 1279 out:
 1280 	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
 1281 	return err;
 1282 }
 1283 
 1284 #define shared_offset(x)	offsetof(struct typhoon_shared, x)
 1285 
 1286 static void
 1287 typhoon_init_interface(struct typhoon *tp)
 1288 {
 1289 	struct typhoon_interface *iface = &tp->shared->iface;
 1290 	dma_addr_t shared_dma;
 1291 
 1292 	memset(tp->shared, 0, sizeof(struct typhoon_shared));
 1293 
 1294 	/* The *Hi members of iface are all init'd to zero by the memset().
 1295 	 */
 1296 	shared_dma = tp->shared_dma + shared_offset(indexes);
 1297 	iface->ringIndex = cpu_to_le32(shared_dma);
 1298 
 1299 	shared_dma = tp->shared_dma + shared_offset(txLo);
 1300 	iface->txLoAddr = cpu_to_le32(shared_dma);
 1301 	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
 1302 
 1303 	shared_dma = tp->shared_dma + shared_offset(txHi);
 1304 	iface->txHiAddr = cpu_to_le32(shared_dma);
 1305 	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
 1306 
 1307 	shared_dma = tp->shared_dma + shared_offset(rxBuff);
 1308 	iface->rxBuffAddr = cpu_to_le32(shared_dma);
 1309 	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
 1310 					sizeof(struct rx_free));
 1311 
 1312 	shared_dma = tp->shared_dma + shared_offset(rxLo);
 1313 	iface->rxLoAddr = cpu_to_le32(shared_dma);
 1314 	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
 1315 
 1316 	shared_dma = tp->shared_dma + shared_offset(rxHi);
 1317 	iface->rxHiAddr = cpu_to_le32(shared_dma);
 1318 	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
 1319 
 1320 	shared_dma = tp->shared_dma + shared_offset(cmd);
 1321 	iface->cmdAddr = cpu_to_le32(shared_dma);
 1322 	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
 1323 
 1324 	shared_dma = tp->shared_dma + shared_offset(resp);
 1325 	iface->respAddr = cpu_to_le32(shared_dma);
 1326 	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
 1327 
 1328 	shared_dma = tp->shared_dma + shared_offset(zeroWord);
 1329 	iface->zeroAddr = cpu_to_le32(shared_dma);
 1330 
 1331 	tp->indexes = &tp->shared->indexes;
 1332 	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
 1333 	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
 1334 	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
 1335 	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
 1336 	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
 1337 	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
 1338 	tp->respRing.ringBase = (u8 *) tp->shared->resp;
 1339 
 1340 	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
 1341 	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
 1342 
 1343 	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
 1344 	tp->card_state = Sleeping;
 1345 
 1346 	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
 1347 	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
 1348 	tp->offload |= TYPHOON_OFFLOAD_VLAN;
 1349 
 1350 	spin_lock_init(&tp->command_lock);
 1351 
 1352 	/* Force the writes to the shared memory area out before continuing. */
 1353 	wmb();
 1354 }
 1355 
 1356 static void
 1357 typhoon_init_rings(struct typhoon *tp)
 1358 {
 1359 	memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
 1360 
 1361 	tp->txLoRing.lastWrite = 0;
 1362 	tp->txHiRing.lastWrite = 0;
 1363 	tp->rxLoRing.lastWrite = 0;
 1364 	tp->rxHiRing.lastWrite = 0;
 1365 	tp->rxBuffRing.lastWrite = 0;
 1366 	tp->cmdRing.lastWrite = 0;
 1367 	tp->respRing.lastWrite = 0;
 1368 
 1369 	tp->txLoRing.lastRead = 0;
 1370 	tp->txHiRing.lastRead = 0;
 1371 }
 1372 
 1373 static const struct firmware *typhoon_fw;
 1374 
 1375 static int
 1376 typhoon_request_firmware(struct typhoon *tp)
 1377 {
 1378 	const struct typhoon_file_header *fHdr;
 1379 	const struct typhoon_section_header *sHdr;
 1380 	const u8 *image_data;
 1381 	u32 numSections;
 1382 	u32 section_len;
 1383 	u32 remaining;
 1384 	int err;
 1385 
 1386 	if (typhoon_fw)
 1387 		return 0;
 1388 
 1389 	err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
 1390 	if (err) {
 1391 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
 1392 			   FIRMWARE_NAME);
 1393 		return err;
 1394 	}
 1395 
 1396 	image_data = (u8 *) typhoon_fw->data;
 1397 	remaining = typhoon_fw->size;
 1398 	if (remaining < sizeof(struct typhoon_file_header))
 1399 		goto invalid_fw;
 1400 
 1401 	fHdr = (struct typhoon_file_header *) image_data;
 1402 	if (memcmp(fHdr->tag, "TYPHOON", 8))
 1403 		goto invalid_fw;
 1404 
 1405 	numSections = le32_to_cpu(fHdr->numSections);
 1406 	image_data += sizeof(struct typhoon_file_header);
 1407 	remaining -= sizeof(struct typhoon_file_header);
 1408 
 1409 	while (numSections--) {
 1410 		if (remaining < sizeof(struct typhoon_section_header))
 1411 			goto invalid_fw;
 1412 
 1413 		sHdr = (struct typhoon_section_header *) image_data;
 1414 		image_data += sizeof(struct typhoon_section_header);
 1415 		section_len = le32_to_cpu(sHdr->len);
 1416 
 1417 		if (remaining < section_len)
 1418 			goto invalid_fw;
 1419 
 1420 		image_data += section_len;
 1421 		remaining -= section_len;
 1422 	}
 1423 
 1424 	return 0;
 1425 
 1426 invalid_fw:
 1427 	netdev_err(tp->dev, "Invalid firmware image\n");
 1428 	release_firmware(typhoon_fw);
 1429 	typhoon_fw = NULL;
 1430 	return -EINVAL;
 1431 }
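
typhoon_request_firmware() above only validates the layout of the image: a file header followed by numSections blocks, each a section header plus its payload, with every length checked against the bytes remaining. A rough standalone sketch of the same walk, using simplified stand-in structures (field names and widths are illustrative, not the driver's headers, and byte order is ignored):

	#include <stddef.h>
	#include <stdint.h>

	struct demo_file_hdr { char tag[8]; uint32_t version; uint32_t num_sections; };
	struct demo_sect_hdr { uint32_t len; uint32_t addr; };

	/* Return 1 if every section fits inside the image, 0 otherwise. */
	static int demo_fw_layout_ok(const uint8_t *img, size_t size)
	{
		const struct demo_file_hdr *fh = (const struct demo_file_hdr *)img;
		size_t off = sizeof(*fh);
		uint32_t n;

		if (size < sizeof(*fh))
			return 0;
		for (n = fh->num_sections; n; n--) {
			const struct demo_sect_hdr *sh;

			if (size - off < sizeof(*sh))
				return 0;
			sh = (const struct demo_sect_hdr *)(img + off);
			off += sizeof(*sh);
			if (size - off < sh->len)
				return 0;
			off += sh->len;		/* skip the section payload */
		}
		return 1;
	}
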
 1432 
 1433 static int
 1434 typhoon_download_firmware(struct typhoon *tp)
 1435 {
 1436 	void __iomem *ioaddr = tp->ioaddr;
 1437 	struct pci_dev *pdev = tp->pdev;
 1438 	const struct typhoon_file_header *fHdr;
 1439 	const struct typhoon_section_header *sHdr;
 1440 	const u8 *image_data;
 1441 	void *dpage;
 1442 	dma_addr_t dpage_dma;
 1443 	__sum16 csum;
 1444 	u32 irqEnabled;
 1445 	u32 irqMasked;
 1446 	u32 numSections;
 1447 	u32 section_len;
 1448 	u32 len;
 1449 	u32 load_addr;
 1450 	u32 hmac;
 1451 	int i;
 1452 	int err;
 1453 
 1454 	image_data = (u8 *) typhoon_fw->data;
 1455 	fHdr = (struct typhoon_file_header *) image_data;
 1456 
 1457 	/* Cannot just map the firmware image using pci_map_single() as
 1458 	 * the firmware is vmalloc()'d and may not be physically contiguous,
 1459 	 * so we allocate some consistent memory to copy the sections into.
 1460 	 */
 1461 	err = -ENOMEM;
 1462 	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
 1463 	if(!dpage) {
 1464 		netdev_err(tp->dev, "no DMA mem for firmware\n");
 1465 		goto err_out;
 1466 	}
 1467 
 1468 	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
 1469 	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
 1470 	       ioaddr + TYPHOON_REG_INTR_ENABLE);
 1471 	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
 1472 	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
 1473 	       ioaddr + TYPHOON_REG_INTR_MASK);
 1474 
 1475 	err = -ETIMEDOUT;
 1476 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 1477 		netdev_err(tp->dev, "card ready timeout\n");
 1478 		goto err_out_irq;
 1479 	}
 1480 
 1481 	numSections = le32_to_cpu(fHdr->numSections);
 1482 	load_addr = le32_to_cpu(fHdr->startAddr);
 1483 
 1484 	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
 1485 	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
 1486 	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
 1487 	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
 1488 	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
 1489 	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
 1490 	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
 1491 	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
 1492 	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
 1493 	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
 1494 	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
 1495 	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
 1496 	typhoon_post_pci_writes(ioaddr);
 1497 	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
 1498 
 1499 	image_data += sizeof(struct typhoon_file_header);
 1500 
 1501 	/* The ioread32() in typhoon_wait_interrupt() will force the
 1502 	 * last write to the command register to post, so
 1503 	 * we don't need a typhoon_post_pci_writes() after it.
 1504 	 */
 1505 	for(i = 0; i < numSections; i++) {
 1506 		sHdr = (struct typhoon_section_header *) image_data;
 1507 		image_data += sizeof(struct typhoon_section_header);
 1508 		load_addr = le32_to_cpu(sHdr->startAddr);
 1509 		section_len = le32_to_cpu(sHdr->len);
 1510 
 1511 		while(section_len) {
 1512 			len = min_t(u32, section_len, PAGE_SIZE);
 1513 
 1514 			if(typhoon_wait_interrupt(ioaddr) < 0 ||
 1515 			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
 1516 			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 1517 				netdev_err(tp->dev, "segment ready timeout\n");
 1518 				goto err_out_irq;
 1519 			}
 1520 
  1521 			/* Do a pseudo IPv4 checksum on the data -- first
 1522 			 * need to convert each u16 to cpu order before
 1523 			 * summing. Fortunately, due to the properties of
 1524 			 * the checksum, we can do this once, at the end.
 1525 			 */
 1526 			csum = csum_fold(csum_partial_copy_nocheck(image_data,
 1527 								   dpage, len,
 1528 								   0));
 1529 
 1530 			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
 1531 			iowrite32(le16_to_cpu((__force __le16)csum),
 1532 					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
 1533 			iowrite32(load_addr,
 1534 					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
 1535 			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
 1536 			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
 1537 			typhoon_post_pci_writes(ioaddr);
 1538 			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
 1539 					ioaddr + TYPHOON_REG_COMMAND);
 1540 
 1541 			image_data += len;
 1542 			load_addr += len;
 1543 			section_len -= len;
 1544 		}
 1545 	}
 1546 
 1547 	if(typhoon_wait_interrupt(ioaddr) < 0 ||
 1548 	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
 1549 	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 1550 		netdev_err(tp->dev, "final segment ready timeout\n");
 1551 		goto err_out_irq;
 1552 	}
 1553 
 1554 	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
 1555 
 1556 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
 1557 		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
 1558 			   ioread32(ioaddr + TYPHOON_REG_STATUS));
 1559 		goto err_out_irq;
 1560 	}
 1561 
 1562 	err = 0;
 1563 
 1564 err_out_irq:
 1565 	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
 1566 	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
 1567 
 1568 	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
 1569 
 1570 err_out:
 1571 	return err;
 1572 }
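
For each downloaded segment the card is given a 16-bit one's-complement checksum of the data, which the driver obtains from csum_fold() over csum_partial_copy_nocheck(). As a reference for the arithmetic only (a hypothetical helper; the byte-order handling discussed in the comment above is left out):

	#include <stddef.h>
	#include <stdint.h>

	static uint16_t demo_csum16(const uint8_t *data, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)(data[i] << 8 | data[i + 1]);
		if (len & 1)				/* odd trailing byte is zero-padded */
			sum += (uint32_t)data[len - 1] << 8;
		while (sum >> 16)			/* fold carries back into 16 bits */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}
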
 1573 
 1574 static int
 1575 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
 1576 {
 1577 	void __iomem *ioaddr = tp->ioaddr;
 1578 
 1579 	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
 1580 		netdev_err(tp->dev, "boot ready timeout\n");
 1581 		goto out_timeout;
 1582 	}
 1583 
 1584 	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
 1585 	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
 1586 	typhoon_post_pci_writes(ioaddr);
 1587 	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
 1588 				ioaddr + TYPHOON_REG_COMMAND);
 1589 
 1590 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
 1591 		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
 1592 			   ioread32(ioaddr + TYPHOON_REG_STATUS));
 1593 		goto out_timeout;
 1594 	}
 1595 
 1596 	/* Clear the Transmit and Command ready registers
 1597 	 */
 1598 	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
 1599 	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
 1600 	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
 1601 	typhoon_post_pci_writes(ioaddr);
 1602 	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
 1603 
 1604 	return 0;
 1605 
 1606 out_timeout:
 1607 	return -ETIMEDOUT;
 1608 }
 1609 
 1610 static u32
 1611 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
 1612 			volatile __le32 * index)
 1613 {
 1614 	u32 lastRead = txRing->lastRead;
 1615 	struct tx_desc *tx;
 1616 	dma_addr_t skb_dma;
 1617 	int dma_len;
 1618 	int type;
 1619 
 1620 	while(lastRead != le32_to_cpu(*index)) {
 1621 		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
 1622 		type = tx->flags & TYPHOON_TYPE_MASK;
 1623 
 1624 		if(type == TYPHOON_TX_DESC) {
 1625 			/* This tx_desc describes a packet.
 1626 			 */
 1627 			unsigned long ptr = tx->tx_addr;
 1628 			struct sk_buff *skb = (struct sk_buff *) ptr;
 1629 			dev_kfree_skb_irq(skb);
 1630 		} else if(type == TYPHOON_FRAG_DESC) {
 1631 			/* This tx_desc describes a memory mapping. Free it.
 1632 			 */
 1633 			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
 1634 			dma_len = le16_to_cpu(tx->len);
 1635 			pci_unmap_single(tp->pdev, skb_dma, dma_len,
 1636 				       PCI_DMA_TODEVICE);
 1637 		}
 1638 
 1639 		tx->flags = 0;
 1640 		typhoon_inc_tx_index(&lastRead, 1);
 1641 	}
 1642 
 1643 	return lastRead;
 1644 }
 1645 
 1646 static void
 1647 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
 1648 			volatile __le32 * index)
 1649 {
 1650 	u32 lastRead;
 1651 	int numDesc = MAX_SKB_FRAGS + 1;
 1652 
 1653 	/* This will need changing if we start to use the Hi Tx ring. */
 1654 	lastRead = typhoon_clean_tx(tp, txRing, index);
 1655 	if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
 1656 				lastRead, TXLO_ENTRIES) > (numDesc + 2))
 1657 		netif_wake_queue(tp->dev);
 1658 
 1659 	txRing->lastRead = lastRead;
 1660 	smp_wmb();
 1661 }
 1662 
 1663 static void
 1664 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
 1665 {
 1666 	struct typhoon_indexes *indexes = tp->indexes;
 1667 	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
 1668 	struct basic_ring *ring = &tp->rxBuffRing;
 1669 	struct rx_free *r;
 1670 
 1671 	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 1672 				le32_to_cpu(indexes->rxBuffCleared)) {
 1673 		/* no room in ring, just drop the skb
 1674 		 */
 1675 		dev_kfree_skb_any(rxb->skb);
 1676 		rxb->skb = NULL;
 1677 		return;
 1678 	}
 1679 
 1680 	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
 1681 	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
 1682 	r->virtAddr = idx;
 1683 	r->physAddr = cpu_to_le32(rxb->dma_addr);
 1684 
 1685 	/* Tell the card about it */
 1686 	wmb();
 1687 	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
 1688 }
 1689 
 1690 static int
 1691 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
 1692 {
 1693 	struct typhoon_indexes *indexes = tp->indexes;
 1694 	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
 1695 	struct basic_ring *ring = &tp->rxBuffRing;
 1696 	struct rx_free *r;
 1697 	struct sk_buff *skb;
 1698 	dma_addr_t dma_addr;
 1699 
 1700 	rxb->skb = NULL;
 1701 
 1702 	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 1703 				le32_to_cpu(indexes->rxBuffCleared))
 1704 		return -ENOMEM;
 1705 
 1706 	skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
 1707 	if(!skb)
 1708 		return -ENOMEM;
 1709 
 1710 #if 0
  1711 	/* Please, 3com, fix the firmware to allow DMA to an unaligned
 1712 	 * address! Pretty please?
 1713 	 */
 1714 	skb_reserve(skb, 2);
 1715 #endif
 1716 
 1717 	dma_addr = pci_map_single(tp->pdev, skb->data,
 1718 				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 1719 
 1720 	/* Since no card does 64 bit DAC, the high bits will never
 1721 	 * change from zero.
 1722 	 */
 1723 	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
 1724 	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
 1725 	r->virtAddr = idx;
 1726 	r->physAddr = cpu_to_le32(dma_addr);
 1727 	rxb->skb = skb;
 1728 	rxb->dma_addr = dma_addr;
 1729 
 1730 	/* Tell the card about it */
 1731 	wmb();
 1732 	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
 1733 	return 0;
 1734 }
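
Both typhoon_recycle_rx_skb() and typhoon_alloc_rx_skb() above treat the free-buffer ring as full when advancing lastWrite by one entry would land on the index the card has already cleared, so one slot is always sacrificed to keep "full" distinguishable from "empty". A sketch of that test (the parameters stand in for sizeof(struct rx_free) and RXFREE_ENTRIES):

	static int demo_rxfree_ring_full(unsigned int last_write, unsigned int cleared,
					 unsigned int entry_size, unsigned int num_entries)
	{
		return (last_write + entry_size) % (num_entries * entry_size) == cleared;
	}
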
 1735 
 1736 static int
 1737 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
 1738 	   volatile __le32 * cleared, int budget)
 1739 {
 1740 	struct rx_desc *rx;
 1741 	struct sk_buff *skb, *new_skb;
 1742 	struct rxbuff_ent *rxb;
 1743 	dma_addr_t dma_addr;
 1744 	u32 local_ready;
 1745 	u32 rxaddr;
 1746 	int pkt_len;
 1747 	u32 idx;
 1748 	__le32 csum_bits;
 1749 	int received;
 1750 
 1751 	received = 0;
 1752 	local_ready = le32_to_cpu(*ready);
 1753 	rxaddr = le32_to_cpu(*cleared);
 1754 	while(rxaddr != local_ready && budget > 0) {
 1755 		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
 1756 		idx = rx->addr;
 1757 		rxb = &tp->rxbuffers[idx];
 1758 		skb = rxb->skb;
 1759 		dma_addr = rxb->dma_addr;
 1760 
 1761 		typhoon_inc_rx_index(&rxaddr, 1);
 1762 
 1763 		if(rx->flags & TYPHOON_RX_ERROR) {
 1764 			typhoon_recycle_rx_skb(tp, idx);
 1765 			continue;
 1766 		}
 1767 
 1768 		pkt_len = le16_to_cpu(rx->frameLen);
 1769 
 1770 		if(pkt_len < rx_copybreak &&
 1771 		   (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
 1772 			skb_reserve(new_skb, 2);
 1773 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
 1774 						    PKT_BUF_SZ,
 1775 						    PCI_DMA_FROMDEVICE);
 1776 			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
 1777 			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
 1778 						       PKT_BUF_SZ,
 1779 						       PCI_DMA_FROMDEVICE);
 1780 			skb_put(new_skb, pkt_len);
 1781 			typhoon_recycle_rx_skb(tp, idx);
 1782 		} else {
 1783 			new_skb = skb;
 1784 			skb_put(new_skb, pkt_len);
 1785 			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
 1786 				       PCI_DMA_FROMDEVICE);
 1787 			typhoon_alloc_rx_skb(tp, idx);
 1788 		}
 1789 		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
 1790 		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
 1791 			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
 1792 		if(csum_bits ==
 1793 		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
 1794 		   csum_bits ==
 1795 		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
 1796 			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
 1797 		} else
 1798 			skb_checksum_none_assert(new_skb);
 1799 
 1800 		if (rx->rxStatus & TYPHOON_RX_VLAN)
 1801 			__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
 1802 					       ntohl(rx->vlanTag) & 0xffff);
 1803 		netif_receive_skb(new_skb);
 1804 
 1805 		received++;
 1806 		budget--;
 1807 	}
 1808 	*cleared = cpu_to_le32(rxaddr);
 1809 
 1810 	return received;
 1811 }
 1812 
 1813 static void
 1814 typhoon_fill_free_ring(struct typhoon *tp)
 1815 {
 1816 	u32 i;
 1817 
 1818 	for(i = 0; i < RXENT_ENTRIES; i++) {
 1819 		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
 1820 		if(rxb->skb)
 1821 			continue;
 1822 		if(typhoon_alloc_rx_skb(tp, i) < 0)
 1823 			break;
 1824 	}
 1825 }
 1826 
 1827 static int
 1828 typhoon_poll(struct napi_struct *napi, int budget)
 1829 {
 1830 	struct typhoon *tp = container_of(napi, struct typhoon, napi);
 1831 	struct typhoon_indexes *indexes = tp->indexes;
 1832 	int work_done;
 1833 
 1834 	rmb();
 1835 	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
 1836 			typhoon_process_response(tp, 0, NULL);
 1837 
 1838 	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
 1839 		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
 1840 
 1841 	work_done = 0;
 1842 
 1843 	if(indexes->rxHiCleared != indexes->rxHiReady) {
 1844 		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
 1845 			   		&indexes->rxHiCleared, budget);
 1846 	}
 1847 
 1848 	if(indexes->rxLoCleared != indexes->rxLoReady) {
 1849 		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
 1850 					&indexes->rxLoCleared, budget - work_done);
 1851 	}
 1852 
 1853 	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
 1854 		/* rxBuff ring is empty, try to fill it. */
 1855 		typhoon_fill_free_ring(tp);
 1856 	}
 1857 
 1858 	if (work_done < budget) {
 1859 		napi_complete(napi);
 1860 		iowrite32(TYPHOON_INTR_NONE,
 1861 				tp->ioaddr + TYPHOON_REG_INTR_MASK);
 1862 		typhoon_post_pci_writes(tp->ioaddr);
 1863 	}
 1864 
 1865 	return work_done;
 1866 }
 1867 
 1868 static irqreturn_t
 1869 typhoon_interrupt(int irq, void *dev_instance)
 1870 {
 1871 	struct net_device *dev = dev_instance;
 1872 	struct typhoon *tp = netdev_priv(dev);
 1873 	void __iomem *ioaddr = tp->ioaddr;
 1874 	u32 intr_status;
 1875 
 1876 	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 1877 	if(!(intr_status & TYPHOON_INTR_HOST_INT))
 1878 		return IRQ_NONE;
 1879 
 1880 	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
 1881 
 1882 	if (napi_schedule_prep(&tp->napi)) {
 1883 		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 1884 		typhoon_post_pci_writes(ioaddr);
 1885 		__napi_schedule(&tp->napi);
 1886 	} else {
 1887 		netdev_err(dev, "Error, poll already scheduled\n");
 1888 	}
 1889 	return IRQ_HANDLED;
 1890 }
 1891 
 1892 static void
 1893 typhoon_free_rx_rings(struct typhoon *tp)
 1894 {
 1895 	u32 i;
 1896 
 1897 	for(i = 0; i < RXENT_ENTRIES; i++) {
 1898 		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
 1899 		if(rxb->skb) {
 1900 			pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
 1901 				       PCI_DMA_FROMDEVICE);
 1902 			dev_kfree_skb(rxb->skb);
 1903 			rxb->skb = NULL;
 1904 		}
 1905 	}
 1906 }
 1907 
 1908 static int
 1909 typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
 1910 {
 1911 	struct pci_dev *pdev = tp->pdev;
 1912 	void __iomem *ioaddr = tp->ioaddr;
 1913 	struct cmd_desc xp_cmd;
 1914 	int err;
 1915 
 1916 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
 1917 	xp_cmd.parm1 = events;
 1918 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 1919 	if(err < 0) {
 1920 		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
 1921 			   err);
 1922 		return err;
 1923 	}
 1924 
 1925 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
 1926 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 1927 	if(err < 0) {
 1928 		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
 1929 		return err;
 1930 	}
 1931 
 1932 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
 1933 		return -ETIMEDOUT;
 1934 
 1935 	/* Since we cannot monitor the status of the link while sleeping,
 1936 	 * tell the world it went away.
 1937 	 */
 1938 	netif_carrier_off(tp->dev);
 1939 
 1940 	pci_enable_wake(tp->pdev, state, 1);
 1941 	pci_disable_device(pdev);
 1942 	return pci_set_power_state(pdev, state);
 1943 }
 1944 
 1945 static int
 1946 typhoon_wakeup(struct typhoon *tp, int wait_type)
 1947 {
 1948 	struct pci_dev *pdev = tp->pdev;
 1949 	void __iomem *ioaddr = tp->ioaddr;
 1950 
 1951 	pci_set_power_state(pdev, PCI_D0);
 1952 	pci_restore_state(pdev);
 1953 
 1954 	/* Post 2.x.x versions of the Sleep Image require a reset before
 1955 	 * we can download the Runtime Image. But let's not make users of
 1956 	 * the old firmware pay for the reset.
 1957 	 */
 1958 	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
 1959 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
 1960 			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
 1961 		return typhoon_reset(ioaddr, wait_type);
 1962 
 1963 	return 0;
 1964 }
 1965 
 1966 static int
 1967 typhoon_start_runtime(struct typhoon *tp)
 1968 {
 1969 	struct net_device *dev = tp->dev;
 1970 	void __iomem *ioaddr = tp->ioaddr;
 1971 	struct cmd_desc xp_cmd;
 1972 	int err;
 1973 
 1974 	typhoon_init_rings(tp);
 1975 	typhoon_fill_free_ring(tp);
 1976 
 1977 	err = typhoon_download_firmware(tp);
 1978 	if(err < 0) {
 1979 		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
 1980 		goto error_out;
 1981 	}
 1982 
 1983 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
 1984 		netdev_err(tp->dev, "cannot boot 3XP\n");
 1985 		err = -EIO;
 1986 		goto error_out;
 1987 	}
 1988 
 1989 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
 1990 	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
 1991 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 1992 	if(err < 0)
 1993 		goto error_out;
 1994 
 1995 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
 1996 	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
 1997 	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
 1998 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 1999 	if(err < 0)
 2000 		goto error_out;
 2001 
 2002 	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
 2003 	 * us some more information on how to control it.
 2004 	 */
 2005 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
 2006 	xp_cmd.parm1 = 0;
 2007 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2008 	if(err < 0)
 2009 		goto error_out;
 2010 
 2011 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
 2012 	xp_cmd.parm1 = tp->xcvr_select;
 2013 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2014 	if(err < 0)
 2015 		goto error_out;
 2016 
 2017 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
 2018 	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
 2019 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2020 	if(err < 0)
 2021 		goto error_out;
 2022 
 2023 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
 2024 	xp_cmd.parm2 = tp->offload;
 2025 	xp_cmd.parm3 = tp->offload;
 2026 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2027 	if(err < 0)
 2028 		goto error_out;
 2029 
 2030 	typhoon_set_rx_mode(dev);
 2031 
 2032 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
 2033 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2034 	if(err < 0)
 2035 		goto error_out;
 2036 
 2037 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
 2038 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2039 	if(err < 0)
 2040 		goto error_out;
 2041 
 2042 	tp->card_state = Running;
 2043 	smp_wmb();
 2044 
 2045 	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
 2046 	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
 2047 	typhoon_post_pci_writes(ioaddr);
 2048 
 2049 	return 0;
 2050 
 2051 error_out:
 2052 	typhoon_reset(ioaddr, WaitNoSleep);
 2053 	typhoon_free_rx_rings(tp);
 2054 	typhoon_init_rings(tp);
 2055 	return err;
 2056 }
 2057 
 2058 static int
 2059 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
 2060 {
 2061 	struct typhoon_indexes *indexes = tp->indexes;
 2062 	struct transmit_ring *txLo = &tp->txLoRing;
 2063 	void __iomem *ioaddr = tp->ioaddr;
 2064 	struct cmd_desc xp_cmd;
 2065 	int i;
 2066 
 2067 	/* Disable interrupts early, since we can't schedule a poll
 2068 	 * when called with !netif_running(). This will be posted
 2069 	 * when we force the posting of the command.
 2070 	 */
 2071 	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
 2072 
 2073 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
 2074 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2075 
 2076 	/* Wait 1/2 sec for any outstanding transmits to occur
 2077 	 * We'll cleanup after the reset if this times out.
 2078 	 */
 2079 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
 2080 		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
 2081 			break;
 2082 		udelay(TYPHOON_UDELAY);
 2083 	}
 2084 
 2085 	if(i == TYPHOON_WAIT_TIMEOUT)
 2086 		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
 2087 
 2088 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
 2089 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2090 
 2091 	/* save the statistics so when we bring the interface up again,
 2092 	 * the values reported to userspace are correct.
 2093 	 */
 2094 	tp->card_state = Sleeping;
 2095 	smp_wmb();
 2096 	typhoon_do_get_stats(tp);
 2097 	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
 2098 
 2099 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
 2100 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 2101 
 2102 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
 2103 		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
 2104 
 2105 	if(typhoon_reset(ioaddr, wait_type) < 0) {
 2106 		netdev_err(tp->dev, "unable to reset 3XP\n");
 2107 		return -ETIMEDOUT;
 2108 	}
 2109 
 2110 	/* cleanup any outstanding Tx packets */
 2111 	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
 2112 		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
 2113 		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
 2114 	}
 2115 
 2116 	return 0;
 2117 }
 2118 
 2119 static void
 2120 typhoon_tx_timeout(struct net_device *dev)
 2121 {
 2122 	struct typhoon *tp = netdev_priv(dev);
 2123 
 2124 	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
 2125 		netdev_warn(dev, "could not reset in tx timeout\n");
 2126 		goto truly_dead;
 2127 	}
 2128 
 2129 	/* If we ever start using the Hi ring, it will need cleaning too */
 2130 	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
 2131 	typhoon_free_rx_rings(tp);
 2132 
 2133 	if(typhoon_start_runtime(tp) < 0) {
 2134 		netdev_err(dev, "could not start runtime in tx timeout\n");
 2135 		goto truly_dead;
  2136 	}
 2137 
 2138 	netif_wake_queue(dev);
 2139 	return;
 2140 
 2141 truly_dead:
 2142 	/* Reset the hardware, and turn off carrier to avoid more timeouts */
 2143 	typhoon_reset(tp->ioaddr, NoWait);
 2144 	netif_carrier_off(dev);
 2145 }
 2146 
 2147 static int
 2148 typhoon_open(struct net_device *dev)
 2149 {
 2150 	struct typhoon *tp = netdev_priv(dev);
 2151 	int err;
 2152 
 2153 	err = typhoon_request_firmware(tp);
 2154 	if (err)
 2155 		goto out;
 2156 
 2157 	err = typhoon_wakeup(tp, WaitSleep);
 2158 	if(err < 0) {
 2159 		netdev_err(dev, "unable to wakeup device\n");
 2160 		goto out_sleep;
 2161 	}
 2162 
 2163 	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
 2164 				dev->name, dev);
 2165 	if(err < 0)
 2166 		goto out_sleep;
 2167 
 2168 	napi_enable(&tp->napi);
 2169 
 2170 	err = typhoon_start_runtime(tp);
 2171 	if(err < 0) {
 2172 		napi_disable(&tp->napi);
 2173 		goto out_irq;
 2174 	}
 2175 
 2176 	netif_start_queue(dev);
 2177 	return 0;
 2178 
 2179 out_irq:
 2180 	free_irq(dev->irq, dev);
 2181 
 2182 out_sleep:
 2183 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 2184 		netdev_err(dev, "unable to reboot into sleep img\n");
 2185 		typhoon_reset(tp->ioaddr, NoWait);
 2186 		goto out;
 2187 	}
 2188 
 2189 	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
 2190 		netdev_err(dev, "unable to go back to sleep\n");
 2191 
 2192 out:
 2193 	return err;
 2194 }
 2195 
 2196 static int
 2197 typhoon_close(struct net_device *dev)
 2198 {
 2199 	struct typhoon *tp = netdev_priv(dev);
 2200 
 2201 	netif_stop_queue(dev);
 2202 	napi_disable(&tp->napi);
 2203 
 2204 	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
 2205 		netdev_err(dev, "unable to stop runtime\n");
 2206 
 2207 	/* Make sure there is no irq handler running on a different CPU. */
 2208 	free_irq(dev->irq, dev);
 2209 
 2210 	typhoon_free_rx_rings(tp);
 2211 	typhoon_init_rings(tp);
 2212 
 2213 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
 2214 		netdev_err(dev, "unable to boot sleep image\n");
 2215 
 2216 	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
 2217 		netdev_err(dev, "unable to put card to sleep\n");
 2218 
 2219 	return 0;
 2220 }
 2221 
 2222 #ifdef CONFIG_PM
 2223 static int
 2224 typhoon_resume(struct pci_dev *pdev)
 2225 {
 2226 	struct net_device *dev = pci_get_drvdata(pdev);
 2227 	struct typhoon *tp = netdev_priv(dev);
 2228 
 2229 	/* If we're down, resume when we are upped.
 2230 	 */
 2231 	if(!netif_running(dev))
 2232 		return 0;
 2233 
 2234 	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
 2235 		netdev_err(dev, "critical: could not wake up in resume\n");
 2236 		goto reset;
 2237 	}
 2238 
 2239 	if(typhoon_start_runtime(tp) < 0) {
 2240 		netdev_err(dev, "critical: could not start runtime in resume\n");
 2241 		goto reset;
 2242 	}
 2243 
 2244 	netif_device_attach(dev);
 2245 	return 0;
 2246 
 2247 reset:
 2248 	typhoon_reset(tp->ioaddr, NoWait);
 2249 	return -EBUSY;
 2250 }
 2251 
 2252 static int
 2253 typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
 2254 {
 2255 	struct net_device *dev = pci_get_drvdata(pdev);
 2256 	struct typhoon *tp = netdev_priv(dev);
 2257 	struct cmd_desc xp_cmd;
 2258 
 2259 	/* If we're down, we're already suspended.
 2260 	 */
 2261 	if(!netif_running(dev))
 2262 		return 0;
 2263 
 2264 	/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
 2265 	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
 2266 		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
 2267 
 2268 	netif_device_detach(dev);
 2269 
 2270 	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
 2271 		netdev_err(dev, "unable to stop runtime\n");
 2272 		goto need_resume;
 2273 	}
 2274 
 2275 	typhoon_free_rx_rings(tp);
 2276 	typhoon_init_rings(tp);
 2277 
 2278 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 2279 		netdev_err(dev, "unable to boot sleep image\n");
 2280 		goto need_resume;
 2281 	}
 2282 
 2283 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
 2284 	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
 2285 	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
 2286 	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
 2287 		netdev_err(dev, "unable to set mac address in suspend\n");
 2288 		goto need_resume;
 2289 	}
 2290 
 2291 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
 2292 	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
 2293 	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
 2294 		netdev_err(dev, "unable to set rx filter in suspend\n");
 2295 		goto need_resume;
 2296 	}
 2297 
 2298 	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
 2299 		netdev_err(dev, "unable to put card to sleep\n");
 2300 		goto need_resume;
 2301 	}
 2302 
 2303 	return 0;
 2304 
 2305 need_resume:
 2306 	typhoon_resume(pdev);
 2307 	return -EBUSY;
 2308 }
 2309 #endif
 2310 
 2311 static int
 2312 typhoon_test_mmio(struct pci_dev *pdev)
 2313 {
 2314 	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
 2315 	int mode = 0;
 2316 	u32 val;
 2317 
 2318 	if(!ioaddr)
 2319 		goto out;
 2320 
 2321 	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
 2322 				TYPHOON_STATUS_WAITING_FOR_HOST)
 2323 		goto out_unmap;
 2324 
 2325 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 2326 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
 2327 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
 2328 
 2329 	/* Ok, see if we can change our interrupt status register by
 2330 	 * sending ourselves an interrupt. If so, then MMIO works.
 2331 	 * The 50usec delay is arbitrary -- it could probably be smaller.
 2332 	 */
 2333 	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 2334 	if((val & TYPHOON_INTR_SELF) == 0) {
 2335 		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
 2336 		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 2337 		udelay(50);
 2338 		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 2339 		if(val & TYPHOON_INTR_SELF)
 2340 			mode = 1;
 2341 	}
 2342 
 2343 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 2344 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
 2345 	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
 2346 	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 2347 
 2348 out_unmap:
 2349 	pci_iounmap(pdev, ioaddr);
 2350 
 2351 out:
 2352 	if(!mode)
 2353 		pr_info("%s: falling back to port IO\n", pci_name(pdev));
 2354 	return mode;
 2355 }
 2356 
 2357 static const struct net_device_ops typhoon_netdev_ops = {
 2358 	.ndo_open		= typhoon_open,
 2359 	.ndo_stop		= typhoon_close,
 2360 	.ndo_start_xmit		= typhoon_start_tx,
 2361 	.ndo_set_rx_mode	= typhoon_set_rx_mode,
 2362 	.ndo_tx_timeout		= typhoon_tx_timeout,
 2363 	.ndo_get_stats		= typhoon_get_stats,
 2364 	.ndo_validate_addr	= eth_validate_addr,
 2365 	.ndo_set_mac_address	= eth_mac_addr,
 2366 	.ndo_change_mtu		= eth_change_mtu,
 2367 };
 2368 
 2369 static int
 2370 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 2371 {
 2372 	struct net_device *dev;
 2373 	struct typhoon *tp;
 2374 	int card_id = (int) ent->driver_data;
 2375 	void __iomem *ioaddr;
 2376 	void *shared;
 2377 	dma_addr_t shared_dma;
 2378 	struct cmd_desc xp_cmd;
 2379 	struct resp_desc xp_resp[3];
 2380 	int err = 0;
 2381 	const char *err_msg;
 2382 
 2383 	dev = alloc_etherdev(sizeof(*tp));
 2384 	if(dev == NULL) {
 2385 		err_msg = "unable to alloc new net device";
 2386 		err = -ENOMEM;
 2387 		goto error_out;
 2388 	}
 2389 	SET_NETDEV_DEV(dev, &pdev->dev);
 2390 
 2391 	err = pci_enable_device(pdev);
 2392 	if(err < 0) {
 2393 		err_msg = "unable to enable device";
 2394 		goto error_out_dev;
 2395 	}
 2396 
 2397 	err = pci_set_mwi(pdev);
 2398 	if(err < 0) {
 2399 		err_msg = "unable to set MWI";
 2400 		goto error_out_disable;
 2401 	}
 2402 
 2403 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 2404 	if(err < 0) {
 2405 		err_msg = "No usable DMA configuration";
 2406 		goto error_out_mwi;
 2407 	}
 2408 
 2409 	/* sanity checks on IO and MMIO BARs
 2410 	 */
 2411 	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
  2412 		err_msg = "region #0 not a PCI IO resource, aborting";
 2413 		err = -ENODEV;
 2414 		goto error_out_mwi;
 2415 	}
 2416 	if(pci_resource_len(pdev, 0) < 128) {
 2417 		err_msg = "Invalid PCI IO region size, aborting";
 2418 		err = -ENODEV;
 2419 		goto error_out_mwi;
 2420 	}
 2421 	if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
 2422 		err_msg = "region #1 not a PCI MMIO resource, aborting";
 2423 		err = -ENODEV;
 2424 		goto error_out_mwi;
 2425 	}
 2426 	if(pci_resource_len(pdev, 1) < 128) {
 2427 		err_msg = "Invalid PCI MMIO region size, aborting";
 2428 		err = -ENODEV;
 2429 		goto error_out_mwi;
 2430 	}
 2431 
 2432 	err = pci_request_regions(pdev, KBUILD_MODNAME);
 2433 	if(err < 0) {
 2434 		err_msg = "could not request regions";
 2435 		goto error_out_mwi;
 2436 	}
 2437 
 2438 	/* map our registers
 2439 	 */
 2440 	if(use_mmio != 0 && use_mmio != 1)
 2441 		use_mmio = typhoon_test_mmio(pdev);
 2442 
 2443 	ioaddr = pci_iomap(pdev, use_mmio, 128);
 2444 	if (!ioaddr) {
 2445 		err_msg = "cannot remap registers, aborting";
 2446 		err = -EIO;
 2447 		goto error_out_regions;
 2448 	}
 2449 
 2450 	/* allocate pci dma space for rx and tx descriptor rings
 2451 	 */
 2452 	shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
 2453 				      &shared_dma);
 2454 	if(!shared) {
 2455 		err_msg = "could not allocate DMA memory";
 2456 		err = -ENOMEM;
 2457 		goto error_out_remap;
 2458 	}
 2459 
 2460 	dev->irq = pdev->irq;
 2461 	tp = netdev_priv(dev);
 2462 	tp->shared = shared;
 2463 	tp->shared_dma = shared_dma;
 2464 	tp->pdev = pdev;
 2465 	tp->tx_pdev = pdev;
 2466 	tp->ioaddr = ioaddr;
 2467 	tp->tx_ioaddr = ioaddr;
 2468 	tp->dev = dev;
 2469 
 2470 	/* Init sequence:
 2471 	 * 1) Reset the adapter to clear any bad juju
 2472 	 * 2) Reload the sleep image
 2473 	 * 3) Boot the sleep image
 2474 	 * 4) Get the hardware address.
 2475 	 * 5) Put the card to sleep.
 2476 	 */
 2477 	if (typhoon_reset(ioaddr, WaitSleep) < 0) {
 2478 		err_msg = "could not reset 3XP";
 2479 		err = -EIO;
 2480 		goto error_out_dma;
 2481 	}
 2482 
 2483 	/* Now that we've reset the 3XP and are sure it's not going to
 2484 	 * write all over memory, enable bus mastering, and save our
 2485 	 * state for resuming after a suspend.
 2486 	 */
 2487 	pci_set_master(pdev);
 2488 	pci_save_state(pdev);
 2489 
 2490 	typhoon_init_interface(tp);
 2491 	typhoon_init_rings(tp);
 2492 
 2493 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 2494 		err_msg = "cannot boot 3XP sleep image";
 2495 		err = -EIO;
 2496 		goto error_out_reset;
 2497 	}
 2498 
 2499 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
 2500 	if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
 2501 		err_msg = "cannot read MAC address";
 2502 		err = -EIO;
 2503 		goto error_out_reset;
 2504 	}
 2505 
 2506 	*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
 2507 	*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
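	/* Byte-order walkthrough (example values, not taken from a firmware
	 * spec): the two response words are little-endian; le16_to_cpu()/
	 * le32_to_cpu() bring them to host order and htons()/htonl() store
	 * them big-endian in dev_addr, so parm1 == cpu_to_le16(0x0004) and
	 * parm2 == cpu_to_le32(0x76123456) would yield 00:04:76:12:34:56.
	 */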
 2508 
 2509 	if(!is_valid_ether_addr(dev->dev_addr)) {
 2510 		err_msg = "Could not obtain valid ethernet address, aborting";
		err = -EIO;
 2511 		goto error_out_reset;
 2512 	}
 2513 
 2514 	/* Read the Sleep Image version last, so the response is valid
 2515 	 * later when we print out the version reported.
 2516 	 */
 2517 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
 2518 	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
 2519 		err_msg = "Could not get Sleep Image version";
		err = -EIO;
 2520 		goto error_out_reset;
 2521 	}
 2522 
 2523 	tp->capabilities = typhoon_card_info[card_id].capabilities;
 2524 	tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
 2525 
 2526 	/* Typhoon 1.0 Sleep Images return one response descriptor to the
 2527 	 * READ_VERSIONS command. Those versions are OK after waking up
 2528 	 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
 2529 	 * seem to need a little extra help to get started. Since we don't
 2530 	 * know how to nudge it along, just kick it.
 2531 	 */
 2532 	if(xp_resp[0].numDesc != 0)
 2533 		tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
 2534 
 2535 	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
 2536 		err_msg = "cannot put adapter to sleep";
 2537 		err = -EIO;
 2538 		goto error_out_reset;
 2539 	}
 2540 
 2541 	/* The chip-specific entries in the device structure. */
 2542 	dev->netdev_ops		= &typhoon_netdev_ops;
 2543 	netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
 2544 	dev->watchdog_timeo	= TX_TIMEOUT;
 2545 
 2546 	dev->ethtool_ops = &typhoon_ethtool_ops;
 2547 
 2548 	/* We can handle scatter gather, up to 16 entries, and
 2549 	 * we can do IP checksumming (only version 4, doh...)
 2550 	 *
 2551 	 * There's no way to turn off the RX VLAN offloading and stripping
 2552 	 * on the current 3XP firmware -- it does not respect the offload
 2553 	 * settings -- so we only allow the user to toggle the TX processing.
 2554 	 */
 2555 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 2556 		NETIF_F_HW_VLAN_CTAG_TX;
 2557 	dev->features = dev->hw_features |
 2558 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
 2559 
 2560 	if(register_netdev(dev) < 0) {
 2561 		err_msg = "unable to register netdev";
		err = -EIO;
 2562 		goto error_out_reset;
 2563 	}
 2564 
 2565 	pci_set_drvdata(pdev, dev);
 2566 
 2567 	netdev_info(dev, "%s at %s 0x%llx, %pM\n",
 2568 		    typhoon_card_info[card_id].name,
 2569 		    use_mmio ? "MMIO" : "IO",
 2570 		    (unsigned long long)pci_resource_start(pdev, use_mmio),
 2571 		    dev->dev_addr);
 2572 
 2573 	/* xp_resp still contains the response to the READ_VERSIONS command.
 2574 	 * For debugging, let the user know what version he has.
 2575 	 */
 2576 	if(xp_resp[0].numDesc == 0) {
 2577 		/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
 2578 		 * of version is Month/Day of build.
 2579 		 */
 2580 		u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
 2581 		netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
 2582 			    monthday >> 8, monthday & 0xff);
 2583 	} else if(xp_resp[0].numDesc == 2) {
 2584 		/* This is the Typhoon 1.1+ type Sleep Image
 2585 		 */
 2586 		u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
 2587 		u8 *ver_string = (u8 *) &xp_resp[1];
 2588 		ver_string[25] = 0;
 2589 		netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
 2590 			    sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
 2591 			    sleep_ver & 0xfff, ver_string);
 2592 	} else {
 2593 		netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
 2594 			    xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
 2595 	}
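	/* Decoding sketch (illustrative value only): the shifts above split
	 * sleep_ver into one 8-bit field (bits 31-24) and two 12-bit fields,
	 * so sleep_ver == 0x03001107 would print as "03.001.107".
	 */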
 2596 
 2597 	return 0;
 2598 
 2599 error_out_reset:
 2600 	typhoon_reset(ioaddr, NoWait);
 2601 
 2602 error_out_dma:
 2603 	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
 2604 			    shared, shared_dma);
 2605 error_out_remap:
 2606 	pci_iounmap(pdev, ioaddr);
 2607 error_out_regions:
 2608 	pci_release_regions(pdev);
 2609 error_out_mwi:
 2610 	pci_clear_mwi(pdev);
 2611 error_out_disable:
 2612 	pci_disable_device(pdev);
 2613 error_out_dev:
 2614 	free_netdev(dev);
 2615 error_out:
 2616 	pr_err("%s: %s\n", pci_name(pdev), err_msg);
 2617 	return err;
 2618 }
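/* The error ladder above releases resources in strict reverse order of
 * acquisition, each label undoing exactly one successful step.  A minimal
 * sketch of the same idiom, using hypothetical helpers acquire_a()/acquire_b()
 * that are not part of this driver:
 *
 *	a = acquire_a();
 *	if (!a)
 *		return -ENOMEM;
 *	if (acquire_b(&b) < 0)
 *		goto err_free_a;
 *	return 0;
 * err_free_a:
 *	release_a(a);
 *	return -EIO;
 */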
 2619 
 2620 static void
 2621 typhoon_remove_one(struct pci_dev *pdev)
 2622 {
 2623 	struct net_device *dev = pci_get_drvdata(pdev);
 2624 	struct typhoon *tp = netdev_priv(dev);
 2625 
 2626 	unregister_netdev(dev);
 2627 	pci_set_power_state(pdev, PCI_D0);
 2628 	pci_restore_state(pdev);
 2629 	typhoon_reset(tp->ioaddr, NoWait);
 2630 	pci_iounmap(pdev, tp->ioaddr);
 2631 	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
 2632 			    tp->shared, tp->shared_dma);
 2633 	pci_release_regions(pdev);
 2634 	pci_clear_mwi(pdev);
 2635 	pci_disable_device(pdev);
 2636 	free_netdev(dev);
 2637 }
 2638 
 2639 static struct pci_driver typhoon_driver = {
 2640 	.name		= KBUILD_MODNAME,
 2641 	.id_table	= typhoon_pci_tbl,
 2642 	.probe		= typhoon_init_one,
 2643 	.remove		= typhoon_remove_one,
 2644 #ifdef CONFIG_PM
 2645 	.suspend	= typhoon_suspend,
 2646 	.resume		= typhoon_resume,
 2647 #endif
 2648 };
 2649 
 2650 static int __init
 2651 typhoon_init(void)
 2652 {
 2653 	return pci_register_driver(&typhoon_driver);
 2654 }
 2655 
 2656 static void __exit
 2657 typhoon_cleanup(void)
 2658 {
 2659 	release_firmware(typhoon_fw);
 2660 	pci_unregister_driver(&typhoon_driver);
 2661 }
 2662 
 2663 module_init(typhoon_init);
 2664 module_exit(typhoon_cleanup);
 2665 
 2666 
 2667 
 2668 
 2669 
 2670 /* LDV_COMMENT_BEGIN_MAIN */
 2671 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 2672 
 2673 /*###########################################################################*/
 2674 
 2675 /*############## Driver Environment Generator 0.2 output ####################*/
 2676 
 2677 /*###########################################################################*/
 2678 
 2679 
 2680 
 2681 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by the driver before it is unloaded. */
 2682 void ldv_check_final_state(void);
 2683 
 2684 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 2685 void ldv_check_return_value(int res);
 2686 
 2687 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 2688 void ldv_check_return_value_probe(int res);
 2689 
 2690 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 2691 void ldv_initialize(void);
 2692 
 2693 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 2694 void ldv_handler_precall(void);
 2695 
 2696 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
 2697 int nondet_int(void);
 2698 
 2699 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 2700 int LDV_IN_INTERRUPT;
 2701 
 2702 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 2703 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 2704 
 2705 
 2706 
 2707 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 2708 	/*============================= VARIABLE DECLARATION PART   =============================*/
 2709 	/** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/
 2710 	/* content: static int typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/
 2711 	/* LDV_COMMENT_BEGIN_PREP */
 2712 	#define TXHI_ENTRIES		2
 2713 	#define TXLO_ENTRIES		128
 2714 	#define RX_ENTRIES		32
 2715 	#define COMMAND_ENTRIES		16
 2716 	#define RESPONSE_ENTRIES	32
 2717 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 2718 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 2719 	#define RXFREE_ENTRIES		128
 2720 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 2721 	#define TX_TIMEOUT  (2*HZ)
 2722 	#define PKT_BUF_SZ		1536
 2723 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 2724 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 2725 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 2726 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 2727 	#undef NETIF_F_TSO
 2728 	#endif
 2729 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 2730 	#error TX ring too small!
 2731 	#endif
 2732 	#define TYPHOON_CRYPTO_NONE		0x00
 2733 	#define TYPHOON_CRYPTO_DES		0x01
 2734 	#define TYPHOON_CRYPTO_3DES		0x02
 2735 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 2736 	#define TYPHOON_FIBER			0x08
 2737 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 2738 	#define __3xp_aligned	____cacheline_aligned
 2739 	#define typhoon_post_pci_writes(x) \
 2740 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 2741 	#define TYPHOON_UDELAY			50
 2742 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 2743 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 2744 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 2745 	#if defined(NETIF_F_TSO)
 2746 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 2747 	#define TSO_NUM_DESCRIPTORS	2
 2748 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 2749 	#else
 2750 	#define NETIF_F_TSO 		0
 2751 	#define skb_tso_size(x) 	0
 2752 	#define TSO_NUM_DESCRIPTORS	0
 2753 	#define TSO_OFFLOAD_ON		0
 2754 	#endif
 2755 	/* LDV_COMMENT_END_PREP */
 2756 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_settings" */
 2757 	struct net_device * var_group1;
 2758 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_settings" */
 2759 	struct ethtool_cmd * var_group2;
 2760 	/* LDV_COMMENT_BEGIN_PREP */
 2761 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 2762 	#if 0
 2763 	#endif
 2764 	#ifdef CONFIG_PM
 2765 	#endif
 2766 	#ifdef CONFIG_PM
 2767 	#endif
 2768 	/* LDV_COMMENT_END_PREP */
 2769 	/* content: static int typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/
 2770 	/* LDV_COMMENT_BEGIN_PREP */
 2771 	#define TXHI_ENTRIES		2
 2772 	#define TXLO_ENTRIES		128
 2773 	#define RX_ENTRIES		32
 2774 	#define COMMAND_ENTRIES		16
 2775 	#define RESPONSE_ENTRIES	32
 2776 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 2777 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 2778 	#define RXFREE_ENTRIES		128
 2779 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 2780 	#define TX_TIMEOUT  (2*HZ)
 2781 	#define PKT_BUF_SZ		1536
 2782 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 2783 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 2784 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 2785 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 2786 	#undef NETIF_F_TSO
 2787 	#endif
 2788 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 2789 	#error TX ring too small!
 2790 	#endif
 2791 	#define TYPHOON_CRYPTO_NONE		0x00
 2792 	#define TYPHOON_CRYPTO_DES		0x01
 2793 	#define TYPHOON_CRYPTO_3DES		0x02
 2794 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 2795 	#define TYPHOON_FIBER			0x08
 2796 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 2797 	#define __3xp_aligned	____cacheline_aligned
 2798 	#define typhoon_post_pci_writes(x) \
 2799 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 2800 	#define TYPHOON_UDELAY			50
 2801 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 2802 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 2803 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 2804 	#if defined(NETIF_F_TSO)
 2805 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 2806 	#define TSO_NUM_DESCRIPTORS	2
 2807 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 2808 	#else
 2809 	#define NETIF_F_TSO 		0
 2810 	#define skb_tso_size(x) 	0
 2811 	#define TSO_NUM_DESCRIPTORS	0
 2812 	#define TSO_OFFLOAD_ON		0
 2813 	#endif
 2814 	/* LDV_COMMENT_END_PREP */
 2815 	/* LDV_COMMENT_BEGIN_PREP */
 2816 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 2817 	#if 0
 2818 	#endif
 2819 	#ifdef CONFIG_PM
 2820 	#endif
 2821 	#ifdef CONFIG_PM
 2822 	#endif
 2823 	/* LDV_COMMENT_END_PREP */
 2824 	/* content: static void typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
 2825 	/* LDV_COMMENT_BEGIN_PREP */
 2826 	#define TXHI_ENTRIES		2
 2827 	#define TXLO_ENTRIES		128
 2828 	#define RX_ENTRIES		32
 2829 	#define COMMAND_ENTRIES		16
 2830 	#define RESPONSE_ENTRIES	32
 2831 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 2832 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 2833 	#define RXFREE_ENTRIES		128
 2834 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 2835 	#define TX_TIMEOUT  (2*HZ)
 2836 	#define PKT_BUF_SZ		1536
 2837 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 2838 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 2839 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 2840 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 2841 	#undef NETIF_F_TSO
 2842 	#endif
 2843 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 2844 	#error TX ring too small!
 2845 	#endif
 2846 	#define TYPHOON_CRYPTO_NONE		0x00
 2847 	#define TYPHOON_CRYPTO_DES		0x01
 2848 	#define TYPHOON_CRYPTO_3DES		0x02
 2849 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 2850 	#define TYPHOON_FIBER			0x08
 2851 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 2852 	#define __3xp_aligned	____cacheline_aligned
 2853 	#define typhoon_post_pci_writes(x) \
 2854 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 2855 	#define TYPHOON_UDELAY			50
 2856 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 2857 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 2858 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 2859 	#if defined(NETIF_F_TSO)
 2860 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 2861 	#define TSO_NUM_DESCRIPTORS	2
 2862 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 2863 	#else
 2864 	#define NETIF_F_TSO 		0
 2865 	#define skb_tso_size(x) 	0
 2866 	#define TSO_NUM_DESCRIPTORS	0
 2867 	#define TSO_OFFLOAD_ON		0
 2868 	#endif
 2869 	/* LDV_COMMENT_END_PREP */
 2870 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_drvinfo" */
 2871 	struct ethtool_drvinfo * var_group3;
 2872 	/* LDV_COMMENT_BEGIN_PREP */
 2873 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 2874 	#if 0
 2875 	#endif
 2876 	#ifdef CONFIG_PM
 2877 	#endif
 2878 	#ifdef CONFIG_PM
 2879 	#endif
 2880 	/* LDV_COMMENT_END_PREP */
 2881 	/* content: static void typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/
 2882 	/* LDV_COMMENT_BEGIN_PREP */
 2883 	#define TXHI_ENTRIES		2
 2884 	#define TXLO_ENTRIES		128
 2885 	#define RX_ENTRIES		32
 2886 	#define COMMAND_ENTRIES		16
 2887 	#define RESPONSE_ENTRIES	32
 2888 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 2889 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 2890 	#define RXFREE_ENTRIES		128
 2891 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 2892 	#define TX_TIMEOUT  (2*HZ)
 2893 	#define PKT_BUF_SZ		1536
 2894 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 2895 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 2896 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 2897 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 2898 	#undef NETIF_F_TSO
 2899 	#endif
 2900 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 2901 	#error TX ring too small!
 2902 	#endif
 2903 	#define TYPHOON_CRYPTO_NONE		0x00
 2904 	#define TYPHOON_CRYPTO_DES		0x01
 2905 	#define TYPHOON_CRYPTO_3DES		0x02
 2906 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 2907 	#define TYPHOON_FIBER			0x08
 2908 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 2909 	#define __3xp_aligned	____cacheline_aligned
 2910 	#define typhoon_post_pci_writes(x) \
 2911 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 2912 	#define TYPHOON_UDELAY			50
 2913 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 2914 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 2915 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 2916 	#if defined(NETIF_F_TSO)
 2917 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 2918 	#define TSO_NUM_DESCRIPTORS	2
 2919 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 2920 	#else
 2921 	#define NETIF_F_TSO 		0
 2922 	#define skb_tso_size(x) 	0
 2923 	#define TSO_NUM_DESCRIPTORS	0
 2924 	#define TSO_OFFLOAD_ON		0
 2925 	#endif
 2926 	/* LDV_COMMENT_END_PREP */
 2927 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_wol" */
 2928 	struct ethtool_wolinfo * var_group4;
 2929 	/* LDV_COMMENT_BEGIN_PREP */
 2930 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 2931 	#if 0
 2932 	#endif
 2933 	#ifdef CONFIG_PM
 2934 	#endif
 2935 	#ifdef CONFIG_PM
 2936 	#endif
 2937 	/* LDV_COMMENT_END_PREP */
 2938 	/* content: static int typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/
 2939 	/* LDV_COMMENT_BEGIN_PREP */
 2940 	#define TXHI_ENTRIES		2
 2941 	#define TXLO_ENTRIES		128
 2942 	#define RX_ENTRIES		32
 2943 	#define COMMAND_ENTRIES		16
 2944 	#define RESPONSE_ENTRIES	32
 2945 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 2946 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 2947 	#define RXFREE_ENTRIES		128
 2948 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 2949 	#define TX_TIMEOUT  (2*HZ)
 2950 	#define PKT_BUF_SZ		1536
 2951 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 2952 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 2953 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 2954 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 2955 	#undef NETIF_F_TSO
 2956 	#endif
 2957 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 2958 	#error TX ring too small!
 2959 	#endif
 2960 	#define TYPHOON_CRYPTO_NONE		0x00
 2961 	#define TYPHOON_CRYPTO_DES		0x01
 2962 	#define TYPHOON_CRYPTO_3DES		0x02
 2963 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 2964 	#define TYPHOON_FIBER			0x08
 2965 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 2966 	#define __3xp_aligned	____cacheline_aligned
 2967 	#define typhoon_post_pci_writes(x) \
 2968 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 2969 	#define TYPHOON_UDELAY			50
 2970 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 2971 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 2972 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 2973 	#if defined(NETIF_F_TSO)
 2974 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 2975 	#define TSO_NUM_DESCRIPTORS	2
 2976 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 2977 	#else
 2978 	#define NETIF_F_TSO 		0
 2979 	#define skb_tso_size(x) 	0
 2980 	#define TSO_NUM_DESCRIPTORS	0
 2981 	#define TSO_OFFLOAD_ON		0
 2982 	#endif
 2983 	/* LDV_COMMENT_END_PREP */
 2984 	/* LDV_COMMENT_BEGIN_PREP */
 2985 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 2986 	#if 0
 2987 	#endif
 2988 	#ifdef CONFIG_PM
 2989 	#endif
 2990 	#ifdef CONFIG_PM
 2991 	#endif
 2992 	/* LDV_COMMENT_END_PREP */
 2993 	/* content: static void typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)*/
 2994 	/* LDV_COMMENT_BEGIN_PREP */
 2995 	#define TXHI_ENTRIES		2
 2996 	#define TXLO_ENTRIES		128
 2997 	#define RX_ENTRIES		32
 2998 	#define COMMAND_ENTRIES		16
 2999 	#define RESPONSE_ENTRIES	32
 3000 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3001 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3002 	#define RXFREE_ENTRIES		128
 3003 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3004 	#define TX_TIMEOUT  (2*HZ)
 3005 	#define PKT_BUF_SZ		1536
 3006 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3007 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3008 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3009 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3010 	#undef NETIF_F_TSO
 3011 	#endif
 3012 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3013 	#error TX ring too small!
 3014 	#endif
 3015 	#define TYPHOON_CRYPTO_NONE		0x00
 3016 	#define TYPHOON_CRYPTO_DES		0x01
 3017 	#define TYPHOON_CRYPTO_3DES		0x02
 3018 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3019 	#define TYPHOON_FIBER			0x08
 3020 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3021 	#define __3xp_aligned	____cacheline_aligned
 3022 	#define typhoon_post_pci_writes(x) \
 3023 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3024 	#define TYPHOON_UDELAY			50
 3025 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3026 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3027 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3028 	#if defined(NETIF_F_TSO)
 3029 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3030 	#define TSO_NUM_DESCRIPTORS	2
 3031 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3032 	#else
 3033 	#define NETIF_F_TSO 		0
 3034 	#define skb_tso_size(x) 	0
 3035 	#define TSO_NUM_DESCRIPTORS	0
 3036 	#define TSO_OFFLOAD_ON		0
 3037 	#endif
 3038 	/* LDV_COMMENT_END_PREP */
 3039 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_get_ringparam" */
 3040 	struct ethtool_ringparam * var_group5;
 3041 	/* LDV_COMMENT_BEGIN_PREP */
 3042 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3043 	#if 0
 3044 	#endif
 3045 	#ifdef CONFIG_PM
 3046 	#endif
 3047 	#ifdef CONFIG_PM
 3048 	#endif
 3049 	/* LDV_COMMENT_END_PREP */
 3050 
 3051 	/** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/
 3052 	/* content: static int typhoon_open(struct net_device *dev)*/
 3053 	/* LDV_COMMENT_BEGIN_PREP */
 3054 	#define TXHI_ENTRIES		2
 3055 	#define TXLO_ENTRIES		128
 3056 	#define RX_ENTRIES		32
 3057 	#define COMMAND_ENTRIES		16
 3058 	#define RESPONSE_ENTRIES	32
 3059 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3060 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3061 	#define RXFREE_ENTRIES		128
 3062 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3063 	#define TX_TIMEOUT  (2*HZ)
 3064 	#define PKT_BUF_SZ		1536
 3065 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3066 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3067 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3068 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3069 	#undef NETIF_F_TSO
 3070 	#endif
 3071 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3072 	#error TX ring too small!
 3073 	#endif
 3074 	#define TYPHOON_CRYPTO_NONE		0x00
 3075 	#define TYPHOON_CRYPTO_DES		0x01
 3076 	#define TYPHOON_CRYPTO_3DES		0x02
 3077 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3078 	#define TYPHOON_FIBER			0x08
 3079 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3080 	#define __3xp_aligned	____cacheline_aligned
 3081 	#define typhoon_post_pci_writes(x) \
 3082 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3083 	#define TYPHOON_UDELAY			50
 3084 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3085 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3086 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3087 	#if defined(NETIF_F_TSO)
 3088 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3089 	#define TSO_NUM_DESCRIPTORS	2
 3090 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3091 	#else
 3092 	#define NETIF_F_TSO 		0
 3093 	#define skb_tso_size(x) 	0
 3094 	#define TSO_NUM_DESCRIPTORS	0
 3095 	#define TSO_OFFLOAD_ON		0
 3096 	#endif
 3097 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3098 	#if 0
 3099 	#endif
 3100 	/* LDV_COMMENT_END_PREP */
 3101 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "typhoon_open" */
 3102 	static int res_typhoon_open_47;
 3103 	/* LDV_COMMENT_BEGIN_PREP */
 3104 	#ifdef CONFIG_PM
 3105 	#endif
 3106 	#ifdef CONFIG_PM
 3107 	#endif
 3108 	/* LDV_COMMENT_END_PREP */
 3109 	/* content: static int typhoon_close(struct net_device *dev)*/
 3110 	/* LDV_COMMENT_BEGIN_PREP */
 3111 	#define TXHI_ENTRIES		2
 3112 	#define TXLO_ENTRIES		128
 3113 	#define RX_ENTRIES		32
 3114 	#define COMMAND_ENTRIES		16
 3115 	#define RESPONSE_ENTRIES	32
 3116 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3117 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3118 	#define RXFREE_ENTRIES		128
 3119 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3120 	#define TX_TIMEOUT  (2*HZ)
 3121 	#define PKT_BUF_SZ		1536
 3122 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3123 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3124 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3125 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3126 	#undef NETIF_F_TSO
 3127 	#endif
 3128 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3129 	#error TX ring too small!
 3130 	#endif
 3131 	#define TYPHOON_CRYPTO_NONE		0x00
 3132 	#define TYPHOON_CRYPTO_DES		0x01
 3133 	#define TYPHOON_CRYPTO_3DES		0x02
 3134 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3135 	#define TYPHOON_FIBER			0x08
 3136 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3137 	#define __3xp_aligned	____cacheline_aligned
 3138 	#define typhoon_post_pci_writes(x) \
 3139 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3140 	#define TYPHOON_UDELAY			50
 3141 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3142 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3143 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3144 	#if defined(NETIF_F_TSO)
 3145 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3146 	#define TSO_NUM_DESCRIPTORS	2
 3147 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3148 	#else
 3149 	#define NETIF_F_TSO 		0
 3150 	#define skb_tso_size(x) 	0
 3151 	#define TSO_NUM_DESCRIPTORS	0
 3152 	#define TSO_OFFLOAD_ON		0
 3153 	#endif
 3154 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3155 	#if 0
 3156 	#endif
 3157 	/* LDV_COMMENT_END_PREP */
 3158 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "typhoon_close" */
 3159 	static int res_typhoon_close_48;
 3160 	/* LDV_COMMENT_BEGIN_PREP */
 3161 	#ifdef CONFIG_PM
 3162 	#endif
 3163 	#ifdef CONFIG_PM
 3164 	#endif
 3165 	/* LDV_COMMENT_END_PREP */
 3166 	/* content: static netdev_tx_t typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)*/
 3167 	/* LDV_COMMENT_BEGIN_PREP */
 3168 	#define TXHI_ENTRIES		2
 3169 	#define TXLO_ENTRIES		128
 3170 	#define RX_ENTRIES		32
 3171 	#define COMMAND_ENTRIES		16
 3172 	#define RESPONSE_ENTRIES	32
 3173 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3174 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3175 	#define RXFREE_ENTRIES		128
 3176 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3177 	#define TX_TIMEOUT  (2*HZ)
 3178 	#define PKT_BUF_SZ		1536
 3179 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3180 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3181 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3182 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3183 	#undef NETIF_F_TSO
 3184 	#endif
 3185 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3186 	#error TX ring too small!
 3187 	#endif
 3188 	#define TYPHOON_CRYPTO_NONE		0x00
 3189 	#define TYPHOON_CRYPTO_DES		0x01
 3190 	#define TYPHOON_CRYPTO_3DES		0x02
 3191 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3192 	#define TYPHOON_FIBER			0x08
 3193 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3194 	#define __3xp_aligned	____cacheline_aligned
 3195 	#define typhoon_post_pci_writes(x) \
 3196 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3197 	#define TYPHOON_UDELAY			50
 3198 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3199 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3200 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3201 	#if defined(NETIF_F_TSO)
 3202 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3203 	#define TSO_NUM_DESCRIPTORS	2
 3204 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3205 	#else
 3206 	#define NETIF_F_TSO 		0
 3207 	#define skb_tso_size(x) 	0
 3208 	#define TSO_NUM_DESCRIPTORS	0
 3209 	#define TSO_OFFLOAD_ON		0
 3210 	#endif
 3211 	/* LDV_COMMENT_END_PREP */
 3212 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_start_tx" */
 3213 	struct sk_buff * var_group6;
 3214 	/* LDV_COMMENT_BEGIN_PREP */
 3215 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3216 	#if 0
 3217 	#endif
 3218 	#ifdef CONFIG_PM
 3219 	#endif
 3220 	#ifdef CONFIG_PM
 3221 	#endif
 3222 	/* LDV_COMMENT_END_PREP */
 3223 	/* content: static void typhoon_set_rx_mode(struct net_device *dev)*/
 3224 	/* LDV_COMMENT_BEGIN_PREP */
 3225 	#define TXHI_ENTRIES		2
 3226 	#define TXLO_ENTRIES		128
 3227 	#define RX_ENTRIES		32
 3228 	#define COMMAND_ENTRIES		16
 3229 	#define RESPONSE_ENTRIES	32
 3230 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3231 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3232 	#define RXFREE_ENTRIES		128
 3233 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3234 	#define TX_TIMEOUT  (2*HZ)
 3235 	#define PKT_BUF_SZ		1536
 3236 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3237 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3238 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3239 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3240 	#undef NETIF_F_TSO
 3241 	#endif
 3242 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3243 	#error TX ring too small!
 3244 	#endif
 3245 	#define TYPHOON_CRYPTO_NONE		0x00
 3246 	#define TYPHOON_CRYPTO_DES		0x01
 3247 	#define TYPHOON_CRYPTO_3DES		0x02
 3248 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3249 	#define TYPHOON_FIBER			0x08
 3250 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3251 	#define __3xp_aligned	____cacheline_aligned
 3252 	#define typhoon_post_pci_writes(x) \
 3253 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3254 	#define TYPHOON_UDELAY			50
 3255 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3256 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3257 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3258 	#if defined(NETIF_F_TSO)
 3259 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3260 	#define TSO_NUM_DESCRIPTORS	2
 3261 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3262 	#else
 3263 	#define NETIF_F_TSO 		0
 3264 	#define skb_tso_size(x) 	0
 3265 	#define TSO_NUM_DESCRIPTORS	0
 3266 	#define TSO_OFFLOAD_ON		0
 3267 	#endif
 3268 	/* LDV_COMMENT_END_PREP */
 3269 	/* LDV_COMMENT_BEGIN_PREP */
 3270 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3271 	#if 0
 3272 	#endif
 3273 	#ifdef CONFIG_PM
 3274 	#endif
 3275 	#ifdef CONFIG_PM
 3276 	#endif
 3277 	/* LDV_COMMENT_END_PREP */
 3278 	/* content: static void typhoon_tx_timeout(struct net_device *dev)*/
 3279 	/* LDV_COMMENT_BEGIN_PREP */
 3280 	#define TXHI_ENTRIES		2
 3281 	#define TXLO_ENTRIES		128
 3282 	#define RX_ENTRIES		32
 3283 	#define COMMAND_ENTRIES		16
 3284 	#define RESPONSE_ENTRIES	32
 3285 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3286 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3287 	#define RXFREE_ENTRIES		128
 3288 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3289 	#define TX_TIMEOUT  (2*HZ)
 3290 	#define PKT_BUF_SZ		1536
 3291 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3292 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3293 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3294 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3295 	#undef NETIF_F_TSO
 3296 	#endif
 3297 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3298 	#error TX ring too small!
 3299 	#endif
 3300 	#define TYPHOON_CRYPTO_NONE		0x00
 3301 	#define TYPHOON_CRYPTO_DES		0x01
 3302 	#define TYPHOON_CRYPTO_3DES		0x02
 3303 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3304 	#define TYPHOON_FIBER			0x08
 3305 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3306 	#define __3xp_aligned	____cacheline_aligned
 3307 	#define typhoon_post_pci_writes(x) \
 3308 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3309 	#define TYPHOON_UDELAY			50
 3310 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3311 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3312 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3313 	#if defined(NETIF_F_TSO)
 3314 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3315 	#define TSO_NUM_DESCRIPTORS	2
 3316 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3317 	#else
 3318 	#define NETIF_F_TSO 		0
 3319 	#define skb_tso_size(x) 	0
 3320 	#define TSO_NUM_DESCRIPTORS	0
 3321 	#define TSO_OFFLOAD_ON		0
 3322 	#endif
 3323 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3324 	#if 0
 3325 	#endif
 3326 	/* LDV_COMMENT_END_PREP */
 3327 	/* LDV_COMMENT_BEGIN_PREP */
 3328 	#ifdef CONFIG_PM
 3329 	#endif
 3330 	#ifdef CONFIG_PM
 3331 	#endif
 3332 	/* LDV_COMMENT_END_PREP */
 3333 	/* content: static struct net_device_stats * typhoon_get_stats(struct net_device *dev)*/
 3334 	/* LDV_COMMENT_BEGIN_PREP */
 3335 	#define TXHI_ENTRIES		2
 3336 	#define TXLO_ENTRIES		128
 3337 	#define RX_ENTRIES		32
 3338 	#define COMMAND_ENTRIES		16
 3339 	#define RESPONSE_ENTRIES	32
 3340 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3341 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3342 	#define RXFREE_ENTRIES		128
 3343 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3344 	#define TX_TIMEOUT  (2*HZ)
 3345 	#define PKT_BUF_SZ		1536
 3346 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3347 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3348 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3349 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3350 	#undef NETIF_F_TSO
 3351 	#endif
 3352 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3353 	#error TX ring too small!
 3354 	#endif
 3355 	#define TYPHOON_CRYPTO_NONE		0x00
 3356 	#define TYPHOON_CRYPTO_DES		0x01
 3357 	#define TYPHOON_CRYPTO_3DES		0x02
 3358 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3359 	#define TYPHOON_FIBER			0x08
 3360 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3361 	#define __3xp_aligned	____cacheline_aligned
 3362 	#define typhoon_post_pci_writes(x) \
 3363 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3364 	#define TYPHOON_UDELAY			50
 3365 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3366 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3367 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3368 	#if defined(NETIF_F_TSO)
 3369 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3370 	#define TSO_NUM_DESCRIPTORS	2
 3371 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3372 	#else
 3373 	#define NETIF_F_TSO 		0
 3374 	#define skb_tso_size(x) 	0
 3375 	#define TSO_NUM_DESCRIPTORS	0
 3376 	#define TSO_OFFLOAD_ON		0
 3377 	#endif
 3378 	/* LDV_COMMENT_END_PREP */
 3379 	/* LDV_COMMENT_BEGIN_PREP */
 3380 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3381 	#if 0
 3382 	#endif
 3383 	#ifdef CONFIG_PM
 3384 	#endif
 3385 	#ifdef CONFIG_PM
 3386 	#endif
 3387 	/* LDV_COMMENT_END_PREP */
 3388 
 3389 	/** STRUCT: struct type: pci_driver, struct name: typhoon_driver **/
 3390 	/* content: static int typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
 3391 	/* LDV_COMMENT_BEGIN_PREP */
 3392 	#define TXHI_ENTRIES		2
 3393 	#define TXLO_ENTRIES		128
 3394 	#define RX_ENTRIES		32
 3395 	#define COMMAND_ENTRIES		16
 3396 	#define RESPONSE_ENTRIES	32
 3397 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3398 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3399 	#define RXFREE_ENTRIES		128
 3400 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3401 	#define TX_TIMEOUT  (2*HZ)
 3402 	#define PKT_BUF_SZ		1536
 3403 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3404 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3405 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3406 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3407 	#undef NETIF_F_TSO
 3408 	#endif
 3409 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3410 	#error TX ring too small!
 3411 	#endif
 3412 	#define TYPHOON_CRYPTO_NONE		0x00
 3413 	#define TYPHOON_CRYPTO_DES		0x01
 3414 	#define TYPHOON_CRYPTO_3DES		0x02
 3415 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3416 	#define TYPHOON_FIBER			0x08
 3417 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3418 	#define __3xp_aligned	____cacheline_aligned
 3419 	#define typhoon_post_pci_writes(x) \
 3420 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3421 	#define TYPHOON_UDELAY			50
 3422 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3423 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3424 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3425 	#if defined(NETIF_F_TSO)
 3426 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3427 	#define TSO_NUM_DESCRIPTORS	2
 3428 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3429 	#else
 3430 	#define NETIF_F_TSO 		0
 3431 	#define skb_tso_size(x) 	0
 3432 	#define TSO_NUM_DESCRIPTORS	0
 3433 	#define TSO_OFFLOAD_ON		0
 3434 	#endif
 3435 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3436 	#if 0
 3437 	#endif
 3438 	#ifdef CONFIG_PM
 3439 	#endif
 3440 	/* LDV_COMMENT_END_PREP */
 3441 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_init_one" */
 3442 	struct pci_dev * var_group7;
 3443 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_init_one" */
 3444 	const struct pci_device_id * var_typhoon_init_one_52_p1;
 3445 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "typhoon_init_one" */
 3446 	static int res_typhoon_init_one_52;
 3447 	/* LDV_COMMENT_BEGIN_PREP */
 3448 	#ifdef CONFIG_PM
 3449 	#endif
 3450 	/* LDV_COMMENT_END_PREP */
 3451 	/* content: static void typhoon_remove_one(struct pci_dev *pdev)*/
 3452 	/* LDV_COMMENT_BEGIN_PREP */
 3453 	#define TXHI_ENTRIES		2
 3454 	#define TXLO_ENTRIES		128
 3455 	#define RX_ENTRIES		32
 3456 	#define COMMAND_ENTRIES		16
 3457 	#define RESPONSE_ENTRIES	32
 3458 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3459 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3460 	#define RXFREE_ENTRIES		128
 3461 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3462 	#define TX_TIMEOUT  (2*HZ)
 3463 	#define PKT_BUF_SZ		1536
 3464 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3465 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3466 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3467 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3468 	#undef NETIF_F_TSO
 3469 	#endif
 3470 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3471 	#error TX ring too small!
 3472 	#endif
 3473 	#define TYPHOON_CRYPTO_NONE		0x00
 3474 	#define TYPHOON_CRYPTO_DES		0x01
 3475 	#define TYPHOON_CRYPTO_3DES		0x02
 3476 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3477 	#define TYPHOON_FIBER			0x08
 3478 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3479 	#define __3xp_aligned	____cacheline_aligned
 3480 	#define typhoon_post_pci_writes(x) \
 3481 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3482 	#define TYPHOON_UDELAY			50
 3483 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3484 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3485 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3486 	#if defined(NETIF_F_TSO)
 3487 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3488 	#define TSO_NUM_DESCRIPTORS	2
 3489 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3490 	#else
 3491 	#define NETIF_F_TSO 		0
 3492 	#define skb_tso_size(x) 	0
 3493 	#define TSO_NUM_DESCRIPTORS	0
 3494 	#define TSO_OFFLOAD_ON		0
 3495 	#endif
 3496 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3497 	#if 0
 3498 	#endif
 3499 	#ifdef CONFIG_PM
 3500 	#endif
 3501 	/* LDV_COMMENT_END_PREP */
 3502 	/* LDV_COMMENT_BEGIN_PREP */
 3503 	#ifdef CONFIG_PM
 3504 	#endif
 3505 	/* LDV_COMMENT_END_PREP */
 3506 	/* content: static int typhoon_suspend(struct pci_dev *pdev, pm_message_t state)*/
 3507 	/* LDV_COMMENT_BEGIN_PREP */
 3508 	#define TXHI_ENTRIES		2
 3509 	#define TXLO_ENTRIES		128
 3510 	#define RX_ENTRIES		32
 3511 	#define COMMAND_ENTRIES		16
 3512 	#define RESPONSE_ENTRIES	32
 3513 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3514 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3515 	#define RXFREE_ENTRIES		128
 3516 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3517 	#define TX_TIMEOUT  (2*HZ)
 3518 	#define PKT_BUF_SZ		1536
 3519 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3520 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3521 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3522 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3523 	#undef NETIF_F_TSO
 3524 	#endif
 3525 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3526 	#error TX ring too small!
 3527 	#endif
 3528 	#define TYPHOON_CRYPTO_NONE		0x00
 3529 	#define TYPHOON_CRYPTO_DES		0x01
 3530 	#define TYPHOON_CRYPTO_3DES		0x02
 3531 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3532 	#define TYPHOON_FIBER			0x08
 3533 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3534 	#define __3xp_aligned	____cacheline_aligned
 3535 	#define typhoon_post_pci_writes(x) \
 3536 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3537 	#define TYPHOON_UDELAY			50
 3538 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3539 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3540 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3541 	#if defined(NETIF_F_TSO)
 3542 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3543 	#define TSO_NUM_DESCRIPTORS	2
 3544 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3545 	#else
 3546 	#define NETIF_F_TSO 		0
 3547 	#define skb_tso_size(x) 	0
 3548 	#define TSO_NUM_DESCRIPTORS	0
 3549 	#define TSO_OFFLOAD_ON		0
 3550 	#endif
 3551 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3552 	#if 0
 3553 	#endif
 3554 	#ifdef CONFIG_PM
 3555 	/* LDV_COMMENT_END_PREP */
 3556 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_suspend" */
 3557 	pm_message_t  var_typhoon_suspend_50_p1;
 3558 	/* LDV_COMMENT_BEGIN_PREP */
 3559 	#endif
 3560 	#ifdef CONFIG_PM
 3561 	#endif
 3562 	/* LDV_COMMENT_END_PREP */
 3563 	/* content: static int typhoon_resume(struct pci_dev *pdev)*/
 3564 	/* LDV_COMMENT_BEGIN_PREP */
 3565 	#define TXHI_ENTRIES		2
 3566 	#define TXLO_ENTRIES		128
 3567 	#define RX_ENTRIES		32
 3568 	#define COMMAND_ENTRIES		16
 3569 	#define RESPONSE_ENTRIES	32
 3570 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3571 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3572 	#define RXFREE_ENTRIES		128
 3573 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3574 	#define TX_TIMEOUT  (2*HZ)
 3575 	#define PKT_BUF_SZ		1536
 3576 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3577 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3578 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3579 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3580 	#undef NETIF_F_TSO
 3581 	#endif
 3582 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3583 	#error TX ring too small!
 3584 	#endif
 3585 	#define TYPHOON_CRYPTO_NONE		0x00
 3586 	#define TYPHOON_CRYPTO_DES		0x01
 3587 	#define TYPHOON_CRYPTO_3DES		0x02
 3588 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3589 	#define TYPHOON_FIBER			0x08
 3590 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3591 	#define __3xp_aligned	____cacheline_aligned
 3592 	#define typhoon_post_pci_writes(x) \
 3593 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3594 	#define TYPHOON_UDELAY			50
 3595 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3596 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3597 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3598 	#if defined(NETIF_F_TSO)
 3599 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3600 	#define TSO_NUM_DESCRIPTORS	2
 3601 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3602 	#else
 3603 	#define NETIF_F_TSO 		0
 3604 	#define skb_tso_size(x) 	0
 3605 	#define TSO_NUM_DESCRIPTORS	0
 3606 	#define TSO_OFFLOAD_ON		0
 3607 	#endif
 3608 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3609 	#if 0
 3610 	#endif
 3611 	#ifdef CONFIG_PM
 3612 	/* LDV_COMMENT_END_PREP */
 3613 	/* LDV_COMMENT_BEGIN_PREP */
 3614 	#endif
 3615 	#ifdef CONFIG_PM
 3616 	#endif
 3617 	/* LDV_COMMENT_END_PREP */
 3618 
 3619 	/** CALLBACK SECTION request_irq **/
 3620 	/* content: static irqreturn_t typhoon_interrupt(int irq, void *dev_instance)*/
 3621 	/* LDV_COMMENT_BEGIN_PREP */
 3622 	#define TXHI_ENTRIES		2
 3623 	#define TXLO_ENTRIES		128
 3624 	#define RX_ENTRIES		32
 3625 	#define COMMAND_ENTRIES		16
 3626 	#define RESPONSE_ENTRIES	32
 3627 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3628 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3629 	#define RXFREE_ENTRIES		128
 3630 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3631 	#define TX_TIMEOUT  (2*HZ)
 3632 	#define PKT_BUF_SZ		1536
 3633 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3634 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3635 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3636 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3637 	#undef NETIF_F_TSO
 3638 	#endif
 3639 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3640 	#error TX ring too small!
 3641 	#endif
 3642 	#define TYPHOON_CRYPTO_NONE		0x00
 3643 	#define TYPHOON_CRYPTO_DES		0x01
 3644 	#define TYPHOON_CRYPTO_3DES		0x02
 3645 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3646 	#define TYPHOON_FIBER			0x08
 3647 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3648 	#define __3xp_aligned	____cacheline_aligned
 3649 	#define typhoon_post_pci_writes(x) \
 3650 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3651 	#define TYPHOON_UDELAY			50
 3652 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3653 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3654 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3655 	#if defined(NETIF_F_TSO)
 3656 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3657 	#define TSO_NUM_DESCRIPTORS	2
 3658 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3659 	#else
 3660 	#define NETIF_F_TSO 		0
 3661 	#define skb_tso_size(x) 	0
 3662 	#define TSO_NUM_DESCRIPTORS	0
 3663 	#define TSO_OFFLOAD_ON		0
 3664 	#endif
 3665 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3666 	#if 0
 3667 	#endif
 3668 	/* LDV_COMMENT_END_PREP */
 3669 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_interrupt" */
 3670 	int  var_typhoon_interrupt_40_p0;
 3671 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "typhoon_interrupt" */
 3672 	void * var_typhoon_interrupt_40_p1;
 3673 	/* LDV_COMMENT_BEGIN_PREP */
 3674 	#ifdef CONFIG_PM
 3675 	#endif
 3676 	#ifdef CONFIG_PM
 3677 	#endif
 3678 	/* LDV_COMMENT_END_PREP */
 3679 
 3680 
 3681 
 3682 
 3683 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 3684 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 3685 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 3686 	LDV_IN_INTERRUPT=1;
 3687 
 3688 
 3689 
 3690 
 3691 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 3692 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 3693 	/*============================= FUNCTION CALL SECTION       =============================*/
 3694 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 3695 	ldv_initialize();
 3696 
 3697 	/** INIT: init_type: ST_MODULE_INIT **/
 3698 	/* content: static int __init typhoon_init(void)*/
 3699 	/* LDV_COMMENT_BEGIN_PREP */
 3700 	#define TXHI_ENTRIES		2
 3701 	#define TXLO_ENTRIES		128
 3702 	#define RX_ENTRIES		32
 3703 	#define COMMAND_ENTRIES		16
 3704 	#define RESPONSE_ENTRIES	32
 3705 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3706 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3707 	#define RXFREE_ENTRIES		128
 3708 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3709 	#define TX_TIMEOUT  (2*HZ)
 3710 	#define PKT_BUF_SZ		1536
 3711 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 3712 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3713 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3714 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3715 	#undef NETIF_F_TSO
 3716 	#endif
 3717 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3718 	#error TX ring too small!
 3719 	#endif
 3720 	#define TYPHOON_CRYPTO_NONE		0x00
 3721 	#define TYPHOON_CRYPTO_DES		0x01
 3722 	#define TYPHOON_CRYPTO_3DES		0x02
 3723 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3724 	#define TYPHOON_FIBER			0x08
 3725 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3726 	#define __3xp_aligned	____cacheline_aligned
 3727 	#define typhoon_post_pci_writes(x) \
 3728 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3729 	#define TYPHOON_UDELAY			50
 3730 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3731 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3732 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3733 	#if defined(NETIF_F_TSO)
 3734 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3735 	#define TSO_NUM_DESCRIPTORS	2
 3736 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3737 	#else
 3738 	#define NETIF_F_TSO 		0
 3739 	#define skb_tso_size(x) 	0
 3740 	#define TSO_NUM_DESCRIPTORS	0
 3741 	#define TSO_OFFLOAD_ON		0
 3742 	#endif
 3743 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3744 	#if 0
 3745 	#endif
 3746 	#ifdef CONFIG_PM
 3747 	#endif
 3748 	#ifdef CONFIG_PM
 3749 	#endif
 3750 	/* LDV_COMMENT_END_PREP */
 3751 	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
 3752 	ldv_handler_precall();
 3753 	 if(typhoon_init()) 
 3754 		goto ldv_final;
 3755 	
 3756 
 3757 	int ldv_s_typhoon_netdev_ops_net_device_ops = 0;
 3758 	
 3759 
 3760 	int ldv_s_typhoon_driver_pci_driver = 0;
 3761 	
 3762 
 3763 	
 3764 
 3765 
 3766 	while(  nondet_int()
 3767 		|| !(ldv_s_typhoon_netdev_ops_net_device_ops == 0)
 3768 		|| !(ldv_s_typhoon_driver_pci_driver == 0)
 3769 	) {
 3770 
 3771 		switch(nondet_int()) {
 3772 
 3773 			case 0: {
 3774 
 3775 				/** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/
 3776 				
 3777 
 3778 				/* content: static int typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/
 3779 				/* LDV_COMMENT_BEGIN_PREP */
 3780 				#define TXHI_ENTRIES		2
 3781 				#define TXLO_ENTRIES		128
 3782 				#define RX_ENTRIES		32
 3783 				#define COMMAND_ENTRIES		16
 3784 				#define RESPONSE_ENTRIES	32
 3785 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3786 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3787 				#define RXFREE_ENTRIES		128
 3788 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3789 				#define TX_TIMEOUT  (2*HZ)
 3790 				#define PKT_BUF_SZ		1536
 3791 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 3792 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3793 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3794 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3795 				#undef NETIF_F_TSO
 3796 				#endif
 3797 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3798 				#error TX ring too small!
 3799 				#endif
 3800 				#define TYPHOON_CRYPTO_NONE		0x00
 3801 				#define TYPHOON_CRYPTO_DES		0x01
 3802 				#define TYPHOON_CRYPTO_3DES		0x02
 3803 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3804 				#define TYPHOON_FIBER			0x08
 3805 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3806 				#define __3xp_aligned	____cacheline_aligned
 3807 				#define typhoon_post_pci_writes(x) \
 3808 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3809 				#define TYPHOON_UDELAY			50
 3810 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3811 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3812 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3813 				#if defined(NETIF_F_TSO)
 3814 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3815 				#define TSO_NUM_DESCRIPTORS	2
 3816 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3817 				#else
 3818 				#define NETIF_F_TSO 		0
 3819 				#define skb_tso_size(x) 	0
 3820 				#define TSO_NUM_DESCRIPTORS	0
 3821 				#define TSO_OFFLOAD_ON		0
 3822 				#endif
 3823 				/* LDV_COMMENT_END_PREP */
 3824 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_settings" from driver structure with callbacks "typhoon_ethtool_ops" */
 3825 				ldv_handler_precall();
 3826 				typhoon_get_settings( var_group1, var_group2);
 3827 				/* LDV_COMMENT_BEGIN_PREP */
 3828 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3829 				#if 0
 3830 				#endif
 3831 				#ifdef CONFIG_PM
 3832 				#endif
 3833 				#ifdef CONFIG_PM
 3834 				#endif
 3835 				/* LDV_COMMENT_END_PREP */
 3836 				
 3837 
 3838 				
 3839 
 3840 			}
 3841 
 3842 			break;
 3843 			case 1: {
 3844 
 3845 				/** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/
 3846 				
 3847 
 3848 				/* content: static int typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)*/
 3849 				/* LDV_COMMENT_BEGIN_PREP */
 3850 				#define TXHI_ENTRIES		2
 3851 				#define TXLO_ENTRIES		128
 3852 				#define RX_ENTRIES		32
 3853 				#define COMMAND_ENTRIES		16
 3854 				#define RESPONSE_ENTRIES	32
 3855 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3856 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3857 				#define RXFREE_ENTRIES		128
 3858 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3859 				#define TX_TIMEOUT  (2*HZ)
 3860 				#define PKT_BUF_SZ		1536
 3861 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 3862 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3863 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3864 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3865 				#undef NETIF_F_TSO
 3866 				#endif
 3867 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3868 				#error TX ring too small!
 3869 				#endif
 3870 				#define TYPHOON_CRYPTO_NONE		0x00
 3871 				#define TYPHOON_CRYPTO_DES		0x01
 3872 				#define TYPHOON_CRYPTO_3DES		0x02
 3873 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3874 				#define TYPHOON_FIBER			0x08
 3875 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3876 				#define __3xp_aligned	____cacheline_aligned
 3877 				#define typhoon_post_pci_writes(x) \
 3878 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3879 				#define TYPHOON_UDELAY			50
 3880 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3881 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3882 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3883 				#if defined(NETIF_F_TSO)
 3884 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3885 				#define TSO_NUM_DESCRIPTORS	2
 3886 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3887 				#else
 3888 				#define NETIF_F_TSO 		0
 3889 				#define skb_tso_size(x) 	0
 3890 				#define TSO_NUM_DESCRIPTORS	0
 3891 				#define TSO_OFFLOAD_ON		0
 3892 				#endif
 3893 				/* LDV_COMMENT_END_PREP */
 3894 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_settings" from driver structure with callbacks "typhoon_ethtool_ops" */
 3895 				ldv_handler_precall();
 3896 				typhoon_set_settings( var_group1, var_group2);
 3897 				/* LDV_COMMENT_BEGIN_PREP */
 3898 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3899 				#if 0
 3900 				#endif
 3901 				#ifdef CONFIG_PM
 3902 				#endif
 3903 				#ifdef CONFIG_PM
 3904 				#endif
 3905 				/* LDV_COMMENT_END_PREP */
 3906 				
 3907 
 3908 				
 3909 
 3910 			}
 3911 
 3912 			break;
 3913 			case 2: {
 3914 
 3915 				/** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/
 3916 				
 3917 
 3918 				/* content: static void typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)*/
 3919 				/* LDV_COMMENT_BEGIN_PREP */
 3920 				#define TXHI_ENTRIES		2
 3921 				#define TXLO_ENTRIES		128
 3922 				#define RX_ENTRIES		32
 3923 				#define COMMAND_ENTRIES		16
 3924 				#define RESPONSE_ENTRIES	32
 3925 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3926 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3927 				#define RXFREE_ENTRIES		128
 3928 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3929 				#define TX_TIMEOUT  (2*HZ)
 3930 				#define PKT_BUF_SZ		1536
 3931 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 3932 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 3933 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 3934 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 3935 				#undef NETIF_F_TSO
 3936 				#endif
 3937 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 3938 				#error TX ring too small!
 3939 				#endif
 3940 				#define TYPHOON_CRYPTO_NONE		0x00
 3941 				#define TYPHOON_CRYPTO_DES		0x01
 3942 				#define TYPHOON_CRYPTO_3DES		0x02
 3943 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 3944 				#define TYPHOON_FIBER			0x08
 3945 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 3946 				#define __3xp_aligned	____cacheline_aligned
 3947 				#define typhoon_post_pci_writes(x) \
 3948 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 3949 				#define TYPHOON_UDELAY			50
 3950 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 3951 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 3952 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 3953 				#if defined(NETIF_F_TSO)
 3954 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 3955 				#define TSO_NUM_DESCRIPTORS	2
 3956 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 3957 				#else
 3958 				#define NETIF_F_TSO 		0
 3959 				#define skb_tso_size(x) 	0
 3960 				#define TSO_NUM_DESCRIPTORS	0
 3961 				#define TSO_OFFLOAD_ON		0
 3962 				#endif
 3963 				/* LDV_COMMENT_END_PREP */
 3964 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_drvinfo" from driver structure with callbacks "typhoon_ethtool_ops" */
 3965 				ldv_handler_precall();
 3966 				typhoon_get_drvinfo( var_group1, var_group3);
 3967 				/* LDV_COMMENT_BEGIN_PREP */
 3968 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 3969 				#if 0
 3970 				#endif
 3971 				#ifdef CONFIG_PM
 3972 				#endif
 3973 				#ifdef CONFIG_PM
 3974 				#endif
 3975 				/* LDV_COMMENT_END_PREP */
 3976 				
 3977 
 3978 				
 3979 
 3980 			}
 3981 
 3982 			break;
 3983 			case 3: {
 3984 
 3985 				/** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/
 3986 				
 3987 
 3988 				/* content: static void typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/
 3989 				/* LDV_COMMENT_BEGIN_PREP */
 3990 				#define TXHI_ENTRIES		2
 3991 				#define TXLO_ENTRIES		128
 3992 				#define RX_ENTRIES		32
 3993 				#define COMMAND_ENTRIES		16
 3994 				#define RESPONSE_ENTRIES	32
 3995 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 3996 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 3997 				#define RXFREE_ENTRIES		128
 3998 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 3999 				#define TX_TIMEOUT  (2*HZ)
 4000 				#define PKT_BUF_SZ		1536
 4001 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4002 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4003 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4004 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4005 				#undef NETIF_F_TSO
 4006 				#endif
 4007 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4008 				#error TX ring too small!
 4009 				#endif
 4010 				#define TYPHOON_CRYPTO_NONE		0x00
 4011 				#define TYPHOON_CRYPTO_DES		0x01
 4012 				#define TYPHOON_CRYPTO_3DES		0x02
 4013 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4014 				#define TYPHOON_FIBER			0x08
 4015 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4016 				#define __3xp_aligned	____cacheline_aligned
 4017 				#define typhoon_post_pci_writes(x) \
 4018 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4019 				#define TYPHOON_UDELAY			50
 4020 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4021 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4022 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4023 				#if defined(NETIF_F_TSO)
 4024 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4025 				#define TSO_NUM_DESCRIPTORS	2
 4026 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4027 				#else
 4028 				#define NETIF_F_TSO 		0
 4029 				#define skb_tso_size(x) 	0
 4030 				#define TSO_NUM_DESCRIPTORS	0
 4031 				#define TSO_OFFLOAD_ON		0
 4032 				#endif
 4033 				/* LDV_COMMENT_END_PREP */
 4034 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_wol" from driver structure with callbacks "typhoon_ethtool_ops" */
 4035 				ldv_handler_precall();
 4036 				typhoon_get_wol( var_group1, var_group4);
 4037 				/* LDV_COMMENT_BEGIN_PREP */
 4038 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4039 				#if 0
 4040 				#endif
 4041 				#ifdef CONFIG_PM
 4042 				#endif
 4043 				#ifdef CONFIG_PM
 4044 				#endif
 4045 				/* LDV_COMMENT_END_PREP */
 4046 				
 4047 
 4048 				
 4049 
 4050 			}
 4051 
 4052 			break;
 4053 			case 4: {
 4054 
 4055 				/** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/
 4056 				
 4057 
 4058 				/* content: static int typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)*/
 4059 				/* LDV_COMMENT_BEGIN_PREP */
 4060 				#define TXHI_ENTRIES		2
 4061 				#define TXLO_ENTRIES		128
 4062 				#define RX_ENTRIES		32
 4063 				#define COMMAND_ENTRIES		16
 4064 				#define RESPONSE_ENTRIES	32
 4065 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4066 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4067 				#define RXFREE_ENTRIES		128
 4068 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4069 				#define TX_TIMEOUT  (2*HZ)
 4070 				#define PKT_BUF_SZ		1536
 4071 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4072 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4073 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4074 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4075 				#undef NETIF_F_TSO
 4076 				#endif
 4077 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4078 				#error TX ring too small!
 4079 				#endif
 4080 				#define TYPHOON_CRYPTO_NONE		0x00
 4081 				#define TYPHOON_CRYPTO_DES		0x01
 4082 				#define TYPHOON_CRYPTO_3DES		0x02
 4083 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4084 				#define TYPHOON_FIBER			0x08
 4085 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4086 				#define __3xp_aligned	____cacheline_aligned
 4087 				#define typhoon_post_pci_writes(x) \
 4088 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4089 				#define TYPHOON_UDELAY			50
 4090 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4091 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4092 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4093 				#if defined(NETIF_F_TSO)
 4094 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4095 				#define TSO_NUM_DESCRIPTORS	2
 4096 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4097 				#else
 4098 				#define NETIF_F_TSO 		0
 4099 				#define skb_tso_size(x) 	0
 4100 				#define TSO_NUM_DESCRIPTORS	0
 4101 				#define TSO_OFFLOAD_ON		0
 4102 				#endif
 4103 				/* LDV_COMMENT_END_PREP */
 4104 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_wol" from driver structure with callbacks "typhoon_ethtool_ops" */
 4105 				ldv_handler_precall();
 4106 				typhoon_set_wol( var_group1, var_group4);
 4107 				/* LDV_COMMENT_BEGIN_PREP */
 4108 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4109 				#if 0
 4110 				#endif
 4111 				#ifdef CONFIG_PM
 4112 				#endif
 4113 				#ifdef CONFIG_PM
 4114 				#endif
 4115 				/* LDV_COMMENT_END_PREP */
 4116 				
 4117 
 4118 				
 4119 
 4120 			}
 4121 
 4122 			break;
 4123 			case 5: {
 4124 
 4125 				/** STRUCT: struct type: ethtool_ops, struct name: typhoon_ethtool_ops **/
 4126 				
 4127 
 4128 				/* content: static void typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)*/
 4129 				/* LDV_COMMENT_BEGIN_PREP */
 4130 				#define TXHI_ENTRIES		2
 4131 				#define TXLO_ENTRIES		128
 4132 				#define RX_ENTRIES		32
 4133 				#define COMMAND_ENTRIES		16
 4134 				#define RESPONSE_ENTRIES	32
 4135 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4136 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4137 				#define RXFREE_ENTRIES		128
 4138 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4139 				#define TX_TIMEOUT  (2*HZ)
 4140 				#define PKT_BUF_SZ		1536
 4141 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4142 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4143 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4144 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4145 				#undef NETIF_F_TSO
 4146 				#endif
 4147 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4148 				#error TX ring too small!
 4149 				#endif
 4150 				#define TYPHOON_CRYPTO_NONE		0x00
 4151 				#define TYPHOON_CRYPTO_DES		0x01
 4152 				#define TYPHOON_CRYPTO_3DES		0x02
 4153 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4154 				#define TYPHOON_FIBER			0x08
 4155 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4156 				#define __3xp_aligned	____cacheline_aligned
 4157 				#define typhoon_post_pci_writes(x) \
 4158 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4159 				#define TYPHOON_UDELAY			50
 4160 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4161 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4162 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4163 				#if defined(NETIF_F_TSO)
 4164 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4165 				#define TSO_NUM_DESCRIPTORS	2
 4166 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4167 				#else
 4168 				#define NETIF_F_TSO 		0
 4169 				#define skb_tso_size(x) 	0
 4170 				#define TSO_NUM_DESCRIPTORS	0
 4171 				#define TSO_OFFLOAD_ON		0
 4172 				#endif
 4173 				/* LDV_COMMENT_END_PREP */
 4174 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_ringparam" from driver structure with callbacks "typhoon_ethtool_ops" */
 4175 				ldv_handler_precall();
 4176 				typhoon_get_ringparam( var_group1, var_group5);
 4177 				/* LDV_COMMENT_BEGIN_PREP */
 4178 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4179 				#if 0
 4180 				#endif
 4181 				#ifdef CONFIG_PM
 4182 				#endif
 4183 				#ifdef CONFIG_PM
 4184 				#endif
 4185 				/* LDV_COMMENT_END_PREP */
 4186 				
 4187 
 4188 				
 4189 
 4190 			}
 4191 
 4192 			break;
 4193 			case 6: {
 4194 
 4195 				/** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/
 4196 				if(ldv_s_typhoon_netdev_ops_net_device_ops==0) {
 4197 
 4198 				/* content: static int typhoon_open(struct net_device *dev)*/
 4199 				/* LDV_COMMENT_BEGIN_PREP */
 4200 				#define TXHI_ENTRIES		2
 4201 				#define TXLO_ENTRIES		128
 4202 				#define RX_ENTRIES		32
 4203 				#define COMMAND_ENTRIES		16
 4204 				#define RESPONSE_ENTRIES	32
 4205 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4206 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4207 				#define RXFREE_ENTRIES		128
 4208 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4209 				#define TX_TIMEOUT  (2*HZ)
 4210 				#define PKT_BUF_SZ		1536
 4211 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4212 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4213 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4214 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4215 				#undef NETIF_F_TSO
 4216 				#endif
 4217 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4218 				#error TX ring too small!
 4219 				#endif
 4220 				#define TYPHOON_CRYPTO_NONE		0x00
 4221 				#define TYPHOON_CRYPTO_DES		0x01
 4222 				#define TYPHOON_CRYPTO_3DES		0x02
 4223 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4224 				#define TYPHOON_FIBER			0x08
 4225 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4226 				#define __3xp_aligned	____cacheline_aligned
 4227 				#define typhoon_post_pci_writes(x) \
 4228 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4229 				#define TYPHOON_UDELAY			50
 4230 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4231 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4232 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4233 				#if defined(NETIF_F_TSO)
 4234 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4235 				#define TSO_NUM_DESCRIPTORS	2
 4236 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4237 				#else
 4238 				#define NETIF_F_TSO 		0
 4239 				#define skb_tso_size(x) 	0
 4240 				#define TSO_NUM_DESCRIPTORS	0
 4241 				#define TSO_OFFLOAD_ON		0
 4242 				#endif
 4243 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4244 				#if 0
 4245 				#endif
 4246 				/* LDV_COMMENT_END_PREP */
4247 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "typhoon_netdev_ops". Standard function test for a correct return result. */
 4248 				ldv_handler_precall();
 4249 				res_typhoon_open_47 = typhoon_open( var_group1);
 4250 				 ldv_check_return_value(res_typhoon_open_47);
 4251 				 if(res_typhoon_open_47 < 0) 
 4252 					goto ldv_module_exit;
 4253 				/* LDV_COMMENT_BEGIN_PREP */
 4254 				#ifdef CONFIG_PM
 4255 				#endif
 4256 				#ifdef CONFIG_PM
 4257 				#endif
 4258 				/* LDV_COMMENT_END_PREP */
 4259 				ldv_s_typhoon_netdev_ops_net_device_ops++;
 4260 
 4261 				}
 4262 
 4263 			}
 4264 
 4265 			break;
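			/* Note (added): ldv_s_typhoon_netdev_ops_net_device_ops is the ordering counter the
			 * harness uses for this ops structure. It is assumed to start at 0, so ndo_open
			 * (case 6) can only run first; on success it is incremented to 1, which is the
			 * guard that lets ndo_stop (case 7) run and reset it back to 0. The pci_driver
			 * cases below apply the same scheme via ldv_s_typhoon_driver_pci_driver to order
			 * probe before remove. */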
 4266 			case 7: {
 4267 
 4268 				/** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/
 4269 				if(ldv_s_typhoon_netdev_ops_net_device_ops==1) {
 4270 
 4271 				/* content: static int typhoon_close(struct net_device *dev)*/
 4272 				/* LDV_COMMENT_BEGIN_PREP */
 4273 				#define TXHI_ENTRIES		2
 4274 				#define TXLO_ENTRIES		128
 4275 				#define RX_ENTRIES		32
 4276 				#define COMMAND_ENTRIES		16
 4277 				#define RESPONSE_ENTRIES	32
 4278 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4279 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4280 				#define RXFREE_ENTRIES		128
 4281 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4282 				#define TX_TIMEOUT  (2*HZ)
 4283 				#define PKT_BUF_SZ		1536
 4284 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4285 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4286 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4287 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4288 				#undef NETIF_F_TSO
 4289 				#endif
 4290 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4291 				#error TX ring too small!
 4292 				#endif
 4293 				#define TYPHOON_CRYPTO_NONE		0x00
 4294 				#define TYPHOON_CRYPTO_DES		0x01
 4295 				#define TYPHOON_CRYPTO_3DES		0x02
 4296 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4297 				#define TYPHOON_FIBER			0x08
 4298 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4299 				#define __3xp_aligned	____cacheline_aligned
 4300 				#define typhoon_post_pci_writes(x) \
 4301 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4302 				#define TYPHOON_UDELAY			50
 4303 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4304 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4305 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4306 				#if defined(NETIF_F_TSO)
 4307 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4308 				#define TSO_NUM_DESCRIPTORS	2
 4309 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4310 				#else
 4311 				#define NETIF_F_TSO 		0
 4312 				#define skb_tso_size(x) 	0
 4313 				#define TSO_NUM_DESCRIPTORS	0
 4314 				#define TSO_OFFLOAD_ON		0
 4315 				#endif
 4316 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4317 				#if 0
 4318 				#endif
 4319 				/* LDV_COMMENT_END_PREP */
4320 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "typhoon_netdev_ops". Standard function test for a correct return result. */
 4321 				ldv_handler_precall();
 4322 				res_typhoon_close_48 = typhoon_close( var_group1);
 4323 				 ldv_check_return_value(res_typhoon_close_48);
 4324 				 if(res_typhoon_close_48) 
 4325 					goto ldv_module_exit;
 4326 				/* LDV_COMMENT_BEGIN_PREP */
 4327 				#ifdef CONFIG_PM
 4328 				#endif
 4329 				#ifdef CONFIG_PM
 4330 				#endif
 4331 				/* LDV_COMMENT_END_PREP */
 4332 				ldv_s_typhoon_netdev_ops_net_device_ops=0;
 4333 
 4334 				}
 4335 
 4336 			}
 4337 
 4338 			break;
 4339 			case 8: {
 4340 
 4341 				/** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/
 4342 				
 4343 
 4344 				/* content: static netdev_tx_t typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)*/
 4345 				/* LDV_COMMENT_BEGIN_PREP */
 4346 				#define TXHI_ENTRIES		2
 4347 				#define TXLO_ENTRIES		128
 4348 				#define RX_ENTRIES		32
 4349 				#define COMMAND_ENTRIES		16
 4350 				#define RESPONSE_ENTRIES	32
 4351 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4352 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4353 				#define RXFREE_ENTRIES		128
 4354 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4355 				#define TX_TIMEOUT  (2*HZ)
 4356 				#define PKT_BUF_SZ		1536
 4357 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4358 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4359 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4360 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4361 				#undef NETIF_F_TSO
 4362 				#endif
 4363 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4364 				#error TX ring too small!
 4365 				#endif
 4366 				#define TYPHOON_CRYPTO_NONE		0x00
 4367 				#define TYPHOON_CRYPTO_DES		0x01
 4368 				#define TYPHOON_CRYPTO_3DES		0x02
 4369 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4370 				#define TYPHOON_FIBER			0x08
 4371 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4372 				#define __3xp_aligned	____cacheline_aligned
 4373 				#define typhoon_post_pci_writes(x) \
 4374 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4375 				#define TYPHOON_UDELAY			50
 4376 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4377 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4378 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4379 				#if defined(NETIF_F_TSO)
 4380 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4381 				#define TSO_NUM_DESCRIPTORS	2
 4382 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4383 				#else
 4384 				#define NETIF_F_TSO 		0
 4385 				#define skb_tso_size(x) 	0
 4386 				#define TSO_NUM_DESCRIPTORS	0
 4387 				#define TSO_OFFLOAD_ON		0
 4388 				#endif
 4389 				/* LDV_COMMENT_END_PREP */
 4390 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "typhoon_netdev_ops" */
 4391 				ldv_handler_precall();
 4392 				typhoon_start_tx( var_group6, var_group1);
 4393 				/* LDV_COMMENT_BEGIN_PREP */
 4394 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4395 				#if 0
 4396 				#endif
 4397 				#ifdef CONFIG_PM
 4398 				#endif
 4399 				#ifdef CONFIG_PM
 4400 				#endif
 4401 				/* LDV_COMMENT_END_PREP */
 4402 				
 4403 
 4404 				
 4405 
 4406 			}
 4407 
 4408 			break;
 4409 			case 9: {
 4410 
 4411 				/** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/
 4412 				
 4413 
 4414 				/* content: static void typhoon_set_rx_mode(struct net_device *dev)*/
 4415 				/* LDV_COMMENT_BEGIN_PREP */
 4416 				#define TXHI_ENTRIES		2
 4417 				#define TXLO_ENTRIES		128
 4418 				#define RX_ENTRIES		32
 4419 				#define COMMAND_ENTRIES		16
 4420 				#define RESPONSE_ENTRIES	32
 4421 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4422 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4423 				#define RXFREE_ENTRIES		128
 4424 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4425 				#define TX_TIMEOUT  (2*HZ)
 4426 				#define PKT_BUF_SZ		1536
 4427 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4428 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4429 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4430 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4431 				#undef NETIF_F_TSO
 4432 				#endif
 4433 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4434 				#error TX ring too small!
 4435 				#endif
 4436 				#define TYPHOON_CRYPTO_NONE		0x00
 4437 				#define TYPHOON_CRYPTO_DES		0x01
 4438 				#define TYPHOON_CRYPTO_3DES		0x02
 4439 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4440 				#define TYPHOON_FIBER			0x08
 4441 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4442 				#define __3xp_aligned	____cacheline_aligned
 4443 				#define typhoon_post_pci_writes(x) \
 4444 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4445 				#define TYPHOON_UDELAY			50
 4446 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4447 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4448 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4449 				#if defined(NETIF_F_TSO)
 4450 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4451 				#define TSO_NUM_DESCRIPTORS	2
 4452 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4453 				#else
 4454 				#define NETIF_F_TSO 		0
 4455 				#define skb_tso_size(x) 	0
 4456 				#define TSO_NUM_DESCRIPTORS	0
 4457 				#define TSO_OFFLOAD_ON		0
 4458 				#endif
 4459 				/* LDV_COMMENT_END_PREP */
 4460 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "typhoon_netdev_ops" */
 4461 				ldv_handler_precall();
 4462 				typhoon_set_rx_mode( var_group1);
 4463 				/* LDV_COMMENT_BEGIN_PREP */
 4464 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4465 				#if 0
 4466 				#endif
 4467 				#ifdef CONFIG_PM
 4468 				#endif
 4469 				#ifdef CONFIG_PM
 4470 				#endif
 4471 				/* LDV_COMMENT_END_PREP */
 4472 				
 4473 
 4474 				
 4475 
 4476 			}
 4477 
 4478 			break;
 4479 			case 10: {
 4480 
 4481 				/** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/
 4482 				
 4483 
 4484 				/* content: static void typhoon_tx_timeout(struct net_device *dev)*/
 4485 				/* LDV_COMMENT_BEGIN_PREP */
 4486 				#define TXHI_ENTRIES		2
 4487 				#define TXLO_ENTRIES		128
 4488 				#define RX_ENTRIES		32
 4489 				#define COMMAND_ENTRIES		16
 4490 				#define RESPONSE_ENTRIES	32
 4491 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4492 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4493 				#define RXFREE_ENTRIES		128
 4494 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4495 				#define TX_TIMEOUT  (2*HZ)
 4496 				#define PKT_BUF_SZ		1536
 4497 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4498 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4499 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4500 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4501 				#undef NETIF_F_TSO
 4502 				#endif
 4503 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4504 				#error TX ring too small!
 4505 				#endif
 4506 				#define TYPHOON_CRYPTO_NONE		0x00
 4507 				#define TYPHOON_CRYPTO_DES		0x01
 4508 				#define TYPHOON_CRYPTO_3DES		0x02
 4509 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4510 				#define TYPHOON_FIBER			0x08
 4511 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4512 				#define __3xp_aligned	____cacheline_aligned
 4513 				#define typhoon_post_pci_writes(x) \
 4514 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4515 				#define TYPHOON_UDELAY			50
 4516 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4517 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4518 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4519 				#if defined(NETIF_F_TSO)
 4520 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4521 				#define TSO_NUM_DESCRIPTORS	2
 4522 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4523 				#else
 4524 				#define NETIF_F_TSO 		0
 4525 				#define skb_tso_size(x) 	0
 4526 				#define TSO_NUM_DESCRIPTORS	0
 4527 				#define TSO_OFFLOAD_ON		0
 4528 				#endif
 4529 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4530 				#if 0
 4531 				#endif
 4532 				/* LDV_COMMENT_END_PREP */
 4533 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_tx_timeout" from driver structure with callbacks "typhoon_netdev_ops" */
 4534 				ldv_handler_precall();
 4535 				typhoon_tx_timeout( var_group1);
 4536 				/* LDV_COMMENT_BEGIN_PREP */
 4537 				#ifdef CONFIG_PM
 4538 				#endif
 4539 				#ifdef CONFIG_PM
 4540 				#endif
 4541 				/* LDV_COMMENT_END_PREP */
 4542 				
 4543 
 4544 				
 4545 
 4546 			}
 4547 
 4548 			break;
 4549 			case 11: {
 4550 
 4551 				/** STRUCT: struct type: net_device_ops, struct name: typhoon_netdev_ops **/
 4552 				
 4553 
 4554 				/* content: static struct net_device_stats * typhoon_get_stats(struct net_device *dev)*/
 4555 				/* LDV_COMMENT_BEGIN_PREP */
 4556 				#define TXHI_ENTRIES		2
 4557 				#define TXLO_ENTRIES		128
 4558 				#define RX_ENTRIES		32
 4559 				#define COMMAND_ENTRIES		16
 4560 				#define RESPONSE_ENTRIES	32
 4561 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4562 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4563 				#define RXFREE_ENTRIES		128
 4564 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4565 				#define TX_TIMEOUT  (2*HZ)
 4566 				#define PKT_BUF_SZ		1536
 4567 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4568 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4569 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4570 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4571 				#undef NETIF_F_TSO
 4572 				#endif
 4573 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4574 				#error TX ring too small!
 4575 				#endif
 4576 				#define TYPHOON_CRYPTO_NONE		0x00
 4577 				#define TYPHOON_CRYPTO_DES		0x01
 4578 				#define TYPHOON_CRYPTO_3DES		0x02
 4579 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4580 				#define TYPHOON_FIBER			0x08
 4581 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4582 				#define __3xp_aligned	____cacheline_aligned
 4583 				#define typhoon_post_pci_writes(x) \
 4584 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4585 				#define TYPHOON_UDELAY			50
 4586 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4587 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4588 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4589 				#if defined(NETIF_F_TSO)
 4590 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4591 				#define TSO_NUM_DESCRIPTORS	2
 4592 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4593 				#else
 4594 				#define NETIF_F_TSO 		0
 4595 				#define skb_tso_size(x) 	0
 4596 				#define TSO_NUM_DESCRIPTORS	0
 4597 				#define TSO_OFFLOAD_ON		0
 4598 				#endif
 4599 				/* LDV_COMMENT_END_PREP */
 4600 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_get_stats" from driver structure with callbacks "typhoon_netdev_ops" */
 4601 				ldv_handler_precall();
 4602 				typhoon_get_stats( var_group1);
 4603 				/* LDV_COMMENT_BEGIN_PREP */
 4604 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4605 				#if 0
 4606 				#endif
 4607 				#ifdef CONFIG_PM
 4608 				#endif
 4609 				#ifdef CONFIG_PM
 4610 				#endif
 4611 				/* LDV_COMMENT_END_PREP */
 4612 				
 4613 
 4614 				
 4615 
 4616 			}
 4617 
 4618 			break;
 4619 			case 12: {
 4620 
 4621 				/** STRUCT: struct type: pci_driver, struct name: typhoon_driver **/
 4622 				if(ldv_s_typhoon_driver_pci_driver==0) {
 4623 
 4624 				/* content: static int typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
 4625 				/* LDV_COMMENT_BEGIN_PREP */
 4626 				#define TXHI_ENTRIES		2
 4627 				#define TXLO_ENTRIES		128
 4628 				#define RX_ENTRIES		32
 4629 				#define COMMAND_ENTRIES		16
 4630 				#define RESPONSE_ENTRIES	32
 4631 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4632 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4633 				#define RXFREE_ENTRIES		128
 4634 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4635 				#define TX_TIMEOUT  (2*HZ)
 4636 				#define PKT_BUF_SZ		1536
 4637 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4638 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4639 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4640 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4641 				#undef NETIF_F_TSO
 4642 				#endif
 4643 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4644 				#error TX ring too small!
 4645 				#endif
 4646 				#define TYPHOON_CRYPTO_NONE		0x00
 4647 				#define TYPHOON_CRYPTO_DES		0x01
 4648 				#define TYPHOON_CRYPTO_3DES		0x02
 4649 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4650 				#define TYPHOON_FIBER			0x08
 4651 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4652 				#define __3xp_aligned	____cacheline_aligned
 4653 				#define typhoon_post_pci_writes(x) \
 4654 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4655 				#define TYPHOON_UDELAY			50
 4656 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4657 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4658 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4659 				#if defined(NETIF_F_TSO)
 4660 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4661 				#define TSO_NUM_DESCRIPTORS	2
 4662 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4663 				#else
 4664 				#define NETIF_F_TSO 		0
 4665 				#define skb_tso_size(x) 	0
 4666 				#define TSO_NUM_DESCRIPTORS	0
 4667 				#define TSO_OFFLOAD_ON		0
 4668 				#endif
 4669 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4670 				#if 0
 4671 				#endif
 4672 				#ifdef CONFIG_PM
 4673 				#endif
 4674 				/* LDV_COMMENT_END_PREP */
4675 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "typhoon_driver". Standard function test for a correct return result. */
 4676 				res_typhoon_init_one_52 = typhoon_init_one( var_group7, var_typhoon_init_one_52_p1);
 4677 				 ldv_check_return_value(res_typhoon_init_one_52);
 4678 				 ldv_check_return_value_probe(res_typhoon_init_one_52);
 4679 				 if(res_typhoon_init_one_52) 
 4680 					goto ldv_module_exit;
 4681 				/* LDV_COMMENT_BEGIN_PREP */
 4682 				#ifdef CONFIG_PM
 4683 				#endif
 4684 				/* LDV_COMMENT_END_PREP */
 4685 				ldv_s_typhoon_driver_pci_driver++;
 4686 
 4687 				}
 4688 
 4689 			}
 4690 
 4691 			break;
 4692 			case 13: {
 4693 
 4694 				/** STRUCT: struct type: pci_driver, struct name: typhoon_driver **/
 4695 				if(ldv_s_typhoon_driver_pci_driver==1) {
 4696 
 4697 				/* content: static void typhoon_remove_one(struct pci_dev *pdev)*/
 4698 				/* LDV_COMMENT_BEGIN_PREP */
 4699 				#define TXHI_ENTRIES		2
 4700 				#define TXLO_ENTRIES		128
 4701 				#define RX_ENTRIES		32
 4702 				#define COMMAND_ENTRIES		16
 4703 				#define RESPONSE_ENTRIES	32
 4704 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4705 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4706 				#define RXFREE_ENTRIES		128
 4707 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4708 				#define TX_TIMEOUT  (2*HZ)
 4709 				#define PKT_BUF_SZ		1536
 4710 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4711 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4712 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4713 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4714 				#undef NETIF_F_TSO
 4715 				#endif
 4716 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4717 				#error TX ring too small!
 4718 				#endif
 4719 				#define TYPHOON_CRYPTO_NONE		0x00
 4720 				#define TYPHOON_CRYPTO_DES		0x01
 4721 				#define TYPHOON_CRYPTO_3DES		0x02
 4722 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4723 				#define TYPHOON_FIBER			0x08
 4724 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4725 				#define __3xp_aligned	____cacheline_aligned
 4726 				#define typhoon_post_pci_writes(x) \
 4727 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4728 				#define TYPHOON_UDELAY			50
 4729 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4730 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4731 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4732 				#if defined(NETIF_F_TSO)
 4733 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4734 				#define TSO_NUM_DESCRIPTORS	2
 4735 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4736 				#else
 4737 				#define NETIF_F_TSO 		0
 4738 				#define skb_tso_size(x) 	0
 4739 				#define TSO_NUM_DESCRIPTORS	0
 4740 				#define TSO_OFFLOAD_ON		0
 4741 				#endif
 4742 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4743 				#if 0
 4744 				#endif
 4745 				#ifdef CONFIG_PM
 4746 				#endif
 4747 				/* LDV_COMMENT_END_PREP */
 4748 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "typhoon_driver" */
 4749 				ldv_handler_precall();
 4750 				typhoon_remove_one( var_group7);
 4751 				/* LDV_COMMENT_BEGIN_PREP */
 4752 				#ifdef CONFIG_PM
 4753 				#endif
 4754 				/* LDV_COMMENT_END_PREP */
 4755 				ldv_s_typhoon_driver_pci_driver=0;
 4756 
 4757 				}
 4758 
 4759 			}
 4760 
 4761 			break;
 4762 			case 14: {
 4763 
 4764 				/** STRUCT: struct type: pci_driver, struct name: typhoon_driver **/
 4765 				
 4766 
 4767 				/* content: static int typhoon_suspend(struct pci_dev *pdev, pm_message_t state)*/
 4768 				/* LDV_COMMENT_BEGIN_PREP */
 4769 				#define TXHI_ENTRIES		2
 4770 				#define TXLO_ENTRIES		128
 4771 				#define RX_ENTRIES		32
 4772 				#define COMMAND_ENTRIES		16
 4773 				#define RESPONSE_ENTRIES	32
 4774 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4775 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4776 				#define RXFREE_ENTRIES		128
 4777 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4778 				#define TX_TIMEOUT  (2*HZ)
 4779 				#define PKT_BUF_SZ		1536
 4780 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4781 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4782 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4783 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4784 				#undef NETIF_F_TSO
 4785 				#endif
 4786 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4787 				#error TX ring too small!
 4788 				#endif
 4789 				#define TYPHOON_CRYPTO_NONE		0x00
 4790 				#define TYPHOON_CRYPTO_DES		0x01
 4791 				#define TYPHOON_CRYPTO_3DES		0x02
 4792 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4793 				#define TYPHOON_FIBER			0x08
 4794 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4795 				#define __3xp_aligned	____cacheline_aligned
 4796 				#define typhoon_post_pci_writes(x) \
 4797 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4798 				#define TYPHOON_UDELAY			50
 4799 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4800 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4801 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4802 				#if defined(NETIF_F_TSO)
 4803 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4804 				#define TSO_NUM_DESCRIPTORS	2
 4805 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4806 				#else
 4807 				#define NETIF_F_TSO 		0
 4808 				#define skb_tso_size(x) 	0
 4809 				#define TSO_NUM_DESCRIPTORS	0
 4810 				#define TSO_OFFLOAD_ON		0
 4811 				#endif
 4812 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4813 				#if 0
 4814 				#endif
 4815 				#ifdef CONFIG_PM
 4816 				/* LDV_COMMENT_END_PREP */
 4817 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "typhoon_driver" */
 4818 				ldv_handler_precall();
 4819 				typhoon_suspend( var_group7, var_typhoon_suspend_50_p1);
 4820 				/* LDV_COMMENT_BEGIN_PREP */
 4821 				#endif
 4822 				#ifdef CONFIG_PM
 4823 				#endif
 4824 				/* LDV_COMMENT_END_PREP */
 4825 				
 4826 
 4827 				
 4828 
 4829 			}
 4830 
 4831 			break;
 4832 			case 15: {
 4833 
 4834 				/** STRUCT: struct type: pci_driver, struct name: typhoon_driver **/
 4835 				
 4836 
 4837 				/* content: static int typhoon_resume(struct pci_dev *pdev)*/
 4838 				/* LDV_COMMENT_BEGIN_PREP */
 4839 				#define TXHI_ENTRIES		2
 4840 				#define TXLO_ENTRIES		128
 4841 				#define RX_ENTRIES		32
 4842 				#define COMMAND_ENTRIES		16
 4843 				#define RESPONSE_ENTRIES	32
 4844 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4845 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4846 				#define RXFREE_ENTRIES		128
 4847 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4848 				#define TX_TIMEOUT  (2*HZ)
 4849 				#define PKT_BUF_SZ		1536
 4850 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4851 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4852 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4853 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4854 				#undef NETIF_F_TSO
 4855 				#endif
 4856 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4857 				#error TX ring too small!
 4858 				#endif
 4859 				#define TYPHOON_CRYPTO_NONE		0x00
 4860 				#define TYPHOON_CRYPTO_DES		0x01
 4861 				#define TYPHOON_CRYPTO_3DES		0x02
 4862 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4863 				#define TYPHOON_FIBER			0x08
 4864 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4865 				#define __3xp_aligned	____cacheline_aligned
 4866 				#define typhoon_post_pci_writes(x) \
 4867 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4868 				#define TYPHOON_UDELAY			50
 4869 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4870 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4871 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4872 				#if defined(NETIF_F_TSO)
 4873 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4874 				#define TSO_NUM_DESCRIPTORS	2
 4875 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4876 				#else
 4877 				#define NETIF_F_TSO 		0
 4878 				#define skb_tso_size(x) 	0
 4879 				#define TSO_NUM_DESCRIPTORS	0
 4880 				#define TSO_OFFLOAD_ON		0
 4881 				#endif
 4882 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4883 				#if 0
 4884 				#endif
 4885 				#ifdef CONFIG_PM
 4886 				/* LDV_COMMENT_END_PREP */
 4887 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "typhoon_driver" */
 4888 				ldv_handler_precall();
 4889 				typhoon_resume( var_group7);
 4890 				/* LDV_COMMENT_BEGIN_PREP */
 4891 				#endif
 4892 				#ifdef CONFIG_PM
 4893 				#endif
 4894 				/* LDV_COMMENT_END_PREP */
 4895 				
 4896 
 4897 				
 4898 
 4899 			}
 4900 
 4901 			break;
 4902 			case 16: {
 4903 
 4904 				/** CALLBACK SECTION request_irq **/
 4905 				LDV_IN_INTERRUPT=2;
 4906 
 4907 				/* content: static irqreturn_t typhoon_interrupt(int irq, void *dev_instance)*/
 4908 				/* LDV_COMMENT_BEGIN_PREP */
 4909 				#define TXHI_ENTRIES		2
 4910 				#define TXLO_ENTRIES		128
 4911 				#define RX_ENTRIES		32
 4912 				#define COMMAND_ENTRIES		16
 4913 				#define RESPONSE_ENTRIES	32
 4914 				#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4915 				#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4916 				#define RXFREE_ENTRIES		128
 4917 				#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4918 				#define TX_TIMEOUT  (2*HZ)
 4919 				#define PKT_BUF_SZ		1536
 4920 				#define FIRMWARE_NAME		"3com/typhoon.bin"
 4921 				#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4922 				#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4923 				#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4924 				#undef NETIF_F_TSO
 4925 				#endif
 4926 				#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 4927 				#error TX ring too small!
 4928 				#endif
 4929 				#define TYPHOON_CRYPTO_NONE		0x00
 4930 				#define TYPHOON_CRYPTO_DES		0x01
 4931 				#define TYPHOON_CRYPTO_3DES		0x02
 4932 				#define	TYPHOON_CRYPTO_VARIABLE		0x04
 4933 				#define TYPHOON_FIBER			0x08
 4934 				#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 4935 				#define __3xp_aligned	____cacheline_aligned
 4936 				#define typhoon_post_pci_writes(x) \
 4937 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 4938 				#define TYPHOON_UDELAY			50
 4939 				#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 4940 				#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 4941 				#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 4942 				#if defined(NETIF_F_TSO)
 4943 				#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 4944 				#define TSO_NUM_DESCRIPTORS	2
 4945 				#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 4946 				#else
 4947 				#define NETIF_F_TSO 		0
 4948 				#define skb_tso_size(x) 	0
 4949 				#define TSO_NUM_DESCRIPTORS	0
 4950 				#define TSO_OFFLOAD_ON		0
 4951 				#endif
 4952 				#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 4953 				#if 0
 4954 				#endif
 4955 				/* LDV_COMMENT_END_PREP */
 4956 				/* LDV_COMMENT_FUNCTION_CALL */
 4957 				ldv_handler_precall();
 4958 				typhoon_interrupt( var_typhoon_interrupt_40_p0, var_typhoon_interrupt_40_p1);
 4959 				/* LDV_COMMENT_BEGIN_PREP */
 4960 				#ifdef CONFIG_PM
 4961 				#endif
 4962 				#ifdef CONFIG_PM
 4963 				#endif
 4964 				/* LDV_COMMENT_END_PREP */
 4965 				LDV_IN_INTERRUPT=1;
 4966 
 4967 				
 4968 
 4969 			}
 4970 
 4971 			break;
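			/* Note (added): LDV_IN_INTERRUPT is the harness flag describing the current
			 * execution context; judging by the values used here, 2 models running
			 * typhoon_interrupt() in interrupt context and 1 restores the default
			 * process-context state once the handler returns. */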
 4972 			default: break;
 4973 
 4974 		}
 4975 
 4976 	}
 4977 
 4978 	ldv_module_exit: 
 4979 
 4980 	/** INIT: init_type: ST_MODULE_EXIT **/
 4981 	/* content: static void __exit typhoon_cleanup(void)*/
 4982 	/* LDV_COMMENT_BEGIN_PREP */
 4983 	#define TXHI_ENTRIES		2
 4984 	#define TXLO_ENTRIES		128
 4985 	#define RX_ENTRIES		32
 4986 	#define COMMAND_ENTRIES		16
 4987 	#define RESPONSE_ENTRIES	32
 4988 	#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
 4989 	#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
 4990 	#define RXFREE_ENTRIES		128
 4991 	#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
 4992 	#define TX_TIMEOUT  (2*HZ)
 4993 	#define PKT_BUF_SZ		1536
 4994 	#define FIRMWARE_NAME		"3com/typhoon.bin"
 4995 	#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
 4996 	#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 4997 	#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 4998 	#undef NETIF_F_TSO
 4999 	#endif
 5000 	#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 5001 	#error TX ring too small!
 5002 	#endif
 5003 	#define TYPHOON_CRYPTO_NONE		0x00
 5004 	#define TYPHOON_CRYPTO_DES		0x01
 5005 	#define TYPHOON_CRYPTO_3DES		0x02
 5006 	#define	TYPHOON_CRYPTO_VARIABLE		0x04
 5007 	#define TYPHOON_FIBER			0x08
 5008 	#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
 5009 	#define __3xp_aligned	____cacheline_aligned
 5010 	#define typhoon_post_pci_writes(x) \
 5011 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
 5012 	#define TYPHOON_UDELAY			50
 5013 	#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
 5014 	#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
 5015 	#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
 5016 	#if defined(NETIF_F_TSO)
 5017 	#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
 5018 	#define TSO_NUM_DESCRIPTORS	2
 5019 	#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
 5020 	#else
 5021 	#define NETIF_F_TSO 		0
 5022 	#define skb_tso_size(x) 	0
 5023 	#define TSO_NUM_DESCRIPTORS	0
 5024 	#define TSO_OFFLOAD_ON		0
 5025 	#endif
 5026 	#define shared_offset(x)	offsetof(struct typhoon_shared, x)
 5027 	#if 0
 5028 	#endif
 5029 	#ifdef CONFIG_PM
 5030 	#endif
 5031 	#ifdef CONFIG_PM
 5032 	#endif
 5033 	/* LDV_COMMENT_END_PREP */
5034 	/* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
 5035 	ldv_handler_precall();
 5036 	typhoon_cleanup();
 5037 
5038 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 5039 	ldv_final: ldv_check_final_state();
 5040 
 5041 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 5042 	return;
 5043 
 5044 }
 5045 #endif
 5046 
5047 /* LDV_COMMENT_END_MAIN */
    1 
    2 
    3 #include <linux/kernel.h>
    4 #include <linux/spinlock.h>
    5 
    6 #include <verifier/rcv.h>
    7 
    8 static int ldv_spin__xmit_lock_of_netdev_queue;
    9 
   10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was not locked and lock it */
   11 void ldv_spin_lock__xmit_lock_of_netdev_queue(void)
   12 {
   13   /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked */
   14   ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
   15   /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue' */
   16   ldv_spin__xmit_lock_of_netdev_queue = 2;
   17 }
   18 
   19 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was locked and unlock it */
   20 void ldv_spin_unlock__xmit_lock_of_netdev_queue(void)
   21 {
   22   /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be locked */
   23   ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 2);
   24   /* LDV_COMMENT_CHANGE_STATE Unlock spin '_xmit_lock_of_netdev_queue' */
   25   ldv_spin__xmit_lock_of_netdev_queue = 1;
   26 }
   27 
28 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was not locked and nondeterministically lock it. Return 0 on failure */
   29 int ldv_spin_trylock__xmit_lock_of_netdev_queue(void)
   30 {
   31   int is_spin_held_by_another_thread;
   32 
   33   /* LDV_COMMENT_ASSERT It may be an error if spin '_xmit_lock_of_netdev_queue' is locked at this point */
   34   ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
   35 
36   /* LDV_COMMENT_OTHER Construct a nondeterministic result */
   37   is_spin_held_by_another_thread = ldv_undef_int();
   38 
   39   /* LDV_COMMENT_ASSERT Nondeterministically lock spin '_xmit_lock_of_netdev_queue' */
   40   if (is_spin_held_by_another_thread)
   41   {
   42     /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was not locked. Finish with fail */
   43     return 0;
   44   }
   45   else
   46   {
   47     /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue' */
   48     ldv_spin__xmit_lock_of_netdev_queue = 2;
   49     /* LDV_COMMENT_RETURN Finish with success */
   50     return 1;
   51   }
   52 }
   53 
54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait__xmit_lock_of_netdev_queue') The same process cannot both hold spin '_xmit_lock_of_netdev_queue' and wait until it is unlocked */
   55 void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(void)
   56 {
   57   /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must not be locked by a current process */
   58   ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
   59 }
   60 
   61 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' was locked */
   62 int ldv_spin_is_locked__xmit_lock_of_netdev_queue(void)
   63 {
   64   int is_spin_held_by_another_thread;
   65 
66   /* LDV_COMMENT_OTHER Construct a nondeterministic result */
   67   is_spin_held_by_another_thread = ldv_undef_int();
   68 
69   /* LDV_COMMENT_ASSERT Nondeterministically determine whether spin '_xmit_lock_of_netdev_queue' is locked */
   70   if(ldv_spin__xmit_lock_of_netdev_queue == 1 && !is_spin_held_by_another_thread)
   71   {
   72     /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was unlocked */
   73     return 0;
   74   }
   75   else
   76   {
   77     /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was locked */
   78     return 1;
   79   }
   80 }
   81 
82 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' can be locked */
   83 int ldv_spin_can_lock__xmit_lock_of_netdev_queue(void)
   84 {
   85   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
   86   return !ldv_spin_is_locked__xmit_lock_of_netdev_queue();
   87 }
   88 
   89 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' is contended */
   90 int ldv_spin_is_contended__xmit_lock_of_netdev_queue(void)
   91 {
   92   int is_spin_contended;
   93 
94   /* LDV_COMMENT_OTHER Construct a nondeterministic result */
   95   is_spin_contended = ldv_undef_int();
   96 
97   /* LDV_COMMENT_ASSERT Nondeterministically determine whether spin '_xmit_lock_of_netdev_queue' is contended */
98   if(is_spin_contended)
99   {
100     /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' is contended */
101     return 1;
102   }
103   else
104   {
105     /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' isn't contended */
106     return 0;
107   }
  108 }
  109 
  110 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue') Lock spin '_xmit_lock_of_netdev_queue' if atomic decrement result is zero */
  111 int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(void)
  112 {
  113   int atomic_value_after_dec;
  114 
  115   /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked (since we may lock it in this function) */
  116   ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
  117 
  118   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  119   atomic_value_after_dec = ldv_undef_int();
  120 
  121   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  122   if (atomic_value_after_dec == 0)
  123   {
  124     /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue', as atomic has decremented to zero */
  125     ldv_spin__xmit_lock_of_netdev_queue = 2;
  126     /* LDV_COMMENT_RETURN Return 1 with locked spin '_xmit_lock_of_netdev_queue' */
  127     return 1;
  128   }
  129 
  130   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin '_xmit_lock_of_netdev_queue' */
  131   return 0;
  132 }
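/*
 * Note (added): a minimal sketch of the state machine encoded by the model
 * functions above, assuming the verifier initializes the state to 1 (unlocked)
 * before any driver callback runs (that initialization is not part of this file):
 *
 *   1 = '_xmit_lock_of_netdev_queue' unlocked, 2 = locked
 *
 *   ldv_spin_lock__xmit_lock_of_netdev_queue();    // asserts state == 1, then sets it to 2
 *   ldv_spin_lock__xmit_lock_of_netdev_queue();    // a second lock trips ldv_assert(state == 1)
 *   ldv_spin_unlock__xmit_lock_of_netdev_queue();  // asserts state == 2, then sets it to 1
 *
 * A double lock, an unlock of a spin that is not held, or leaving the spin held
 * when ldv_check_final_state() runs therefore violates an assertion, which is how
 * the spinlock rule checked by this trace reports an error.
 */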
  133 static int ldv_spin_addr_list_lock_of_net_device;
  134 
  135 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was not locked and lock it */
  136 void ldv_spin_lock_addr_list_lock_of_net_device(void)
  137 {
  138   /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked */
  139   ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
  140   /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device' */
  141   ldv_spin_addr_list_lock_of_net_device = 2;
  142 }
  143 
  144 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was locked and unlock it */
  145 void ldv_spin_unlock_addr_list_lock_of_net_device(void)
  146 {
  147   /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be locked */
  148   ldv_assert(ldv_spin_addr_list_lock_of_net_device == 2);
  149   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'addr_list_lock_of_net_device' */
  150   ldv_spin_addr_list_lock_of_net_device = 1;
  151 }
  152 
153 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was not locked and nondeterministically lock it. Return 0 on failure */
  154 int ldv_spin_trylock_addr_list_lock_of_net_device(void)
  155 {
  156   int is_spin_held_by_another_thread;
  157 
  158   /* LDV_COMMENT_ASSERT It may be an error if spin 'addr_list_lock_of_net_device' is locked at this point */
  159   ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
  160 
161   /* LDV_COMMENT_OTHER Construct a nondeterministic result */
  162   is_spin_held_by_another_thread = ldv_undef_int();
  163 
  164   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'addr_list_lock_of_net_device' */
  165   if (is_spin_held_by_another_thread)
  166   {
  167     /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was not locked. Finish with fail */
  168     return 0;
  169   }
  170   else
  171   {
  172     /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device' */
  173     ldv_spin_addr_list_lock_of_net_device = 2;
  174     /* LDV_COMMENT_RETURN Finish with success */
  175     return 1;
  176   }
  177 }
  178 
179 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_addr_list_lock_of_net_device') The same process cannot both hold spin 'addr_list_lock_of_net_device' and wait until it is unlocked */
  180 void ldv_spin_unlock_wait_addr_list_lock_of_net_device(void)
  181 {
  182   /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must not be locked by a current process */
  183   ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
  184 }
  185 
  186 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' was locked */
  187 int ldv_spin_is_locked_addr_list_lock_of_net_device(void)
  188 {
  189   int is_spin_held_by_another_thread;
  190 
191   /* LDV_COMMENT_OTHER Construct a nondeterministic result */
  192   is_spin_held_by_another_thread = ldv_undef_int();
  193 
194   /* LDV_COMMENT_ASSERT Nondeterministically determine whether spin 'addr_list_lock_of_net_device' is locked */
  195   if(ldv_spin_addr_list_lock_of_net_device == 1 && !is_spin_held_by_another_thread)
  196   {
  197     /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was unlocked */
  198     return 0;
  199   }
  200   else
  201   {
  202     /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was locked */
  203     return 1;
  204   }
  205 }
  206 
207 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' can be locked */
  208 int ldv_spin_can_lock_addr_list_lock_of_net_device(void)
  209 {
  210   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
  211   return !ldv_spin_is_locked_addr_list_lock_of_net_device();
  212 }
  213 
  214 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' is contended */
  215 int ldv_spin_is_contended_addr_list_lock_of_net_device(void)
  216 {
  217   int is_spin_contended;
  218 
  219   /* LDV_COMMENT_OTHER Construct nondetermined result */
  220   is_spin_contended = ldv_undef_int();
  221 
  222   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'addr_list_lock_of_net_device' is contended */
  223   if(is_spin_contended)
  224   {
  225     /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' is contended */
  226     return 1;
  227   }
  228   else
  229   {
  230     /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' isn't contended */
  231     return 0;
  232   }
  233 }
  234 
  235 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_addr_list_lock_of_net_device') Lock spin 'addr_list_lock_of_net_device' if atomic decrement result is zero */
  236 int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(void)
  237 {
  238   int atomic_value_after_dec;
  239 
  240   /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked (since we may lock it in this function) */
  241   ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
  242 
  243   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  244   atomic_value_after_dec = ldv_undef_int();
  245 
  246   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  247   if (atomic_value_after_dec == 0)
  248   {
  249     /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device', as atomic has decremented to zero */
  250     ldv_spin_addr_list_lock_of_net_device = 2;
  251     /* LDV_COMMENT_RETURN Return 1 with locked spin 'addr_list_lock_of_net_device' */
  252     return 1;
  253   }
  254 
  255   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'addr_list_lock_of_net_device' */
  256   return 0;
  257 }
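Taken together, the functions above encode spin 'addr_list_lock_of_net_device' as a two-state automaton: the value 1 means unlocked and 2 means locked, and every acquisition first asserts that the state is 1. A hypothetical harness (not part of the trace) showing how a double acquisition would be reported:

void example_double_lock(void)
{
  ldv_initialize();                                /* all lock states start at 1 (unlocked) */
  ldv_spin_lock_addr_list_lock_of_net_device();    /* assert state == 1 holds; state becomes 2 */
  ldv_spin_lock_addr_list_lock_of_net_device();    /* assert state == 1 fails: double lock is flagged */
}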
  258 static int ldv_spin_alloc_lock_of_task_struct;
  259 
  260 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and lock it */
  261 void ldv_spin_lock_alloc_lock_of_task_struct(void)
  262 {
  263   /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked */
  264   ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
  265   /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */
  266   ldv_spin_alloc_lock_of_task_struct = 2;
  267 }
  268 
  269 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was locked and unlock it */
  270 void ldv_spin_unlock_alloc_lock_of_task_struct(void)
  271 {
  272   /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be locked */
  273   ldv_assert(ldv_spin_alloc_lock_of_task_struct == 2);
  274   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'alloc_lock_of_task_struct' */
  275   ldv_spin_alloc_lock_of_task_struct = 1;
  276 }
  277 
  278 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and nondeterministically lock it. Return 0 on failure */
  279 int ldv_spin_trylock_alloc_lock_of_task_struct(void)
  280 {
  281   int is_spin_held_by_another_thread;
  282 
  283   /* LDV_COMMENT_ASSERT It may be an error if spin 'alloc_lock_of_task_struct' is locked at this point */
  284   ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
  285 
  286   /* LDV_COMMENT_OTHER Construct nondetermined result */
  287   is_spin_held_by_another_thread = ldv_undef_int();
  288 
  289   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'alloc_lock_of_task_struct' */
  290   if (is_spin_held_by_another_thread)
  291   {
  292     /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' is held by another thread, so it was not locked. Finish with failure */
  293     return 0;
  294   }
  295   else
  296   {
  297     /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */
  298     ldv_spin_alloc_lock_of_task_struct = 2;
  299     /* LDV_COMMENT_RETURN Finish with success */
  300     return 1;
  301   }
  302 }
  303 
  304 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_alloc_lock_of_task_struct') The same process cannot both hold spin 'alloc_lock_of_task_struct' and wait until it is unlocked */
  305 void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void)
  306 {
  307   /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must not be locked by a current process */
  308   ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
  309 }
  310 
  311 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' was locked */
  312 int ldv_spin_is_locked_alloc_lock_of_task_struct(void)
  313 {
  314   int is_spin_held_by_another_thread;
  315 
  316   /* LDV_COMMENT_OTHER Construct nondetermined result */
  317   is_spin_held_by_another_thread = ldv_undef_int();
  318 
  319   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'alloc_lock_of_task_struct' was locked */
  320   if(ldv_spin_alloc_lock_of_task_struct == 1 && !is_spin_held_by_another_thread)
  321   {
  322     /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was unlocked */
  323     return 0;
  324   }
  325   else
  326   {
  327     /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was locked */
  328     return 1;
  329   }
  330 }
  331 
  332 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' can be locked */
  333 int ldv_spin_can_lock_alloc_lock_of_task_struct(void)
  334 {
  335   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
  336   return !ldv_spin_is_locked_alloc_lock_of_task_struct();
  337 }
  338 
  339 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' is contended */
  340 int ldv_spin_is_contended_alloc_lock_of_task_struct(void)
  341 {
  342   int is_spin_contended;
  343 
  344   /* LDV_COMMENT_OTHER Construct nondetermined result */
  345   is_spin_contended = ldv_undef_int();
  346 
  347   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'alloc_lock_of_task_struct' is contended */
  348   if(is_spin_contended)
  349   {
  350     /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' is contended */
  351     return 1;
  352   }
  353   else
  354   {
  355     /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' isn't contended */
  356     return 0;
  357   }
  358 }
  359 
  360 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_alloc_lock_of_task_struct') Lock spin 'alloc_lock_of_task_struct' if atomic decrement result is zero */
  361 int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void)
  362 {
  363   int atomic_value_after_dec;
  364 
  365   /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked (since we may lock it in this function) */
  366   ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
  367 
  368   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  369   atomic_value_after_dec = ldv_undef_int();
  370 
  371   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  372   if (atomic_value_after_dec == 0)
  373   {
  374     /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct', as atomic has decremented to zero */
  375     ldv_spin_alloc_lock_of_task_struct = 2;
  376     /* LDV_COMMENT_RETURN Return 1 with locked spin 'alloc_lock_of_task_struct' */
  377     return 1;
  378   }
  379 
  380   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'alloc_lock_of_task_struct' */
  381   return 0;
  382 }
  383 static int ldv_spin_command_lock_of_typhoon;
  384 
  385 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_command_lock_of_typhoon') Check that spin 'command_lock_of_typhoon' was not locked and lock it */
  386 void ldv_spin_lock_command_lock_of_typhoon(void)
  387 {
  388   /* LDV_COMMENT_ASSERT Spin 'command_lock_of_typhoon' must be unlocked */
  389   ldv_assert(ldv_spin_command_lock_of_typhoon == 1);
  390   /* LDV_COMMENT_CHANGE_STATE Lock spin 'command_lock_of_typhoon' */
  391   ldv_spin_command_lock_of_typhoon = 2;
  392 }
  393 
  394 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_command_lock_of_typhoon') Check that spin 'command_lock_of_typhoon' was locked and unlock it */
  395 void ldv_spin_unlock_command_lock_of_typhoon(void)
  396 {
  397   /* LDV_COMMENT_ASSERT Spin 'command_lock_of_typhoon' must be locked */
  398   ldv_assert(ldv_spin_command_lock_of_typhoon == 2);
  399   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'command_lock_of_typhoon' */
  400   ldv_spin_command_lock_of_typhoon = 1;
  401 }
  402 
  403 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_command_lock_of_typhoon') Check that spin 'command_lock_of_typhoon' was not locked and nondeterministically lock it. Return 0 on failure */
  404 int ldv_spin_trylock_command_lock_of_typhoon(void)
  405 {
  406   int is_spin_held_by_another_thread;
  407 
  408   /* LDV_COMMENT_ASSERT It may be an error if spin 'command_lock_of_typhoon' is locked at this point */
  409   ldv_assert(ldv_spin_command_lock_of_typhoon == 1);
  410 
  411   /* LDV_COMMENT_OTHER Construct nondetermined result */
  412   is_spin_held_by_another_thread = ldv_undef_int();
  413 
  414   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'command_lock_of_typhoon' */
  415   if (is_spin_held_by_another_thread)
  416   {
  417     /* LDV_COMMENT_RETURN Spin 'command_lock_of_typhoon' is held by another thread, so it was not locked. Finish with failure */
  418     return 0;
  419   }
  420   else
  421   {
  422     /* LDV_COMMENT_CHANGE_STATE Lock spin 'command_lock_of_typhoon' */
  423     ldv_spin_command_lock_of_typhoon = 2;
  424     /* LDV_COMMENT_RETURN Finish with success */
  425     return 1;
  426   }
  427 }
  428 
  429 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_command_lock_of_typhoon') The same process cannot both hold spin 'command_lock_of_typhoon' and wait until it is unlocked */
  430 void ldv_spin_unlock_wait_command_lock_of_typhoon(void)
  431 {
  432   /* LDV_COMMENT_ASSERT Spin 'command_lock_of_typhoon' must not be locked by a current process */
  433   ldv_assert(ldv_spin_command_lock_of_typhoon == 1);
  434 }
  435 
  436 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_command_lock_of_typhoon') Check whether spin 'command_lock_of_typhoon' was locked */
  437 int ldv_spin_is_locked_command_lock_of_typhoon(void)
  438 {
  439   int is_spin_held_by_another_thread;
  440 
  441   /* LDV_COMMENT_OTHER Construct nondetermined result */
  442   is_spin_held_by_another_thread = ldv_undef_int();
  443 
  444   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'command_lock_of_typhoon' was locked */
  445   if(ldv_spin_command_lock_of_typhoon == 1 && !is_spin_held_by_another_thread)
  446   {
  447     /* LDV_COMMENT_RETURN Spin 'command_lock_of_typhoon' was unlocked */
  448     return 0;
  449   }
  450   else
  451   {
  452     /* LDV_COMMENT_RETURN Spin 'command_lock_of_typhoon' was locked */
  453     return 1;
  454   }
  455 }
  456 
  457 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_command_lock_of_typhoon') Check whether spin 'command_lock_of_typhoon' can be locked */
  458 int ldv_spin_can_lock_command_lock_of_typhoon(void)
  459 {
  460   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
  461   return !ldv_spin_is_locked_command_lock_of_typhoon();
  462 }
  463 
  464 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_command_lock_of_typhoon') Check whether spin 'command_lock_of_typhoon' is contended */
  465 int ldv_spin_is_contended_command_lock_of_typhoon(void)
  466 {
  467   int is_spin_contended;
  468 
  469   /* LDV_COMMENT_OTHER Construct nondetermined result */
  470   is_spin_contended = ldv_undef_int();
  471 
  472   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'command_lock_of_typhoon' is contended */
  473   if(is_spin_contended)
  474   {
  475     /* LDV_COMMENT_RETURN Spin 'command_lock_of_typhoon' is contended */
  476     return 1;
  477   }
  478   else
  479   {
  480     /* LDV_COMMENT_RETURN Spin 'command_lock_of_typhoon' isn't contended */
  481     return 0;
  482   }
  483 }
  484 
  485 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_command_lock_of_typhoon') Lock spin 'command_lock_of_typhoon' if atomic decrement result is zero */
  486 int ldv_atomic_dec_and_lock_command_lock_of_typhoon(void)
  487 {
  488   int atomic_value_after_dec;
  489 
  490   /* LDV_COMMENT_ASSERT Spin 'command_lock_of_typhoon' must be unlocked (since we may lock it in this function) */
  491   ldv_assert(ldv_spin_command_lock_of_typhoon == 1);
  492 
  493   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  494   atomic_value_after_dec = ldv_undef_int();
  495 
  496   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  497   if (atomic_value_after_dec == 0)
  498   {
  499     /* LDV_COMMENT_CHANGE_STATE Lock spin 'command_lock_of_typhoon', as atomic has decremented to zero */
  500     ldv_spin_command_lock_of_typhoon = 2;
  501     /* LDV_COMMENT_RETURN Return 1 with locked spin 'command_lock_of_typhoon' */
  502     return 1;
  503   }
  504 
  505   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'command_lock_of_typhoon' */
  506   return 0;
  507 }
  508 static int ldv_spin_i_lock_of_inode;
  509 
  510 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_i_lock_of_inode') Check that spin 'i_lock_of_inode' was not locked and lock it */
  511 void ldv_spin_lock_i_lock_of_inode(void)
  512 {
  513   /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be unlocked */
  514   ldv_assert(ldv_spin_i_lock_of_inode == 1);
  515   /* LDV_COMMENT_CHANGE_STATE Lock spin 'i_lock_of_inode' */
  516   ldv_spin_i_lock_of_inode = 2;
  517 }
  518 
  519 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_i_lock_of_inode') Check that spin 'i_lock_of_inode' was locked and unlock it */
  520 void ldv_spin_unlock_i_lock_of_inode(void)
  521 {
  522   /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be locked */
  523   ldv_assert(ldv_spin_i_lock_of_inode == 2);
  524   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'i_lock_of_inode' */
  525   ldv_spin_i_lock_of_inode = 1;
  526 }
  527 
  528 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_i_lock_of_inode') Check that spin 'i_lock_of_inode' was not locked and nondeterministically lock it. Return 0 on failure */
  529 int ldv_spin_trylock_i_lock_of_inode(void)
  530 {
  531   int is_spin_held_by_another_thread;
  532 
  533   /* LDV_COMMENT_ASSERT It may be an error if spin 'i_lock_of_inode' is locked at this point */
  534   ldv_assert(ldv_spin_i_lock_of_inode == 1);
  535 
  536   /* LDV_COMMENT_OTHER Construct nondetermined result */
  537   is_spin_held_by_another_thread = ldv_undef_int();
  538 
  539   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'i_lock_of_inode' */
  540   if (is_spin_held_by_another_thread)
  541   {
  542     /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' is held by another thread, so it was not locked. Finish with failure */
  543     return 0;
  544   }
  545   else
  546   {
  547     /* LDV_COMMENT_CHANGE_STATE Lock spin 'i_lock_of_inode' */
  548     ldv_spin_i_lock_of_inode = 2;
  549     /* LDV_COMMENT_RETURN Finish with success */
  550     return 1;
  551   }
  552 }
  553 
  554 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_i_lock_of_inode') The same process cannot both hold spin 'i_lock_of_inode' and wait until it is unlocked */
  555 void ldv_spin_unlock_wait_i_lock_of_inode(void)
  556 {
  557   /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must not be locked by a current process */
  558   ldv_assert(ldv_spin_i_lock_of_inode == 1);
  559 }
  560 
  561 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_i_lock_of_inode') Check whether spin 'i_lock_of_inode' was locked */
  562 int ldv_spin_is_locked_i_lock_of_inode(void)
  563 {
  564   int is_spin_held_by_another_thread;
  565 
  566   /* LDV_COMMENT_OTHER Construct nondetermined result */
  567   is_spin_held_by_another_thread = ldv_undef_int();
  568 
  569   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'i_lock_of_inode' was locked */
  570   if(ldv_spin_i_lock_of_inode == 1 && !is_spin_held_by_another_thread)
  571   {
  572     /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' was unlocked */
  573     return 0;
  574   }
  575   else
  576   {
  577     /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' was locked */
  578     return 1;
  579   }
  580 }
  581 
  582 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_i_lock_of_inode') Check whether spin 'i_lock_of_inode' can be locked */
  583 int ldv_spin_can_lock_i_lock_of_inode(void)
  584 {
  585   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
  586   return !ldv_spin_is_locked_i_lock_of_inode();
  587 }
  588 
  589 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_i_lock_of_inode') Check whether spin 'i_lock_of_inode' is contended */
  590 int ldv_spin_is_contended_i_lock_of_inode(void)
  591 {
  592   int is_spin_contended;
  593 
  594   /* LDV_COMMENT_OTHER Construct nondetermined result */
  595   is_spin_contended = ldv_undef_int();
  596 
  597   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'i_lock_of_inode' is contended */
  598   if(is_spin_contended)
  599   {
  600     /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' is contended */
  601     return 1;
  602   }
  603   else
  604   {
  605     /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' isn't contended */
  606     return 0;
  607   }
  608 }
  609 
  610 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_i_lock_of_inode') Lock spin 'i_lock_of_inode' if atomic decrement result is zero */
  611 int ldv_atomic_dec_and_lock_i_lock_of_inode(void)
  612 {
  613   int atomic_value_after_dec;
  614 
  615   /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be unlocked (since we may lock it in this function) */
  616   ldv_assert(ldv_spin_i_lock_of_inode == 1);
  617 
  618   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  619   atomic_value_after_dec = ldv_undef_int();
  620 
  621   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  622   if (atomic_value_after_dec == 0)
  623   {
  624     /* LDV_COMMENT_CHANGE_STATE Lock spin 'i_lock_of_inode', as atomic has decremented to zero */
  625     ldv_spin_i_lock_of_inode = 2;
  626     /* LDV_COMMENT_RETURN Return 1 with locked spin 'i_lock_of_inode' */
  627     return 1;
  628   }
  629 
  630   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'i_lock_of_inode' */
  631   return 0;
  632 }
  633 static int ldv_spin_lock;
  634 
  635 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock') Check that spin 'lock' was not locked and lock it */
  636 void ldv_spin_lock_lock(void)
  637 {
  638   /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked */
  639   ldv_assert(ldv_spin_lock == 1);
  640   /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock' */
  641   ldv_spin_lock = 2;
  642 }
  643 
  644 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock') Check that spin 'lock' was locked and unlock it */
  645 void ldv_spin_unlock_lock(void)
  646 {
  647   /* LDV_COMMENT_ASSERT Spin 'lock' must be locked */
  648   ldv_assert(ldv_spin_lock == 2);
  649   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock' */
  650   ldv_spin_lock = 1;
  651 }
  652 
  653 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock') Check that spin 'lock' was not locked and nondeterministically lock it. Return 0 on failure */
  654 int ldv_spin_trylock_lock(void)
  655 {
  656   int is_spin_held_by_another_thread;
  657 
  658   /* LDV_COMMENT_ASSERT It may be an error if spin 'lock' is locked at this point */
  659   ldv_assert(ldv_spin_lock == 1);
  660 
  661   /* LDV_COMMENT_OTHER Construct nondetermined result */
  662   is_spin_held_by_another_thread = ldv_undef_int();
  663 
  664   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock' */
  665   if (is_spin_held_by_another_thread)
  666   {
  667     /* LDV_COMMENT_RETURN Spin 'lock' is held by another thread, so it was not locked. Finish with failure */
  668     return 0;
  669   }
  670   else
  671   {
  672     /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock' */
  673     ldv_spin_lock = 2;
  674     /* LDV_COMMENT_RETURN Finish with success */
  675     return 1;
  676   }
  677 }
  678 
  679 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock') The same process cannot both hold spin 'lock' and wait until it is unlocked */
  680 void ldv_spin_unlock_wait_lock(void)
  681 {
  682   /* LDV_COMMENT_ASSERT Spin 'lock' must not be locked by a current process */
  683   ldv_assert(ldv_spin_lock == 1);
  684 }
  685 
  686 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock') Check whether spin 'lock' was locked */
  687 int ldv_spin_is_locked_lock(void)
  688 {
  689   int is_spin_held_by_another_thread;
  690 
  691   /* LDV_COMMENT_OTHER Construct nondetermined result */
  692   is_spin_held_by_another_thread = ldv_undef_int();
  693 
  694   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock' was locked */
  695   if(ldv_spin_lock == 1 && !is_spin_held_by_another_thread)
  696   {
  697     /* LDV_COMMENT_RETURN Spin 'lock' was unlocked */
  698     return 0;
  699   }
  700   else
  701   {
  702     /* LDV_COMMENT_RETURN Spin 'lock' was locked */
  703     return 1;
  704   }
  705 }
  706 
  707 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock') Check whether spin 'lock' can be locked */
  708 int ldv_spin_can_lock_lock(void)
  709 {
  710   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
  711   return !ldv_spin_is_locked_lock();
  712 }
  713 
  714 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock') Check whether spin 'lock' is contended */
  715 int ldv_spin_is_contended_lock(void)
  716 {
  717   int is_spin_contended;
  718 
  719   /* LDV_COMMENT_OTHER Construct nondetermined result */
  720   is_spin_contended = ldv_undef_int();
  721 
  722   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock' is contended */
  723   if(is_spin_contended)
  724   {
  725     /* LDV_COMMENT_RETURN Spin 'lock' is contended */
  726     return 1;
  727   }
  728   else
  729   {
  730     /* LDV_COMMENT_RETURN Spin 'lock' isn't contended */
  731     return 0;
  732   }
  733 }
  734 
  735 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock') Lock spin 'lock' if atomic decrement result is zero */
  736 int ldv_atomic_dec_and_lock_lock(void)
  737 {
  738   int atomic_value_after_dec;
  739 
  740   /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked (since we may lock it in this function) */
  741   ldv_assert(ldv_spin_lock == 1);
  742 
  743   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  744   atomic_value_after_dec = ldv_undef_int();
  745 
  746   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  747   if (atomic_value_after_dec == 0)
  748   {
  749     /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock', as atomic has decremented to zero */
  750     ldv_spin_lock = 2;
  751     /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock' */
  752     return 1;
  753   }
  754 
  755   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock' */
  756   return 0;
  757 }
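Because ldv_spin_trylock_lock() may fail nondeterministically, a caller must release the lock only on the path where the trylock succeeded. A hypothetical usage of the models for spin 'lock' (harness code, not part of the trace):

void example_trylock(void)
{
  if (ldv_spin_trylock_lock()) {   /* nondeterministically succeeds or fails */
    /* ... critical section protected by spin 'lock' ... */
    ldv_spin_unlock_lock();        /* state must return to 1 before the next acquisition */
  }
  /* on failure there is nothing to undo: the state is still 1 (unlocked) */
}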
  758 static int ldv_spin_lock_of_NOT_ARG_SIGN;
  759 
  760 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and lock it */
  761 void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void)
  762 {
  763   /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked */
  764   ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
  765   /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
  766   ldv_spin_lock_of_NOT_ARG_SIGN = 2;
  767 }
  768 
  769 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was locked and unlock it */
  770 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void)
  771 {
  772   /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be locked */
  773   ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 2);
  774   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock_of_NOT_ARG_SIGN' */
  775   ldv_spin_lock_of_NOT_ARG_SIGN = 1;
  776 }
  777 
  778 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and nondeterministically lock it. Return 0 on failure */
  779 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void)
  780 {
  781   int is_spin_held_by_another_thread;
  782 
  783   /* LDV_COMMENT_ASSERT It may be an error if spin 'lock_of_NOT_ARG_SIGN' is locked at this point */
  784   ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
  785 
  786   /* LDV_COMMENT_OTHER Construct nondetermined result */
  787   is_spin_held_by_another_thread = ldv_undef_int();
  788 
  789   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock_of_NOT_ARG_SIGN' */
  790   if (is_spin_held_by_another_thread)
  791   {
  792     /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' is held by another thread, so it was not locked. Finish with failure */
  793     return 0;
  794   }
  795   else
  796   {
  797     /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
  798     ldv_spin_lock_of_NOT_ARG_SIGN = 2;
  799     /* LDV_COMMENT_RETURN Finish with success */
  800     return 1;
  801   }
  802 }
  803 
  804 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN') The same process cannot both hold spin 'lock_of_NOT_ARG_SIGN' and wait until it is unlocked */
  805 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void)
  806 {
  807   /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must not be locked by a current process */
  808   ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
  809 }
  810 
  811 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' was locked */
  812 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void)
  813 {
  814   int is_spin_held_by_another_thread;
  815 
  816   /* LDV_COMMENT_OTHER Construct nondetermined result */
  817   is_spin_held_by_another_thread = ldv_undef_int();
  818 
  819   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_NOT_ARG_SIGN' was locked */
  820   if(ldv_spin_lock_of_NOT_ARG_SIGN == 1 && !is_spin_held_by_another_thread)
  821   {
  822     /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was unlocked */
  823     return 0;
  824   }
  825   else
  826   {
  827     /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was locked */
  828     return 1;
  829   }
  830 }
  831 
  832 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' can be locked */
  833 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void)
  834 {
  835   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
  836   return !ldv_spin_is_locked_lock_of_NOT_ARG_SIGN();
  837 }
  838 
  839 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' is contended */
  840 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void)
  841 {
  842   int is_spin_contended;
  843 
  844   /* LDV_COMMENT_OTHER Construct nondetermined result */
  845   is_spin_contended = ldv_undef_int();
  846 
  847   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_NOT_ARG_SIGN' is contended */
  848   if(is_spin_contended)
  849   {
  850     /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' is contended */
  851     return 1;
  852   }
  853   else
  854   {
  855     /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' isn't contended */
  856     return 0;
  857   }
  858 }
  859 
  860 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN') Lock spin 'lock_of_NOT_ARG_SIGN' if atomic decrement result is zero */
  861 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void)
  862 {
  863   int atomic_value_after_dec;
  864 
  865   /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked (since we may lock it in this function) */
  866   ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
  867 
  868   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  869   atomic_value_after_dec = ldv_undef_int();
  870 
  871   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  872   if (atomic_value_after_dec == 0)
  873   {
  874     /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN', as atomic has decremented to zero */
  875     ldv_spin_lock_of_NOT_ARG_SIGN = 2;
  876     /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock_of_NOT_ARG_SIGN' */
  877     return 1;
  878   }
  879 
  880   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock_of_NOT_ARG_SIGN' */
  881   return 0;
  882 }
  883 static int ldv_spin_lru_lock_of_netns_frags;
  884 
  885 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lru_lock_of_netns_frags') Check that spin 'lru_lock_of_netns_frags' was not locked and lock it */
  886 void ldv_spin_lock_lru_lock_of_netns_frags(void)
  887 {
  888   /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be unlocked */
  889   ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
  890   /* LDV_COMMENT_CHANGE_STATE Lock spin 'lru_lock_of_netns_frags' */
  891   ldv_spin_lru_lock_of_netns_frags = 2;
  892 }
  893 
  894 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lru_lock_of_netns_frags') Check that spin 'lru_lock_of_netns_frags' was locked and unlock it */
  895 void ldv_spin_unlock_lru_lock_of_netns_frags(void)
  896 {
  897   /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be locked */
  898   ldv_assert(ldv_spin_lru_lock_of_netns_frags == 2);
  899   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lru_lock_of_netns_frags' */
  900   ldv_spin_lru_lock_of_netns_frags = 1;
  901 }
  902 
  903 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lru_lock_of_netns_frags') Check that spin 'lru_lock_of_netns_frags' was not locked and nondeterministically lock it. Return 0 on failure */
  904 int ldv_spin_trylock_lru_lock_of_netns_frags(void)
  905 {
  906   int is_spin_held_by_another_thread;
  907 
  908   /* LDV_COMMENT_ASSERT It may be an error if spin 'lru_lock_of_netns_frags' is locked at this point */
  909   ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
  910 
  911   /* LDV_COMMENT_OTHER Construct nondetermined result */
  912   is_spin_held_by_another_thread = ldv_undef_int();
  913 
  914   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lru_lock_of_netns_frags' */
  915   if (is_spin_held_by_another_thread)
  916   {
  917     /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' is held by another thread, so it was not locked. Finish with failure */
  918     return 0;
  919   }
  920   else
  921   {
  922     /* LDV_COMMENT_CHANGE_STATE Lock spin 'lru_lock_of_netns_frags' */
  923     ldv_spin_lru_lock_of_netns_frags = 2;
  924     /* LDV_COMMENT_RETURN Finish with success */
  925     return 1;
  926   }
  927 }
  928 
  929 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lru_lock_of_netns_frags') The same process cannot both hold spin 'lru_lock_of_netns_frags' and wait until it is unlocked */
  930 void ldv_spin_unlock_wait_lru_lock_of_netns_frags(void)
  931 {
  932   /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must not be locked by a current process */
  933   ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
  934 }
  935 
  936 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lru_lock_of_netns_frags') Check whether spin 'lru_lock_of_netns_frags' was locked */
  937 int ldv_spin_is_locked_lru_lock_of_netns_frags(void)
  938 {
  939   int is_spin_held_by_another_thread;
  940 
  941   /* LDV_COMMENT_OTHER Construct nondetermined result */
  942   is_spin_held_by_another_thread = ldv_undef_int();
  943 
  944   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lru_lock_of_netns_frags' was locked */
  945   if(ldv_spin_lru_lock_of_netns_frags == 1 && !is_spin_held_by_another_thread)
  946   {
  947     /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' was unlocked */
  948     return 0;
  949   }
  950   else
  951   {
  952     /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' was locked */
  953     return 1;
  954   }
  955 }
  956 
  957 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lru_lock_of_netns_frags') Check whether spin 'lru_lock_of_netns_frags' can be locked */
  958 int ldv_spin_can_lock_lru_lock_of_netns_frags(void)
  959 {
  960   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
  961   return !ldv_spin_is_locked_lru_lock_of_netns_frags();
  962 }
  963 
  964 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lru_lock_of_netns_frags') Check whether spin 'lru_lock_of_netns_frags' is contended */
  965 int ldv_spin_is_contended_lru_lock_of_netns_frags(void)
  966 {
  967   int is_spin_contended;
  968 
  969   /* LDV_COMMENT_OTHER Construct nondetermined result */
  970   is_spin_contended = ldv_undef_int();
  971 
  972   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lru_lock_of_netns_frags' is contended */
  973   if(is_spin_contended)
  974   {
  975     /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' is contended */
  976     return 1;
  977   }
  978   else
  979   {
  980     /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' isn't contended */
  981     return 0;
  982   }
  983 }
  984 
  985 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lru_lock_of_netns_frags') Lock spin 'lru_lock_of_netns_frags' if atomic decrement result is zero */
  986 int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags(void)
  987 {
  988   int atomic_value_after_dec;
  989 
  990   /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be unlocked (since we may lock it in this function) */
  991   ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
  992 
  993   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  994   atomic_value_after_dec = ldv_undef_int();
  995 
  996   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  997   if (atomic_value_after_dec == 0)
  998   {
  999     /* LDV_COMMENT_CHANGE_STATE Lock spin 'lru_lock_of_netns_frags', as atomic has decremented to zero */
 1000     ldv_spin_lru_lock_of_netns_frags = 2;
 1001     /* LDV_COMMENT_RETURN Return 1 with locked spin 'lru_lock_of_netns_frags' */
 1002     return 1;
 1003   }
 1004 
 1005   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lru_lock_of_netns_frags' */
 1006   return 0;
 1007 }
 1008 static int ldv_spin_node_size_lock_of_pglist_data;
 1009 
 1010 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and lock it */
 1011 void ldv_spin_lock_node_size_lock_of_pglist_data(void)
 1012 {
 1013   /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked */
 1014   ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
 1015   /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
 1016   ldv_spin_node_size_lock_of_pglist_data = 2;
 1017 }
 1018 
 1019 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was locked and unlock it */
 1020 void ldv_spin_unlock_node_size_lock_of_pglist_data(void)
 1021 {
 1022   /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be locked */
 1023   ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 2);
 1024   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'node_size_lock_of_pglist_data' */
 1025   ldv_spin_node_size_lock_of_pglist_data = 1;
 1026 }
 1027 
 1028 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and nondeterministically lock it. Return 0 on failure */
 1029 int ldv_spin_trylock_node_size_lock_of_pglist_data(void)
 1030 {
 1031   int is_spin_held_by_another_thread;
 1032 
 1033   /* LDV_COMMENT_ASSERT It may be an error if spin 'node_size_lock_of_pglist_data' is locked at this point */
 1034   ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
 1035 
 1036   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1037   is_spin_held_by_another_thread = ldv_undef_int();
 1038 
 1039   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'node_size_lock_of_pglist_data' */
 1040   if (is_spin_held_by_another_thread)
 1041   {
 1042     /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' is held by another thread, so it was not locked. Finish with failure */
 1043     return 0;
 1044   }
 1045   else
 1046   {
 1047     /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
 1048     ldv_spin_node_size_lock_of_pglist_data = 2;
 1049     /* LDV_COMMENT_RETURN Finish with success */
 1050     return 1;
 1051   }
 1052 }
 1053 
 1054 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_node_size_lock_of_pglist_data') The same process cannot both hold spin 'node_size_lock_of_pglist_data' and wait until it is unlocked */
 1055 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void)
 1056 {
 1057   /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must not be locked by a current process */
 1058   ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
 1059 }
 1060 
 1061 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' was locked */
 1062 int ldv_spin_is_locked_node_size_lock_of_pglist_data(void)
 1063 {
 1064   int is_spin_held_by_another_thread;
 1065 
 1066   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1067   is_spin_held_by_another_thread = ldv_undef_int();
 1068 
 1069   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'node_size_lock_of_pglist_data' was locked */
 1070   if(ldv_spin_node_size_lock_of_pglist_data == 1 && !is_spin_held_by_another_thread)
 1071   {
 1072     /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was unlocked */
 1073     return 0;
 1074   }
 1075   else
 1076   {
 1077     /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was locked */
 1078     return 1;
 1079   }
 1080 }
 1081 
 1082 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' can be locked */
 1083 int ldv_spin_can_lock_node_size_lock_of_pglist_data(void)
 1084 {
 1085   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
 1086   return !ldv_spin_is_locked_node_size_lock_of_pglist_data();
 1087 }
 1088 
 1089 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' is contended */
 1090 int ldv_spin_is_contended_node_size_lock_of_pglist_data(void)
 1091 {
 1092   int is_spin_contended;
 1093 
 1094   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1095   is_spin_contended = ldv_undef_int();
 1096 
 1097   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'node_size_lock_of_pglist_data' is contended */
 1098   if(is_spin_contended)
 1099   {
 1100     /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' is contended */
 1101     return 1;
 1102   }
 1103   else
 1104   {
 1105     /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' isn't contended */
 1106     return 0;
 1107   }
 1108 }
 1109 
 1110 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data') Lock spin 'node_size_lock_of_pglist_data' if atomic decrement result is zero */
 1111 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void)
 1112 {
 1113   int atomic_value_after_dec;
 1114 
 1115   /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked (since we may lock it in this function) */
 1116   ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
 1117 
 1118   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
 1119   atomic_value_after_dec = ldv_undef_int();
 1120 
 1121   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
 1122   if (atomic_value_after_dec == 0)
 1123   {
 1124     /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data', as atomic has decremented to zero */
 1125     ldv_spin_node_size_lock_of_pglist_data = 2;
 1126     /* LDV_COMMENT_RETURN Return 1 with locked spin 'node_size_lock_of_pglist_data' */
 1127     return 1;
 1128   }
 1129 
 1130   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'node_size_lock_of_pglist_data' */
 1131   return 0;
 1132 }
 1133 static int ldv_spin_ptl;
 1134 
 1135 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_ptl') Check that spin 'ptl' was not locked and lock it */
 1136 void ldv_spin_lock_ptl(void)
 1137 {
 1138   /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked */
 1139   ldv_assert(ldv_spin_ptl == 1);
 1140   /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl' */
 1141   ldv_spin_ptl = 2;
 1142 }
 1143 
 1144 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_ptl') Check that spin 'ptl' was locked and unlock it */
 1145 void ldv_spin_unlock_ptl(void)
 1146 {
 1147   /* LDV_COMMENT_ASSERT Spin 'ptl' must be locked */
 1148   ldv_assert(ldv_spin_ptl == 2);
 1149   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'ptl' */
 1150   ldv_spin_ptl = 1;
 1151 }
 1152 
 1153 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_ptl') Check that spin 'ptl' was not locked and nondeterministically lock it. Return 0 on failure */
 1154 int ldv_spin_trylock_ptl(void)
 1155 {
 1156   int is_spin_held_by_another_thread;
 1157 
 1158   /* LDV_COMMENT_ASSERT It may be an error if spin 'ptl' is locked at this point */
 1159   ldv_assert(ldv_spin_ptl == 1);
 1160 
 1161   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1162   is_spin_held_by_another_thread = ldv_undef_int();
 1163 
 1164   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'ptl' */
 1165   if (is_spin_held_by_another_thread)
 1166   {
 1167     /* LDV_COMMENT_RETURN Spin 'ptl' is held by another thread, so it was not locked. Finish with failure */
 1168     return 0;
 1169   }
 1170   else
 1171   {
 1172     /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl' */
 1173     ldv_spin_ptl = 2;
 1174     /* LDV_COMMENT_RETURN Finish with success */
 1175     return 1;
 1176   }
 1177 }
 1178 
 1179 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_ptl') The same process cannot both hold spin 'ptl' and wait until it is unlocked */
 1180 void ldv_spin_unlock_wait_ptl(void)
 1181 {
 1182   /* LDV_COMMENT_ASSERT Spin 'ptl' must not be locked by a current process */
 1183   ldv_assert(ldv_spin_ptl == 1);
 1184 }
 1185 
 1186 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_ptl') Check whether spin 'ptl' was locked */
 1187 int ldv_spin_is_locked_ptl(void)
 1188 {
 1189   int is_spin_held_by_another_thread;
 1190 
 1191   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1192   is_spin_held_by_another_thread = ldv_undef_int();
 1193 
 1194   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'ptl' was locked */
 1195   if(ldv_spin_ptl == 1 && !is_spin_held_by_another_thread)
 1196   {
 1197     /* LDV_COMMENT_RETURN Spin 'ptl' was unlocked */
 1198     return 0;
 1199   }
 1200   else
 1201   {
 1202     /* LDV_COMMENT_RETURN Spin 'ptl' was locked */
 1203     return 1;
 1204   }
 1205 }
 1206 
 1207 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_ptl') Check whether spin 'ptl' can be locked */
 1208 int ldv_spin_can_lock_ptl(void)
 1209 {
 1210   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
 1211   return !ldv_spin_is_locked_ptl();
 1212 }
 1213 
 1214 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_ptl') Check whether spin 'ptl' is contended */
 1215 int ldv_spin_is_contended_ptl(void)
 1216 {
 1217   int is_spin_contended;
 1218 
 1219   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1220   is_spin_contended = ldv_undef_int();
 1221 
 1222   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'ptl' is contended */
 1223   if(is_spin_contended)
 1224   {
 1225     /* LDV_COMMENT_RETURN Spin 'ptl' is contended */
 1226     return 1;
 1227   }
 1228   else
 1229   {
 1230     /* LDV_COMMENT_RETURN Spin 'ptl' isn't contended */
 1231     return 0;
 1232   }
 1233 }
 1234 
 1235 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_ptl') Lock spin 'ptl' if atomic decrement result is zero */
 1236 int ldv_atomic_dec_and_lock_ptl(void)
 1237 {
 1238   int atomic_value_after_dec;
 1239 
 1240   /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked (since we may lock it in this function) */
 1241   ldv_assert(ldv_spin_ptl == 1);
 1242 
 1243   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
 1244   atomic_value_after_dec = ldv_undef_int();
 1245 
 1246   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
 1247   if (atomic_value_after_dec == 0)
 1248   {
 1249     /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl', as atomic has decremented to zero */
 1250     ldv_spin_ptl = 2;
 1251     /* LDV_COMMENT_RETURN Return 1 with locked spin 'ptl' */
 1252     return 1;
 1253   }
 1254 
 1255   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'ptl' */
 1256   return 0;
 1257 }
 1258 static int ldv_spin_siglock_of_sighand_struct;
 1259 
 1260 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was not locked and lock it */
 1261 void ldv_spin_lock_siglock_of_sighand_struct(void)
 1262 {
 1263   /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked */
 1264   ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
 1265   /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct' */
 1266   ldv_spin_siglock_of_sighand_struct = 2;
 1267 }
 1268 
 1269 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was locked and unlock it */
 1270 void ldv_spin_unlock_siglock_of_sighand_struct(void)
 1271 {
 1272   /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be locked */
 1273   ldv_assert(ldv_spin_siglock_of_sighand_struct == 2);
 1274   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'siglock_of_sighand_struct' */
 1275   ldv_spin_siglock_of_sighand_struct = 1;
 1276 }
 1277 
 1278 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was not locked and nondeterministically lock it. Return 0 on failure */
 1279 int ldv_spin_trylock_siglock_of_sighand_struct(void)
 1280 {
 1281   int is_spin_held_by_another_thread;
 1282 
 1283   /* LDV_COMMENT_ASSERT It may be an error if spin 'siglock_of_sighand_struct' is locked at this point */
 1284   ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
 1285 
 1286   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1287   is_spin_held_by_another_thread = ldv_undef_int();
 1288 
 1289   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'siglock_of_sighand_struct' */
 1290   if (is_spin_held_by_another_thread)
 1291   {
 1292     /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' is held by another thread, so it was not locked. Finish with failure */
 1293     return 0;
 1294   }
 1295   else
 1296   {
 1297     /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct' */
 1298     ldv_spin_siglock_of_sighand_struct = 2;
 1299     /* LDV_COMMENT_RETURN Finish with success */
 1300     return 1;
 1301   }
 1302 }
 1303 
 1304 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_siglock_of_sighand_struct') The same process cannot both hold spin 'siglock_of_sighand_struct' and wait until it is unlocked */
 1305 void ldv_spin_unlock_wait_siglock_of_sighand_struct(void)
 1306 {
 1307   /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must not be locked by a current process */
 1308   ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
 1309 }
 1310 
 1311 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' was locked */
 1312 int ldv_spin_is_locked_siglock_of_sighand_struct(void)
 1313 {
 1314   int is_spin_held_by_another_thread;
 1315 
 1316   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1317   is_spin_held_by_another_thread = ldv_undef_int();
 1318 
 1319   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'siglock_of_sighand_struct' was locked */
 1320   if(ldv_spin_siglock_of_sighand_struct == 1 && !is_spin_held_by_another_thread)
 1321   {
 1322     /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' was unlocked */
 1323     return 0;
 1324   }
 1325   else
 1326   {
 1327     /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' was locked */
 1328     return 1;
 1329   }
 1330 }
 1331 
 1332 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' can be locked */
 1333 int ldv_spin_can_lock_siglock_of_sighand_struct(void)
 1334 {
 1335   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
 1336   return !ldv_spin_is_locked_siglock_of_sighand_struct();
 1337 }
 1338 
 1339 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' is contended */
 1340 int ldv_spin_is_contended_siglock_of_sighand_struct(void)
 1341 {
 1342   int is_spin_contended;
 1343 
 1344   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1345   is_spin_contended = ldv_undef_int();
 1346 
 1347   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'siglock_of_sighand_struct' is contended */
 1348   if(is_spin_contended)
 1349   {
 1350     /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' is contended */
 1351     return 1;
 1352   }
 1353   else
 1354   {
 1355     /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' isn't contended */
 1356     return 0;
 1357   }
 1358 }
 1359 
 1360 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_siglock_of_sighand_struct') Lock spin 'siglock_of_sighand_struct' if atomic decrement result is zero */
 1361 int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void)
 1362 {
 1363   int atomic_value_after_dec;
 1364 
 1365   /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked (since we may lock it in this function) */
 1366   ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
 1367 
 1368   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
 1369   atomic_value_after_dec = ldv_undef_int();
 1370 
 1371   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
 1372   if (atomic_value_after_dec == 0)
 1373   {
 1374     /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct', as atomic has decremented to zero */
 1375     ldv_spin_siglock_of_sighand_struct = 2;
 1376     /* LDV_COMMENT_RETURN Return 1 with locked spin 'siglock_of_sighand_struct' */
 1377     return 1;
 1378   }
 1379 
 1380   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'siglock_of_sighand_struct' */
 1381   return 0;
 1382 }
 1383 static int ldv_spin_tx_global_lock_of_net_device;
 1384 
 1385 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_tx_global_lock_of_net_device') Check that spin 'tx_global_lock_of_net_device' was not locked and lock it */
 1386 void ldv_spin_lock_tx_global_lock_of_net_device(void)
 1387 {
 1388   /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be unlocked */
 1389   ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
 1390   /* LDV_COMMENT_CHANGE_STATE Lock spin 'tx_global_lock_of_net_device' */
 1391   ldv_spin_tx_global_lock_of_net_device = 2;
 1392 }
 1393 
 1394 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_tx_global_lock_of_net_device') Check that spin 'tx_global_lock_of_net_device' was locked and unlock it */
 1395 void ldv_spin_unlock_tx_global_lock_of_net_device(void)
 1396 {
 1397   /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be locked */
 1398   ldv_assert(ldv_spin_tx_global_lock_of_net_device == 2);
 1399   /* LDV_COMMENT_CHANGE_STATE Unlock spin 'tx_global_lock_of_net_device' */
 1400   ldv_spin_tx_global_lock_of_net_device = 1;
 1401 }
 1402 
 1403 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_tx_global_lock_of_net_device') Check that spin 'tx_global_lock_of_net_device' was not locked and nondeterministically lock it. Return 0 on failure */
 1404 int ldv_spin_trylock_tx_global_lock_of_net_device(void)
 1405 {
 1406   int is_spin_held_by_another_thread;
 1407 
 1408   /* LDV_COMMENT_ASSERT It may be an error if spin 'tx_global_lock_of_net_device' is locked at this point */
 1409   ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
 1410 
 1411   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1412   is_spin_held_by_another_thread = ldv_undef_int();
 1413 
 1414   /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'tx_global_lock_of_net_device' */
 1415   if (is_spin_held_by_another_thread)
 1416   {
 1417     /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' is held by another thread, so it was not locked. Finish with failure */
 1418     return 0;
 1419   }
 1420   else
 1421   {
 1422     /* LDV_COMMENT_CHANGE_STATE Lock spin 'tx_global_lock_of_net_device' */
 1423     ldv_spin_tx_global_lock_of_net_device = 2;
 1424     /* LDV_COMMENT_RETURN Finish with success */
 1425     return 1;
 1426   }
 1427 }
 1428 
 1429 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_tx_global_lock_of_net_device') The same process cannot both hold spin 'tx_global_lock_of_net_device' and wait until it is unlocked */
 1430 void ldv_spin_unlock_wait_tx_global_lock_of_net_device(void)
 1431 {
 1432   /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must not be locked by a current process */
 1433   ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
 1434 }
 1435 
 1436 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_tx_global_lock_of_net_device') Check whether spin 'tx_global_lock_of_net_device' was locked */
 1437 int ldv_spin_is_locked_tx_global_lock_of_net_device(void)
 1438 {
 1439   int is_spin_held_by_another_thread;
 1440 
 1441   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1442   is_spin_held_by_another_thread = ldv_undef_int();
 1443 
 1444   /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'tx_global_lock_of_net_device' was locked */
 1445   if(ldv_spin_tx_global_lock_of_net_device == 1 && !is_spin_held_by_another_thread)
 1446   {
 1447     /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' was unlocked */
 1448     return 0;
 1449   }
 1450   else
 1451   {
 1452     /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' was locked */
 1453     return 1;
 1454   }
 1455 }
 1456 
 1457 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_tx_global_lock_of_net_device') Check whether spin 'tx_global_lock_of_net_device' can be locked */
 1458 int ldv_spin_can_lock_tx_global_lock_of_net_device(void)
 1459 {
 1460   /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
 1461   return !ldv_spin_is_locked_tx_global_lock_of_net_device();
 1462 }
 1463 
 1464 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_tx_global_lock_of_net_device') Check whether spin 'tx_global_lock_of_net_device' is contended */
 1465 int ldv_spin_is_contended_tx_global_lock_of_net_device(void)
 1466 {
 1467   int is_spin_contended;
 1468 
 1469   /* LDV_COMMENT_OTHER Construct nondetermined result */
 1470   is_spin_contended = ldv_undef_int();
 1471 
 1472   /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'tx_global_lock_of_net_device' is contended */
 1473   if(is_spin_contended)
 1474   {
 1475     /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' is contended */
 1476     return 1;
 1477   }
 1478   else
 1479   {
 1480     /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' isn't contended */
 1481     return 0;
 1482   }
 1483 }
 1484 
 1485 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_tx_global_lock_of_net_device') Lock spin 'tx_global_lock_of_net_device' if atomic decrement result is zero */
 1486 int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device(void)
 1487 {
 1488   int atomic_value_after_dec;
 1489 
 1490   /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be unlocked (since we may lock it in this function) */
 1491   ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
 1492 
 1493   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
 1494   atomic_value_after_dec = ldv_undef_int();
 1495 
 1496   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
 1497   if (atomic_value_after_dec == 0)
 1498   {
 1499     /* LDV_COMMENT_CHANGE_STATE Lock spin 'tx_global_lock_of_net_device', as atomic has decremented to zero */
 1500     ldv_spin_tx_global_lock_of_net_device = 2;
 1501     /* LDV_COMMENT_RETURN Return 1 with locked spin 'tx_global_lock_of_net_device' */
 1502     return 1;
 1503   }
 1504 
 1505   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'tx_global_lock_of_net_device' */
 1506   return 0;
 1507 }
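
/* Illustrative sketch (assumption): the refcount-drop pattern that
   atomic_dec_and_lock() call sites follow. If the model returns 1 the spin is
   held and must be released by the caller. */
void ldv_example_atomic_dec_and_lock_usage(void)
{
  if (ldv_atomic_dec_and_lock_tx_global_lock_of_net_device()) {
    /* last reference dropped: clean up under the lock, then unlock */
    ldv_spin_unlock_tx_global_lock_of_net_device();
  }
}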
 1508 
 1509 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Make all spins unlocked at the beginning */
 1510 void ldv_initialize(void)
 1511 {
 1512   /* LDV_COMMENT_CHANGE_STATE Make spin '_xmit_lock_of_netdev_queue' unlocked at the beginning */
 1513   ldv_spin__xmit_lock_of_netdev_queue = 1;
 1514   /* LDV_COMMENT_CHANGE_STATE Make spin 'addr_list_lock_of_net_device' unlocked at the beginning */
 1515   ldv_spin_addr_list_lock_of_net_device = 1;
 1516   /* LDV_COMMENT_CHANGE_STATE Make spin 'alloc_lock_of_task_struct' unlocked at the beginning */
 1517   ldv_spin_alloc_lock_of_task_struct = 1;
 1518   /* LDV_COMMENT_CHANGE_STATE Make spin 'command_lock_of_typhoon' unlocked at the beginning */
 1519   ldv_spin_command_lock_of_typhoon = 1;
 1520   /* LDV_COMMENT_CHANGE_STATE Make spin 'i_lock_of_inode' unlocked at the beginning */
 1521   ldv_spin_i_lock_of_inode = 1;
 1522   /* LDV_COMMENT_CHANGE_STATE Make spin 'lock' unlocked at the beginning */
 1523   ldv_spin_lock = 1;
 1524   /* LDV_COMMENT_CHANGE_STATE Make spin 'lock_of_NOT_ARG_SIGN' unlocked at the beginning */
 1525   ldv_spin_lock_of_NOT_ARG_SIGN = 1;
 1526   /* LDV_COMMENT_CHANGE_STATE Make spin 'lru_lock_of_netns_frags' unlocked at the beginning */
 1527   ldv_spin_lru_lock_of_netns_frags = 1;
 1528   /* LDV_COMMENT_CHANGE_STATE Make spin 'node_size_lock_of_pglist_data' unlocked at the beginning */
 1529   ldv_spin_node_size_lock_of_pglist_data = 1;
 1530   /* LDV_COMMENT_CHANGE_STATE Make spin 'ptl' unlocked at the beginning */
 1531   ldv_spin_ptl = 1;
 1532   /* LDV_COMMENT_CHANGE_STATE Make spin 'siglock_of_sighand_struct' unlocked at the beginning */
 1533   ldv_spin_siglock_of_sighand_struct = 1;
 1534   /* LDV_COMMENT_CHANGE_STATE Make spin 'tx_global_lock_of_net_device' unlocked at the beginning */
 1535   ldv_spin_tx_global_lock_of_net_device = 1;
 1536 }
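
/* Illustrative sketch (assumption): the generated environment model is expected
   to call ldv_initialize() before any driver handler and ldv_check_final_state()
   (defined below) after the last one; the real entry point is generated by LDV. */
void ldv_check_final_state(void);

void ldv_example_environment_model(void)
{
  ldv_initialize();        /* every modelled spin starts unlocked        */
  /* ... nondeterministic sequence of driver handler invocations ...     */
  ldv_check_final_state(); /* every modelled spin must be unlocked again */
}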
 1537 
 1538 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all spins are unlocked at the end */
 1539 void ldv_check_final_state(void)
 1540 {
 1541   /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked at the end */
 1542   ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
 1543   /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked at the end */
 1544   ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
 1545   /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked at the end */
 1546   ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
 1547   /* LDV_COMMENT_ASSERT Spin 'command_lock_of_typhoon' must be unlocked at the end */
 1548   ldv_assert(ldv_spin_command_lock_of_typhoon == 1);
 1549   /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be unlocked at the end */
 1550   ldv_assert(ldv_spin_i_lock_of_inode == 1);
 1551   /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked at the end */
 1552   ldv_assert(ldv_spin_lock == 1);
 1553   /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked at the end */
 1554   ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
 1555   /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be unlocked at the end */
 1556   ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
 1557   /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked at the end */
 1558   ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
 1559   /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked at the end */
 1560   ldv_assert(ldv_spin_ptl == 1);
 1561   /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked at the end */
 1562   ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
 1563   /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be unlocked at the end */
 1564   ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
 1565 }

    1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because some static verifiers (like
    9    BLAST) don't accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   20 /* Infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 /* Return nondeterministic negative integer number. */
   29 static inline int ldv_undef_int_negative(void)
   30 {
   31   int ret = ldv_undef_int();
   32 
   33   ldv_assume(ret < 0);
   34 
   35   return ret;
   36 }
   37 /* Return nondeterministic nonpositive integer number. */
   38 static inline int ldv_undef_int_nonpositive(void)
   39 {
   40   int ret = ldv_undef_int();
   41 
   42   ldv_assume(ret <= 0);
   43 
   44   return ret;
   45 }
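
/* Illustrative sketch (assumption): how a nondeterministic value is typically
   constrained with ldv_assume(); the helper name is made up for the example. */
static inline int ldv_undef_int_bounded(int min, int max)
{
  int ret = ldv_undef_int();

  ldv_assume(ret >= min && ret <= max);

  return ret;
}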
   46 
   47 /* Add explicit model for the __builtin_expect GCC built-in. Without the model a
   48    return value would be treated as nondeterministic by verifiers. */
   49 long __builtin_expect(long exp, long c)
   50 {
   51   return exp;
   52 }
   53 
   54 /* This function causes the program to exit abnormally. GCC implements this
   55 function by using a target-dependent mechanism (such as intentionally executing
   56 an illegal instruction) or by calling abort. The mechanism used may vary from
   57 release to release so you should not rely on any particular implementation.
   58 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   59 void __builtin_trap(void)
   60 {
   61   ldv_assert(0);
   62 }
   63 
   64 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   65 #define LDV_PTR_MAX 2012
   66 
   67 #endif /* _LDV_RCV_H_ */

    1 /*
    2  * device.h - generic, centralized driver model
    3  *
    4  * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
    5  * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
    6  * Copyright (c) 2008-2009 Novell Inc.
    7  *
    8  * This file is released under the GPLv2
    9  *
   10  * See Documentation/driver-model/ for more information.
   11  */
   12 
   13 #ifndef _DEVICE_H_
   14 #define _DEVICE_H_
   15 
   16 #include <linux/ioport.h>
   17 #include <linux/kobject.h>
   18 #include <linux/klist.h>
   19 #include <linux/list.h>
   20 #include <linux/lockdep.h>
   21 #include <linux/compiler.h>
   22 #include <linux/types.h>
   23 #include <linux/mutex.h>
   24 #include <linux/pinctrl/devinfo.h>
   25 #include <linux/pm.h>
   26 #include <linux/atomic.h>
   27 #include <linux/ratelimit.h>
   28 #include <linux/uidgid.h>
   29 #include <linux/gfp.h>
   30 #include <asm/device.h>
   31 
   32 struct device;
   33 struct device_private;
   34 struct device_driver;
   35 struct driver_private;
   36 struct module;
   37 struct class;
   38 struct subsys_private;
   39 struct bus_type;
   40 struct device_node;
   41 struct iommu_ops;
   42 struct iommu_group;
   43 
   44 struct bus_attribute {
   45 	struct attribute	attr;
   46 	ssize_t (*show)(struct bus_type *bus, char *buf);
   47 	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
   48 };
   49 
   50 #define BUS_ATTR(_name, _mode, _show, _store)	\
   51 	struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
   52 #define BUS_ATTR_RW(_name) \
   53 	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
   54 #define BUS_ATTR_RO(_name) \
   55 	struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
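
/* Illustrative sketch (assumed usage, not part of this header): a read-only bus
   attribute. __ATTR_RO() expects the show callback to be named <name>_show, and
   bus_create_file() (declared below) exposes it in sysfs; sprintf() comes from
   headers a real driver would already include. */
static ssize_t example_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "example\n");
}
static BUS_ATTR_RO(example);
/* registered with: bus_create_file(&some_bus, &bus_attr_example); */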
   56 
   57 extern int __must_check bus_create_file(struct bus_type *,
   58 					struct bus_attribute *);
   59 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
   60 
   61 /**
   62  * struct bus_type - The bus type of the device
   63  *
   64  * @name:	The name of the bus.
   65  * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
   66  * @dev_root:	Default device to use as the parent.
   67  * @dev_attrs:	Default attributes of the devices on the bus.
   68  * @bus_groups:	Default attributes of the bus.
   69  * @dev_groups:	Default attributes of the devices on the bus.
   70  * @drv_groups: Default attributes of the device drivers on the bus.
   71  * @match:	Called, perhaps multiple times, whenever a new device or driver
   72  *		is added for this bus. It should return a nonzero value if the
   73  *		given device can be handled by the given driver.
   74  * @uevent:	Called when a device is added, removed, or a few other things
   75  *		that generate uevents to add the environment variables.
   76  * @probe:	Called when a new device or driver is added to this bus; it calls
   77  *		the specific driver's probe to initialize the matched device.
   78  * @remove:	Called when a device is removed from this bus.
   79  * @shutdown:	Called at shut-down time to quiesce the device.
   80  *
   81  * @online:	Called to put the device back online (after offlining it).
   82  * @offline:	Called to put the device offline for hot-removal. May fail.
   83  *
   84  * @suspend:	Called when a device on this bus wants to go to sleep mode.
   85  * @resume:	Called to bring a device on this bus out of sleep mode.
   86  * @pm:		Power management operations of this bus, callback the specific
   87  *		device driver's pm-ops.
   88  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
   89  *              driver implementations to a bus and allow the driver to do
   90  *              bus-specific setup
   91  * @p:		The private data of the driver core, only the driver core can
   92  *		touch this.
   93  * @lock_key:	Lock class key for use by the lock validator
   94  *
   95  * A bus is a channel between the processor and one or more devices. For the
   96  * purposes of the device model, all devices are connected via a bus, even if
   97  * it is an internal, virtual, "platform" bus. Buses can plug into each other.
   98  * A USB controller is usually a PCI device, for example. The device model
   99  * represents the actual connections between buses and the devices they control.
  100  * A bus is represented by the bus_type structure. It contains the name, the
  101  * default attributes, the bus' methods, PM operations, and the driver core's
  102  * private data.
  103  */
  104 struct bus_type {
  105 	const char		*name;
  106 	const char		*dev_name;
  107 	struct device		*dev_root;
  108 	struct device_attribute	*dev_attrs;	/* use dev_groups instead */
  109 	const struct attribute_group **bus_groups;
  110 	const struct attribute_group **dev_groups;
  111 	const struct attribute_group **drv_groups;
  112 
  113 	int (*match)(struct device *dev, struct device_driver *drv);
  114 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  115 	int (*probe)(struct device *dev);
  116 	int (*remove)(struct device *dev);
  117 	void (*shutdown)(struct device *dev);
  118 
  119 	int (*online)(struct device *dev);
  120 	int (*offline)(struct device *dev);
  121 
  122 	int (*suspend)(struct device *dev, pm_message_t state);
  123 	int (*resume)(struct device *dev);
  124 
  125 	const struct dev_pm_ops *pm;
  126 
  127 	struct iommu_ops *iommu_ops;
  128 
  129 	struct subsys_private *p;
  130 	struct lock_class_key lock_key;
  131 };
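
/* Illustrative sketch (assumed usage, not part of this header): a minimal
   bus_type as described by the kernel-doc above; the match rule is made up, and
   dev_name()/strcmp() come from headers a real bus driver would include. */
static int example_bus_match(struct device *dev, struct device_driver *drv)
{
	return strcmp(dev_name(dev), drv->name) == 0;
}

static struct bus_type example_bus_type = {
	.name	= "example",
	.match	= example_bus_match,
};
/* registered once at init time with: bus_register(&example_bus_type); */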
  132 
  133 extern int __must_check bus_register(struct bus_type *bus);
  134 
  135 extern void bus_unregister(struct bus_type *bus);
  136 
  137 extern int __must_check bus_rescan_devices(struct bus_type *bus);
  138 
  139 /* iterator helpers for buses */
  140 struct subsys_dev_iter {
  141 	struct klist_iter		ki;
  142 	const struct device_type	*type;
  143 };
  144 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
  145 			 struct bus_type *subsys,
  146 			 struct device *start,
  147 			 const struct device_type *type);
  148 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
  149 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
  150 
  151 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
  152 		     int (*fn)(struct device *dev, void *data));
  153 struct device *bus_find_device(struct bus_type *bus, struct device *start,
  154 			       void *data,
  155 			       int (*match)(struct device *dev, void *data));
  156 struct device *bus_find_device_by_name(struct bus_type *bus,
  157 				       struct device *start,
  158 				       const char *name);
  159 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
  160 					struct device *hint);
  161 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
  162 		     void *data, int (*fn)(struct device_driver *, void *));
  163 void bus_sort_breadthfirst(struct bus_type *bus,
  164 			   int (*compare)(const struct device *a,
  165 					  const struct device *b));
  166 /*
  167  * Bus notifiers: Get notified of addition/removal of devices
  168  * and binding/unbinding of drivers to devices.
  169  * In the long run, it should be a replacement for the platform
  170  * notify hooks.
  171  */
  172 struct notifier_block;
  173 
  174 extern int bus_register_notifier(struct bus_type *bus,
  175 				 struct notifier_block *nb);
  176 extern int bus_unregister_notifier(struct bus_type *bus,
  177 				   struct notifier_block *nb);
  178 
  179 /* All notifiers below get called with the target struct device *
  180  * as an argument. Note that those functions are likely to be called
  181  * with the device lock held in the core, so be careful.
  182  */
  183 #define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
  184 #define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device removed */
  185 #define BUS_NOTIFY_BIND_DRIVER		0x00000003 /* driver about to be
  186 						      bound */
  187 #define BUS_NOTIFY_BOUND_DRIVER		0x00000004 /* driver bound to device */
  188 #define BUS_NOTIFY_UNBIND_DRIVER	0x00000005 /* driver about to be
  189 						      unbound */
  190 #define BUS_NOTIFY_UNBOUND_DRIVER	0x00000006 /* driver is unbound
  191 						      from the device */
  192 
  193 extern struct kset *bus_get_kset(struct bus_type *bus);
  194 extern struct klist *bus_get_device_klist(struct bus_type *bus);
  195 
  196 /**
  197  * struct device_driver - The basic device driver structure
  198  * @name:	Name of the device driver.
  199  * @bus:	The bus which the device of this driver belongs to.
  200  * @owner:	The module owner.
  201  * @mod_name:	Used for built-in modules.
  202  * @suppress_bind_attrs: Disables bind/unbind via sysfs.
  203  * @of_match_table: The open firmware table.
  204  * @acpi_match_table: The ACPI match table.
  205  * @probe:	Called to query the existence of a specific device,
  206  *		whether this driver can work with it, and bind the driver
  207  *		to a specific device.
  208  * @remove:	Called when the device is removed from the system to
  209  *		unbind a device from this driver.
  210  * @shutdown:	Called at shut-down time to quiesce the device.
  211  * @suspend:	Called to put the device to sleep mode. Usually to a
  212  *		low power state.
  213  * @resume:	Called to bring a device from sleep mode.
  214  * @groups:	Default attributes that get created by the driver core
  215  *		automatically.
  216  * @pm:		Power management operations of the device which matched
  217  *		this driver.
  218  * @p:		Driver core's private data, no one other than the driver
  219  *		core can touch this.
  220  *
  221  * The device driver-model tracks all of the drivers known to the system.
  222  * The main reason for this tracking is to enable the driver core to match
  223  * up drivers with new devices. Once drivers are known objects within the
  224  * system, however, a number of other things become possible. Device drivers
  225  * can export information and configuration variables that are independent
  226  * of any specific device.
  227  */
  228 struct device_driver {
  229 	const char		*name;
  230 	struct bus_type		*bus;
  231 
  232 	struct module		*owner;
  233 	const char		*mod_name;	/* used for built-in modules */
  234 
  235 	bool suppress_bind_attrs;	/* disables bind/unbind via sysfs */
  236 
  237 	const struct of_device_id	*of_match_table;
  238 	const struct acpi_device_id	*acpi_match_table;
  239 
  240 	int (*probe) (struct device *dev);
  241 	int (*remove) (struct device *dev);
  242 	void (*shutdown) (struct device *dev);
  243 	int (*suspend) (struct device *dev, pm_message_t state);
  244 	int (*resume) (struct device *dev);
  245 	const struct attribute_group **groups;
  246 
  247 	const struct dev_pm_ops *pm;
  248 
  249 	struct driver_private *p;
  250 };
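
/* Illustrative sketch (assumed usage, not part of this header): a minimal
   device_driver bound to the hypothetical bus from the sketch above; the probe
   and shutdown bodies are stubs, and THIS_MODULE comes from <linux/module.h>. */
static int example_drv_probe(struct device *dev)
{
	return 0;	/* claim every matched device */
}

static void example_drv_shutdown(struct device *dev)
{
}

static struct device_driver example_driver = {
	.name		= "example",
	.bus		= &example_bus_type,
	.owner		= THIS_MODULE,
	.probe		= example_drv_probe,
	.shutdown	= example_drv_shutdown,
};
/* registered with: driver_register(&example_driver); */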
  251 
  252 
  253 extern int __must_check driver_register(struct device_driver *drv);
  254 extern void driver_unregister(struct device_driver *drv);
  255 
  256 extern struct device_driver *driver_find(const char *name,
  257 					 struct bus_type *bus);
  258 extern int driver_probe_done(void);
  259 extern void wait_for_device_probe(void);
  260 
  261 
  262 /* sysfs interface for exporting driver attributes */
  263 
  264 struct driver_attribute {
  265 	struct attribute attr;
  266 	ssize_t (*show)(struct device_driver *driver, char *buf);
  267 	ssize_t (*store)(struct device_driver *driver, const char *buf,
  268 			 size_t count);
  269 };
  270 
  271 #define DRIVER_ATTR(_name, _mode, _show, _store) \
  272 	struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
  273 #define DRIVER_ATTR_RW(_name) \
  274 	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
  275 #define DRIVER_ATTR_RO(_name) \
  276 	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
  277 #define DRIVER_ATTR_WO(_name) \
  278 	struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
  279 
  280 extern int __must_check driver_create_file(struct device_driver *driver,
  281 					const struct driver_attribute *attr);
  282 extern void driver_remove_file(struct device_driver *driver,
  283 			       const struct driver_attribute *attr);
  284 
  285 extern int __must_check driver_for_each_device(struct device_driver *drv,
  286 					       struct device *start,
  287 					       void *data,
  288 					       int (*fn)(struct device *dev,
  289 							 void *));
  290 struct device *driver_find_device(struct device_driver *drv,
  291 				  struct device *start, void *data,
  292 				  int (*match)(struct device *dev, void *data));
  293 
  294 /**
  295  * struct subsys_interface - interfaces to device functions
  296  * @name:       name of the device function
  297  * @subsys:     subsystem of the devices to attach to
  298  * @node:       the list of functions registered at the subsystem
  299  * @add_dev:    device hookup to device function handler
  300  * @remove_dev: device hookup to device function handler
  301  *
  302  * Simple interfaces attached to a subsystem. Multiple interfaces can
  303  * attach to a subsystem and its devices. Unlike drivers, they do not
  304  * exclusively claim or control devices. Interfaces usually represent
  305  * a specific functionality of a subsystem/class of devices.
  306  */
  307 struct subsys_interface {
  308 	const char *name;
  309 	struct bus_type *subsys;
  310 	struct list_head node;
  311 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
  312 	int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
  313 };
  314 
  315 int subsys_interface_register(struct subsys_interface *sif);
  316 void subsys_interface_unregister(struct subsys_interface *sif);
  317 
  318 int subsys_system_register(struct bus_type *subsys,
  319 			   const struct attribute_group **groups);
  320 int subsys_virtual_register(struct bus_type *subsys,
  321 			    const struct attribute_group **groups);
  322 
  323 /**
  324  * struct class - device classes
  325  * @name:	Name of the class.
  326  * @owner:	The module owner.
  327  * @class_attrs: Default attributes of this class.
  328  * @dev_groups:	Default attributes of the devices that belong to the class.
  329  * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
  330  * @dev_uevent:	Called when a device is added, removed from this class, or a
  331  *		few other things that generate uevents to add the environment
  332  *		variables.
  333  * @devnode:	Callback to provide the devtmpfs.
  334  * @class_release: Called to release this class.
  335  * @dev_release: Called to release the device.
  336  * @suspend:	Used to put the device to sleep mode, usually to a low power
  337  *		state.
  338  * @resume:	Used to bring the device from the sleep mode.
  339  * @ns_type:	Callbacks so sysfs can determine namespaces.
  340  * @namespace:	Namespace of the device that belongs to this class.
  341  * @pm:		The default device power management operations of this class.
  342  * @p:		The private data of the driver core, no one other than the
  343  *		driver core can touch this.
  344  *
  345  * A class is a higher-level view of a device that abstracts out low-level
  346  * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
  347  * at the class level, they are all simply disks. Classes allow user space
  348  * to work with devices based on what they do, rather than how they are
  349  * connected or how they work.
  350  */
  351 struct class {
  352 	const char		*name;
  353 	struct module		*owner;
  354 
  355 	struct class_attribute		*class_attrs;
  356 	const struct attribute_group	**dev_groups;
  357 	struct kobject			*dev_kobj;
  358 
  359 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
  360 	char *(*devnode)(struct device *dev, umode_t *mode);
  361 
  362 	void (*class_release)(struct class *class);
  363 	void (*dev_release)(struct device *dev);
  364 
  365 	int (*suspend)(struct device *dev, pm_message_t state);
  366 	int (*resume)(struct device *dev);
  367 
  368 	const struct kobj_ns_type_operations *ns_type;
  369 	const void *(*namespace)(struct device *dev);
  370 
  371 	const struct dev_pm_ops *pm;
  372 
  373 	struct subsys_private *p;
  374 };
  375 
  376 struct class_dev_iter {
  377 	struct klist_iter		ki;
  378 	const struct device_type	*type;
  379 };
  380 
  381 extern struct kobject *sysfs_dev_block_kobj;
  382 extern struct kobject *sysfs_dev_char_kobj;
  383 extern int __must_check __class_register(struct class *class,
  384 					 struct lock_class_key *key);
  385 extern void class_unregister(struct class *class);
  386 
  387 /* This is a #define to keep the compiler from merging different
  388  * instances of the __key variable */
  389 #define class_register(class)			\
  390 ({						\
  391 	static struct lock_class_key __key;	\
  392 	__class_register(class, &__key);	\
  393 })
  394 
  395 struct class_compat;
  396 struct class_compat *class_compat_register(const char *name);
  397 void class_compat_unregister(struct class_compat *cls);
  398 int class_compat_create_link(struct class_compat *cls, struct device *dev,
  399 			     struct device *device_link);
  400 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
  401 			      struct device *device_link);
  402 
  403 extern void class_dev_iter_init(struct class_dev_iter *iter,
  404 				struct class *class,
  405 				struct device *start,
  406 				const struct device_type *type);
  407 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
  408 extern void class_dev_iter_exit(struct class_dev_iter *iter);
  409 
  410 extern int class_for_each_device(struct class *class, struct device *start,
  411 				 void *data,
  412 				 int (*fn)(struct device *dev, void *data));
  413 extern struct device *class_find_device(struct class *class,
  414 					struct device *start, const void *data,
  415 					int (*match)(struct device *, const void *));
  416 
  417 struct class_attribute {
  418 	struct attribute attr;
  419 	ssize_t (*show)(struct class *class, struct class_attribute *attr,
  420 			char *buf);
  421 	ssize_t (*store)(struct class *class, struct class_attribute *attr,
  422 			const char *buf, size_t count);
  423 };
  424 
  425 #define CLASS_ATTR(_name, _mode, _show, _store) \
  426 	struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
  427 #define CLASS_ATTR_RW(_name) \
  428 	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
  429 #define CLASS_ATTR_RO(_name) \
  430 	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
  431 
  432 extern int __must_check class_create_file_ns(struct class *class,
  433 					     const struct class_attribute *attr,
  434 					     const void *ns);
  435 extern void class_remove_file_ns(struct class *class,
  436 				 const struct class_attribute *attr,
  437 				 const void *ns);
  438 
  439 static inline int __must_check class_create_file(struct class *class,
  440 					const struct class_attribute *attr)
  441 {
  442 	return class_create_file_ns(class, attr, NULL);
  443 }
  444 
  445 static inline void class_remove_file(struct class *class,
  446 				     const struct class_attribute *attr)
  447 {
  448 	return class_remove_file_ns(class, attr, NULL);
  449 }
  450 
  451 /* Simple class attribute that is just a static string */
  452 struct class_attribute_string {
  453 	struct class_attribute attr;
  454 	char *str;
  455 };
  456 
  457 /* Currently only read-only attributes are supported */
  458 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
  459 	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
  460 #define CLASS_ATTR_STRING(_name, _mode, _str) \
  461 	struct class_attribute_string class_attr_##_name = \
  462 		_CLASS_ATTR_STRING(_name, _mode, _str)
  463 
  464 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
  465                         char *buf);
  466 
  467 struct class_interface {
  468 	struct list_head	node;
  469 	struct class		*class;
  470 
  471 	int (*add_dev)		(struct device *, struct class_interface *);
  472 	void (*remove_dev)	(struct device *, struct class_interface *);
  473 };
  474 
  475 extern int __must_check class_interface_register(struct class_interface *);
  476 extern void class_interface_unregister(struct class_interface *);
  477 
  478 extern struct class * __must_check __class_create(struct module *owner,
  479 						  const char *name,
  480 						  struct lock_class_key *key);
  481 extern void class_destroy(struct class *cls);
  482 
  483 /* This is a #define to keep the compiler from merging different
  484  * instances of the __key variable */
  485 #define class_create(owner, name)		\
  486 ({						\
  487 	static struct lock_class_key __key;	\
  488 	__class_create(owner, name, &__key);	\
  489 })
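
/* Illustrative sketch (assumed usage, not part of this header): the usual
   class_create()/class_destroy() lifecycle; names are made up, and
   IS_ERR()/PTR_ERR() come from <linux/err.h>. */
static struct class *example_class;

static int example_class_init(void)
{
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);
	return 0;
}

static void example_class_exit(void)
{
	class_destroy(example_class);
}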
  490 
  491 /*
  492  * The type of device, "struct device" is embedded in. A class
  493  * or bus can contain devices of different types
  494  * like "partitions" and "disks", "mouse" and "event".
  495  * This identifies the device type and carries type-specific
  496  * information, equivalent to the kobj_type of a kobject.
  497  * If "name" is specified, the uevent will contain it in
  498  * the DEVTYPE variable.
  499  */
  500 struct device_type {
  501 	const char *name;
  502 	const struct attribute_group **groups;
  503 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  504 	char *(*devnode)(struct device *dev, umode_t *mode,
  505 			 kuid_t *uid, kgid_t *gid);
  506 	void (*release)(struct device *dev);
  507 
  508 	const struct dev_pm_ops *pm;
  509 };
  510 
  511 /* interface for exporting device attributes */
  512 struct device_attribute {
  513 	struct attribute	attr;
  514 	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
  515 			char *buf);
  516 	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
  517 			 const char *buf, size_t count);
  518 };
  519 
  520 struct dev_ext_attribute {
  521 	struct device_attribute attr;
  522 	void *var;
  523 };
  524 
  525 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
  526 			  char *buf);
  527 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
  528 			   const char *buf, size_t count);
  529 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
  530 			char *buf);
  531 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
  532 			 const char *buf, size_t count);
  533 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
  534 			char *buf);
  535 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
  536 			 const char *buf, size_t count);
  537 
  538 #define DEVICE_ATTR(_name, _mode, _show, _store) \
  539 	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
  540 #define DEVICE_ATTR_RW(_name) \
  541 	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
  542 #define DEVICE_ATTR_RO(_name) \
  543 	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
  544 #define DEVICE_ATTR_WO(_name) \
  545 	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
  546 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
  547 	struct dev_ext_attribute dev_attr_##_name = \
  548 		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
  549 #define DEVICE_INT_ATTR(_name, _mode, _var) \
  550 	struct dev_ext_attribute dev_attr_##_name = \
  551 		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
  552 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
  553 	struct dev_ext_attribute dev_attr_##_name = \
  554 		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
  555 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
  556 	struct device_attribute dev_attr_##_name =		\
  557 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
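
/* Illustrative sketch (assumed usage, not part of this header): a read-only
   device attribute. Per __ATTR_RO() the show callback must be named <name>_show,
   and device_create_file() (declared below) exposes it in sysfs. */
static ssize_t model_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(model);
/* exposed in sysfs with: device_create_file(dev, &dev_attr_model); */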
  558 
  559 extern int device_create_file(struct device *device,
  560 			      const struct device_attribute *entry);
  561 extern void device_remove_file(struct device *dev,
  562 			       const struct device_attribute *attr);
  563 extern bool device_remove_file_self(struct device *dev,
  564 				    const struct device_attribute *attr);
  565 extern int __must_check device_create_bin_file(struct device *dev,
  566 					const struct bin_attribute *attr);
  567 extern void device_remove_bin_file(struct device *dev,
  568 				   const struct bin_attribute *attr);
  569 
  570 /* device resource management */
  571 typedef void (*dr_release_t)(struct device *dev, void *res);
  572 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
  573 
  574 #ifdef CONFIG_DEBUG_DEVRES
  575 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
  576 			     const char *name);
  577 #define devres_alloc(release, size, gfp) \
  578 	__devres_alloc(release, size, gfp, #release)
  579 #else
  580 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
  581 #endif
  582 extern void devres_for_each_res(struct device *dev, dr_release_t release,
  583 				dr_match_t match, void *match_data,
  584 				void (*fn)(struct device *, void *, void *),
  585 				void *data);
  586 extern void devres_free(void *res);
  587 extern void devres_add(struct device *dev, void *res);
  588 extern void *devres_find(struct device *dev, dr_release_t release,
  589 			 dr_match_t match, void *match_data);
  590 extern void *devres_get(struct device *dev, void *new_res,
  591 			dr_match_t match, void *match_data);
  592 extern void *devres_remove(struct device *dev, dr_release_t release,
  593 			   dr_match_t match, void *match_data);
  594 extern int devres_destroy(struct device *dev, dr_release_t release,
  595 			  dr_match_t match, void *match_data);
  596 extern int devres_release(struct device *dev, dr_release_t release,
  597 			  dr_match_t match, void *match_data);
  598 
  599 /* devres group */
  600 extern void * __must_check devres_open_group(struct device *dev, void *id,
  601 					     gfp_t gfp);
  602 extern void devres_close_group(struct device *dev, void *id);
  603 extern void devres_remove_group(struct device *dev, void *id);
  604 extern int devres_release_group(struct device *dev, void *id);
  605 
  606 /* managed devm_k.alloc/kfree for device drivers */
  607 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
  608 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
  609 {
  610 	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
  611 }
  612 static inline void *devm_kmalloc_array(struct device *dev,
  613 				       size_t n, size_t size, gfp_t flags)
  614 {
  615 	if (size != 0 && n > SIZE_MAX / size)
  616 		return NULL;
  617 	return devm_kmalloc(dev, n * size, flags);
  618 }
  619 static inline void *devm_kcalloc(struct device *dev,
  620 				 size_t n, size_t size, gfp_t flags)
  621 {
  622 	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
  623 }
  624 extern void devm_kfree(struct device *dev, void *p);
  625 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
  626 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
  627 			  gfp_t gfp);
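
/* Illustrative sketch (assumed usage, not part of this header): managed
   allocation in a probe path; memory obtained with devm_kzalloc() is released
   automatically when the device is unbound. The private struct is made up, and
   GFP_KERNEL / -ENOMEM come from headers a real driver would include. */
struct example_priv {
	int irq;
};

static int example_probe_with_devm(struct device *dev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);	/* dev_set_drvdata() is defined below */
	return 0;
}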
  628 
  629 extern unsigned long devm_get_free_pages(struct device *dev,
  630 					 gfp_t gfp_mask, unsigned int order);
  631 extern void devm_free_pages(struct device *dev, unsigned long addr);
  632 
  633 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
  634 void __iomem *devm_request_and_ioremap(struct device *dev,
  635 			struct resource *res);
  636 
  637 /* allows to add/remove a custom action to devres stack */
  638 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
  639 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
  640 
  641 struct device_dma_parameters {
  642 	/*
  643 	 * a low level driver may set these to teach IOMMU code about
  644 	 * sg limitations.
  645 	 */
  646 	unsigned int max_segment_size;
  647 	unsigned long segment_boundary_mask;
  648 };
  649 
  650 struct acpi_device;
  651 
  652 struct acpi_dev_node {
  653 #ifdef CONFIG_ACPI
  654 	struct acpi_device *companion;
  655 #endif
  656 };
  657 
  658 /**
  659  * struct device - The basic device structure
  660  * @parent:	The device's "parent" device, the device to which it is attached.
  661  * 		In most cases, a parent device is some sort of bus or host
  662  * 		controller. If parent is NULL, the device is a top-level device,
  663  * 		which is not usually what you want.
  664  * @p:		Holds the private data of the driver core portions of the device.
  665  * 		See the comment of the struct device_private for detail.
  666  * @kobj:	A top-level, abstract class from which other classes are derived.
  667  * @init_name:	Initial name of the device.
  668  * @type:	The type of device.
  669  * 		This identifies the device type and carries type-specific
  670  * 		information.
  671  * @mutex:	Mutex to synchronize calls to its driver.
  672  * @bus:	Type of bus device is on.
  673  * @driver:	Which driver has allocated this
  674  * @platform_data: Platform data specific to the device.
  675  * 		Example: For devices on custom boards, as typical of embedded
  676  * 		and SOC based hardware, Linux often uses platform_data to point
  677  * 		to board-specific structures describing devices and how they
  678  * 		are wired.  That can include what ports are available, chip
  679  * 		variants, which GPIO pins act in what additional roles, and so
  680  * 		on.  This shrinks the "Board Support Packages" (BSPs) and
  681  * 		minimizes board-specific #ifdefs in drivers.
  682  * @driver_data: Private pointer for driver specific info.
  683  * @power:	For device power management.
  684  * 		See Documentation/power/devices.txt for details.
  685  * @pm_domain:	Provide callbacks that are executed during system suspend,
  686  * 		hibernation, system resume and during runtime PM transitions
  687  * 		along with subsystem-level and driver-level callbacks.
  688  * @pins:	For device pin management.
  689  *		See Documentation/pinctrl.txt for details.
  690  * @numa_node:	NUMA node this device is close to.
  691  * @dma_mask:	Dma mask (if dma'ble device).
  692  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not all
  693  * 		hardware supports 64-bit addresses for consistent allocations
  694  * 		of such descriptors.
  695  * @dma_pfn_offset: offset of the DMA memory range relative to RAM
  696  * @dma_parms:	A low level driver may set these to teach IOMMU code about
  697  * 		segment limitations.
  698  * @dma_pools:	Dma pools (if dma'ble device).
  699  * @dma_mem:	Internal for coherent mem override.
  700  * @cma_area:	Contiguous memory area for dma allocations
  701  * @archdata:	For arch-specific additions.
  702  * @of_node:	Associated device tree node.
  703  * @acpi_node:	Associated ACPI device node.
  704  * @devt:	For creating the sysfs "dev".
  705  * @id:		device instance
  706  * @devres_lock: Spinlock to protect the resource of the device.
  707  * @devres_head: The resources list of the device.
  708  * @knode_class: The node used to add the device to the class list.
  709  * @class:	The class of the device.
  710  * @groups:	Optional attribute groups.
  711  * @release:	Callback to free the device after all references have
  712  * 		gone away. This should be set by the allocator of the
  713  * 		device (i.e. the bus driver that discovered the device).
  714  * @iommu_group: IOMMU group the device belongs to.
  715  *
  716  * @offline_disabled: If set, the device is permanently online.
  717  * @offline:	Set after successful invocation of bus type's .offline().
  718  *
  719  * At the lowest level, every device in a Linux system is represented by an
  720  * instance of struct device. The device structure contains the information
  721  * that the device model core needs to model the system. Most subsystems,
  722  * however, track additional information about the devices they host. As a
  723  * result, it is rare for devices to be represented by bare device structures;
  724  * instead, that structure, like kobject structures, is usually embedded within
  725  * a higher-level representation of the device.
  726  */
  727 struct device {
  728 	struct device		*parent;
  729 
  730 	struct device_private	*p;
  731 
  732 	struct kobject kobj;
  733 	const char		*init_name; /* initial name of the device */
  734 	const struct device_type *type;
  735 
  736 	struct mutex		mutex;	/* mutex to synchronize calls to
  737 					 * its driver.
  738 					 */
  739 
  740 	struct bus_type	*bus;		/* type of bus device is on */
  741 	struct device_driver *driver;	/* which driver has allocated this
  742 					   device */
  743 	void		*platform_data;	/* Platform specific data, device
  744 					   core doesn't touch it */
  745 	void		*driver_data;	/* Driver data, set and get with
  746 					   dev_set/get_drvdata */
  747 	struct dev_pm_info	power;
  748 	struct dev_pm_domain	*pm_domain;
  749 
  750 #ifdef CONFIG_PINCTRL
  751 	struct dev_pin_info	*pins;
  752 #endif
  753 
  754 #ifdef CONFIG_NUMA
  755 	int		numa_node;	/* NUMA node this device is close to */
  756 #endif
  757 	u64		*dma_mask;	/* dma mask (if dma'able device) */
  758 	u64		coherent_dma_mask;/* Like dma_mask, but for
  759 					     alloc_coherent mappings as
  760 					     not all hardware supports
  761 					     64 bit addresses for consistent
  762 					     allocations of such descriptors. */
  763 	unsigned long	dma_pfn_offset;
  764 
  765 	struct device_dma_parameters *dma_parms;
  766 
  767 	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
  768 
  769 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
  770 					     override */
  771 #ifdef CONFIG_DMA_CMA
  772 	struct cma *cma_area;		/* contiguous memory area for dma
  773 					   allocations */
  774 #endif
  775 	/* arch specific additions */
  776 	struct dev_archdata	archdata;
  777 
  778 	struct device_node	*of_node; /* associated device tree node */
  779 	struct acpi_dev_node	acpi_node; /* associated ACPI device node */
  780 
  781 	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
  782 	u32			id;	/* device instance */
  783 
  784 	spinlock_t		devres_lock;
  785 	struct list_head	devres_head;
  786 
  787 	struct klist_node	knode_class;
  788 	struct class		*class;
  789 	const struct attribute_group **groups;	/* optional groups */
  790 
  791 	void	(*release)(struct device *dev);
  792 	struct iommu_group	*iommu_group;
  793 
  794 	bool			offline_disabled:1;
  795 	bool			offline:1;
  796 };
  797 
  798 static inline struct device *kobj_to_dev(struct kobject *kobj)
  799 {
  800 	return container_of(kobj, struct device, kobj);
  801 }
  802 
  803 /* Get the wakeup routines, which depend on struct device */
  804 #include <linux/pm_wakeup.h>
  805 
  806 static inline const char *dev_name(const struct device *dev)
  807 {
  808 	/* Use the init name until the kobject becomes available */
  809 	if (dev->init_name)
  810 		return dev->init_name;
  811 
  812 	return kobject_name(&dev->kobj);
  813 }
  814 
  815 extern __printf(2, 3)
  816 int dev_set_name(struct device *dev, const char *name, ...);
  817 
  818 #ifdef CONFIG_NUMA
  819 static inline int dev_to_node(struct device *dev)
  820 {
  821 	return dev->numa_node;
  822 }
  823 static inline void set_dev_node(struct device *dev, int node)
  824 {
  825 	dev->numa_node = node;
  826 }
  827 #else
  828 static inline int dev_to_node(struct device *dev)
  829 {
  830 	return -1;
  831 }
  832 static inline void set_dev_node(struct device *dev, int node)
  833 {
  834 }
  835 #endif
  836 
  837 static inline void *dev_get_drvdata(const struct device *dev)
  838 {
  839 	return dev->driver_data;
  840 }
  841 
  842 static inline void dev_set_drvdata(struct device *dev, void *data)
  843 {
  844 	dev->driver_data = data;
  845 }
  846 
  847 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
  848 {
  849 	return dev ? dev->power.subsys_data : NULL;
  850 }
  851 
  852 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
  853 {
  854 	return dev->kobj.uevent_suppress;
  855 }
  856 
  857 static inline void dev_set_uevent_suppress(struct device *dev, int val)
  858 {
  859 	dev->kobj.uevent_suppress = val;
  860 }
  861 
  862 static inline int device_is_registered(struct device *dev)
  863 {
  864 	return dev->kobj.state_in_sysfs;
  865 }
  866 
  867 static inline void device_enable_async_suspend(struct device *dev)
  868 {
  869 	if (!dev->power.is_prepared)
  870 		dev->power.async_suspend = true;
  871 }
  872 
  873 static inline void device_disable_async_suspend(struct device *dev)
  874 {
  875 	if (!dev->power.is_prepared)
  876 		dev->power.async_suspend = false;
  877 }
  878 
  879 static inline bool device_async_suspend_enabled(struct device *dev)
  880 {
  881 	return !!dev->power.async_suspend;
  882 }
  883 
  884 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
  885 {
  886 	dev->power.ignore_children = enable;
  887 }
  888 
  889 static inline void dev_pm_syscore_device(struct device *dev, bool val)
  890 {
  891 #ifdef CONFIG_PM_SLEEP
  892 	dev->power.syscore = val;
  893 #endif
  894 }
  895 
  896 static inline void device_lock(struct device *dev)
  897 {
  898 	mutex_lock(&dev->mutex);
  899 }
  900 
  901 static inline int device_trylock(struct device *dev)
  902 {
  903 	return mutex_trylock(&dev->mutex);
  904 }
  905 
  906 static inline void device_unlock(struct device *dev)
  907 {
  908 	mutex_unlock(&dev->mutex);
  909 }
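
/* Illustrative sketch (assumed usage, not part of this header): the usual
   pattern for the per-device mutex helpers defined above. */
static void example_with_device_locked(struct device *dev)
{
	device_lock(dev);
	/* ... touch driver-visible state with the driver excluded ... */
	device_unlock(dev);
}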
  910 
  911 void driver_init(void);
  912 
  913 /*
  914  * High level routines for use by the bus drivers
  915  */
  916 extern int __must_check device_register(struct device *dev);
  917 extern void device_unregister(struct device *dev);
  918 extern void device_initialize(struct device *dev);
  919 extern int __must_check device_add(struct device *dev);
  920 extern void device_del(struct device *dev);
  921 extern int device_for_each_child(struct device *dev, void *data,
  922 		     int (*fn)(struct device *dev, void *data));
  923 extern struct device *device_find_child(struct device *dev, void *data,
  924 				int (*match)(struct device *dev, void *data));
  925 extern int device_rename(struct device *dev, const char *new_name);
  926 extern int device_move(struct device *dev, struct device *new_parent,
  927 		       enum dpm_order dpm_order);
  928 extern const char *device_get_devnode(struct device *dev,
  929 				      umode_t *mode, kuid_t *uid, kgid_t *gid,
  930 				      const char **tmp);
  931 
  932 static inline bool device_supports_offline(struct device *dev)
  933 {
  934 	return dev->bus && dev->bus->offline && dev->bus->online;
  935 }
  936 
  937 extern void lock_device_hotplug(void);
  938 extern void unlock_device_hotplug(void);
  939 extern int lock_device_hotplug_sysfs(void);
  940 extern int device_offline(struct device *dev);
  941 extern int device_online(struct device *dev);
  942 /*
  943  * Root device objects for grouping under /sys/devices
  944  */
  945 extern struct device *__root_device_register(const char *name,
  946 					     struct module *owner);
  947 
  948 /* This is a macro to avoid include problems with THIS_MODULE */
  949 #define root_device_register(name) \
  950 	__root_device_register(name, THIS_MODULE)
  951 
  952 extern void root_device_unregister(struct device *root);
  953 
  954 static inline void *dev_get_platdata(const struct device *dev)
  955 {
  956 	return dev->platform_data;
  957 }
  958 
  959 /*
  960  * Manual binding of a device to driver. See drivers/base/bus.c
  961  * for information on use.
  962  */
  963 extern int __must_check device_bind_driver(struct device *dev);
  964 extern void device_release_driver(struct device *dev);
  965 extern int  __must_check device_attach(struct device *dev);
  966 extern int __must_check driver_attach(struct device_driver *drv);
  967 extern int __must_check device_reprobe(struct device *dev);
  968 
  969 /*
  970  * Easy functions for dynamically creating devices on the fly
  971  */
  972 extern struct device *device_create_vargs(struct class *cls,
  973 					  struct device *parent,
  974 					  dev_t devt,
  975 					  void *drvdata,
  976 					  const char *fmt,
  977 					  va_list vargs);
  978 extern __printf(5, 6)
  979 struct device *device_create(struct class *cls, struct device *parent,
  980 			     dev_t devt, void *drvdata,
  981 			     const char *fmt, ...);
  982 extern __printf(6, 7)
  983 struct device *device_create_with_groups(struct class *cls,
  984 			     struct device *parent, dev_t devt, void *drvdata,
  985 			     const struct attribute_group **groups,
  986 			     const char *fmt, ...);
  987 extern void device_destroy(struct class *cls, dev_t devt);
  988 
  989 /*
  990  * Platform "fixup" functions - allow the platform to have their say
  991  * about devices and actions that the general device layer doesn't
  992  * know about.
  993  */
  994 /* Notify platform of device discovery */
  995 extern int (*platform_notify)(struct device *dev);
  996 
  997 extern int (*platform_notify_remove)(struct device *dev);
  998 
  999 
 1000 /*
 1001  * get_device - atomically increment the reference count for the device.
 1002  *
 1003  */
 1004 extern struct device *get_device(struct device *dev);
 1005 extern void put_device(struct device *dev);
 1006 
 1007 #ifdef CONFIG_DEVTMPFS
 1008 extern int devtmpfs_create_node(struct device *dev);
 1009 extern int devtmpfs_delete_node(struct device *dev);
 1010 extern int devtmpfs_mount(const char *mntdir);
 1011 #else
 1012 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
 1013 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
 1014 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
 1015 #endif
 1016 
 1017 /* drivers/base/power/shutdown.c */
 1018 extern void device_shutdown(void);
 1019 
 1020 /* debugging and troubleshooting/diagnostic helpers. */
 1021 extern const char *dev_driver_string(const struct device *dev);
 1022 
 1023 
 1024 #ifdef CONFIG_PRINTK
 1025 
 1026 extern __printf(3, 0)
 1027 int dev_vprintk_emit(int level, const struct device *dev,
 1028 		     const char *fmt, va_list args);
 1029 extern __printf(3, 4)
 1030 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
 1031 
 1032 extern __printf(3, 4)
 1033 int dev_printk(const char *level, const struct device *dev,
 1034 	       const char *fmt, ...);
 1035 extern __printf(2, 3)
 1036 int dev_emerg(const struct device *dev, const char *fmt, ...);
 1037 extern __printf(2, 3)
 1038 int dev_alert(const struct device *dev, const char *fmt, ...);
 1039 extern __printf(2, 3)
 1040 int dev_crit(const struct device *dev, const char *fmt, ...);
 1041 extern __printf(2, 3)
 1042 int dev_err(const struct device *dev, const char *fmt, ...);
 1043 extern __printf(2, 3)
 1044 int dev_warn(const struct device *dev, const char *fmt, ...);
 1045 extern __printf(2, 3)
 1046 int dev_notice(const struct device *dev, const char *fmt, ...);
 1047 extern __printf(2, 3)
 1048 int _dev_info(const struct device *dev, const char *fmt, ...);
 1049 
 1050 #else
 1051 
 1052 static inline __printf(3, 0)
 1053 int dev_vprintk_emit(int level, const struct device *dev,
 1054 		     const char *fmt, va_list args)
 1055 { return 0; }
 1056 static inline __printf(3, 4)
 1057 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
 1058 { return 0; }
 1059 
 1060 static inline int __dev_printk(const char *level, const struct device *dev,
 1061 			       struct va_format *vaf)
 1062 { return 0; }
 1063 static inline __printf(3, 4)
 1064 int dev_printk(const char *level, const struct device *dev,
 1065 	       const char *fmt, ...)
 1066 { return 0; }
 1067 
 1068 static inline __printf(2, 3)
 1069 int dev_emerg(const struct device *dev, const char *fmt, ...)
 1070 { return 0; }
 1071 static inline __printf(2, 3)
 1072 int dev_crit(const struct device *dev, const char *fmt, ...)
 1073 { return 0; }
 1074 static inline __printf(2, 3)
 1075 int dev_alert(const struct device *dev, const char *fmt, ...)
 1076 { return 0; }
 1077 static inline __printf(2, 3)
 1078 int dev_err(const struct device *dev, const char *fmt, ...)
 1079 { return 0; }
 1080 static inline __printf(2, 3)
 1081 int dev_warn(const struct device *dev, const char *fmt, ...)
 1082 { return 0; }
 1083 static inline __printf(2, 3)
 1084 int dev_notice(const struct device *dev, const char *fmt, ...)
 1085 { return 0; }
 1086 static inline __printf(2, 3)
 1087 int _dev_info(const struct device *dev, const char *fmt, ...)
 1088 { return 0; }
 1089 
 1090 #endif
 1091 
 1092 /*
 1093  * Stupid hackaround for existing non-printk uses of dev_info
 1094  *
 1095  * Note that the definition of dev_info below is actually _dev_info
 1096  * and a macro is used to avoid redefining dev_info
 1097  */
 1098 
 1099 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 1100 
 1101 #if defined(CONFIG_DYNAMIC_DEBUG)
 1102 #define dev_dbg(dev, format, ...)		     \
 1103 do {						     \
 1104 	dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
 1105 } while (0)
 1106 #elif defined(DEBUG)
 1107 #define dev_dbg(dev, format, arg...)		\
 1108 	dev_printk(KERN_DEBUG, dev, format, ##arg)
 1109 #else
 1110 #define dev_dbg(dev, format, arg...)				\
 1111 ({								\
 1112 	if (0)							\
 1113 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1114 	0;							\
 1115 })
 1116 #endif
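
/* Illustrative sketch (assumed usage, not part of this header): typical
   dev_err()/dev_dbg() calls from a driver; the function and message text are
   made up for the example. */
static int example_check_irq(struct device *dev, int irq)
{
	if (irq < 0) {
		dev_err(dev, "failed to obtain IRQ: %d\n", irq);
		return irq;
	}
	dev_dbg(dev, "using IRQ %d\n", irq);
	return 0;
}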
 1117 
 1118 #define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
 1119 do {									\
 1120 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1121 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1122 				      DEFAULT_RATELIMIT_BURST);		\
 1123 	if (__ratelimit(&_rs))						\
 1124 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1125 } while (0)
 1126 
 1127 #define dev_emerg_ratelimited(dev, fmt, ...)				\
 1128 	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1129 #define dev_alert_ratelimited(dev, fmt, ...)				\
 1130 	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
 1131 #define dev_crit_ratelimited(dev, fmt, ...)				\
 1132 	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
 1133 #define dev_err_ratelimited(dev, fmt, ...)				\
 1134 	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
 1135 #define dev_warn_ratelimited(dev, fmt, ...)				\
 1136 	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
 1137 #define dev_notice_ratelimited(dev, fmt, ...)				\
 1138 	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
 1139 #define dev_info_ratelimited(dev, fmt, ...)				\
 1140 	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
 1141 #if defined(CONFIG_DYNAMIC_DEBUG)
 1142 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
 1143 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1144 do {									\
 1145 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1146 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1147 				      DEFAULT_RATELIMIT_BURST);		\
 1148 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
 1149 	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
 1150 	    __ratelimit(&_rs))						\
 1151 		__dynamic_dev_dbg(&descriptor, dev, fmt,		\
 1152 				  ##__VA_ARGS__);			\
 1153 } while (0)
 1154 #elif defined(DEBUG)
 1155 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1156 do {									\
 1157 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1158 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1159 				      DEFAULT_RATELIMIT_BURST);		\
 1160 	if (__ratelimit(&_rs))						\
 1161 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1162 } while (0)
 1163 #else
 1164 #define dev_dbg_ratelimited(dev, fmt, ...)			\
 1165 	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 1166 #endif
 1167 
 1168 #ifdef VERBOSE_DEBUG
 1169 #define dev_vdbg	dev_dbg
 1170 #else
 1171 #define dev_vdbg(dev, format, arg...)				\
 1172 ({								\
 1173 	if (0)							\
 1174 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1175 	0;							\
 1176 })
 1177 #endif
 1178 
 1179 /*
 1180  * dev_WARN*() acts like dev_printk(), but with the key difference of
 1181  * using WARN/WARN_ONCE to include file/line information and a backtrace.
 1182  */
 1183 #define dev_WARN(dev, format, arg...) \
 1184 	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
 1185 
 1186 #define dev_WARN_ONCE(dev, condition, format, arg...) \
 1187 	WARN_ONCE(condition, "%s %s: " format, \
 1188 			dev_driver_string(dev), dev_name(dev), ## arg)
 1189 
 1190 /* Create alias, so I can be autoloaded. */
 1191 #define MODULE_ALIAS_CHARDEV(major,minor) \
 1192 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
 1193 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
 1194 	MODULE_ALIAS("char-major-" __stringify(major) "-*")
 1195 
 1196 #ifdef CONFIG_SYSFS_DEPRECATED
 1197 extern long sysfs_deprecated;
 1198 #else
 1199 #define sysfs_deprecated 0
 1200 #endif
 1201 
 1202 /**
 1203  * module_driver() - Helper macro for drivers that don't do anything
 1204  * special in module init/exit. This eliminates a lot of boilerplate.
 1205  * Each module may only use this macro once, and calling it replaces
 1206  * module_init() and module_exit().
 1207  *
 1208  * @__driver: driver name
 1209  * @__register: register function for this driver type
 1210  * @__unregister: unregister function for this driver type
 1211  * @...: Additional arguments to be passed to __register and __unregister.
 1212  *
 1213  * Use this macro to construct bus specific macros for registering
 1214  * drivers, and do not use it on its own.
 1215  */
 1216 #define module_driver(__driver, __register, __unregister, ...) \
 1217 static int __init __driver##_init(void) \
 1218 { \
 1219 	return __register(&(__driver) , ##__VA_ARGS__); \
 1220 } \
 1221 module_init(__driver##_init); \
 1222 static void __exit __driver##_exit(void) \
 1223 { \
 1224 	__unregister(&(__driver) , ##__VA_ARGS__); \
 1225 } \
 1226 module_exit(__driver##_exit);
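/*
 * Illustrative sketch only: how a bus might wrap module_driver() into its own
 * one-line registration helper, in the spirit of helpers like
 * module_pci_driver(). example_register_driver()/example_unregister_driver()
 * are assumed names, not real kernel APIs.
 */
#define module_example_driver(__example_driver) \
	module_driver(__example_driver, example_register_driver, \
		      example_unregister_driver)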
 1227 
 1228 #endif /* _DEVICE_H_ */
    1 /*
    2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
    3  *		operating system.  INET is implemented using the  BSD Socket
    4  *		interface as the means of communication with the user level.
    5  *
    6  *		Definitions for the Interfaces handler.
    7  *
    8  * Version:	@(#)dev.h	1.0.10	08/12/93
    9  *
   10  * Authors:	Ross Biro
   11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   12  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
   13  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
   14  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
   15  *		Bjorn Ekwall. <bj0rn@blox.se>
   16  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
   17  *
   18  *		This program is free software; you can redistribute it and/or
   19  *		modify it under the terms of the GNU General Public License
   20  *		as published by the Free Software Foundation; either version
   21  *		2 of the License, or (at your option) any later version.
   22  *
   23  *		Moved to /usr/include/linux for NET3
   24  */
   25 #ifndef _LINUX_NETDEVICE_H
   26 #define _LINUX_NETDEVICE_H
   27 
   28 #include <linux/pm_qos.h>
   29 #include <linux/timer.h>
   30 #include <linux/bug.h>
   31 #include <linux/delay.h>
   32 #include <linux/atomic.h>
   33 #include <asm/cache.h>
   34 #include <asm/byteorder.h>
   35 
   36 #include <linux/percpu.h>
   37 #include <linux/rculist.h>
   38 #include <linux/dmaengine.h>
   39 #include <linux/workqueue.h>
   40 #include <linux/dynamic_queue_limits.h>
   41 
   42 #include <linux/ethtool.h>
   43 #include <net/net_namespace.h>
   44 #include <net/dsa.h>
   45 #ifdef CONFIG_DCB
   46 #include <net/dcbnl.h>
   47 #endif
   48 #include <net/netprio_cgroup.h>
   49 
   50 #include <linux/netdev_features.h>
   51 #include <linux/neighbour.h>
   52 #include <uapi/linux/netdevice.h>
   53 
   54 struct netpoll_info;
   55 struct device;
   56 struct phy_device;
   57 /* 802.11 specific */
   58 struct wireless_dev;
   59 
   60 void netdev_set_default_ethtool_ops(struct net_device *dev,
   61 				    const struct ethtool_ops *ops);
   62 
   63 /* Backlog congestion levels */
   64 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
   65 #define NET_RX_DROP		1	/* packet dropped */
   66 
   67 /*
   68  * Transmit return codes: transmit return codes originate from three different
   69  * namespaces:
   70  *
   71  * - qdisc return codes
   72  * - driver transmit return codes
   73  * - errno values
   74  *
   75  * Drivers are allowed to return any one of those in their hard_start_xmit()
   76  * function. Real network devices commonly used with qdiscs should only return
   77  * the driver transmit return codes though - when qdiscs are used, the actual
   78  * transmission happens asynchronously, so the value is not propagated to
   79  * higher layers. Virtual network devices transmit synchronously, in this case
   80  * the driver transmit return codes are consumed by dev_queue_xmit(), all
   81  * others are propagated to higher layers.
   82  */
   83 
   84 /* qdisc ->enqueue() return codes. */
   85 #define NET_XMIT_SUCCESS	0x00
   86 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
   87 #define NET_XMIT_CN		0x02	/* congestion notification	*/
   88 #define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
   89 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
   90 
   91 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
   92  * indicates that the device will soon be dropping packets, or already drops
   93  * some packets of the same priority; prompting us to send less aggressively. */
   94 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
   95 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
   96 
   97 /* Driver transmit return codes */
   98 #define NETDEV_TX_MASK		0xf0
   99 
  100 enum netdev_tx {
  101 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
  102 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
  103 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
  104 	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
  105 };
  106 typedef enum netdev_tx netdev_tx_t;
  107 
  108 /*
  109  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
  110  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
  111  */
  112 static inline bool dev_xmit_complete(int rc)
  113 {
  114 	/*
  115 	 * Positive cases with an skb consumed by a driver:
  116 	 * - successful transmission (rc == NETDEV_TX_OK)
  117 	 * - error while transmitting (rc < 0)
  118 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
  119 	 */
  120 	if (likely(rc < NET_XMIT_MASK))
  121 		return true;
  122 
  123 	return false;
  124 }
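/*
 * Illustrative sketch (hypothetical caller, not part of this header): using
 * dev_xmit_complete() to decide whether a hard_start_xmit()-style return code
 * means the skb was consumed or is still owned by the caller.
 */
static void example_check_xmit(struct sk_buff *skb, int rc)
{
	if (dev_xmit_complete(rc))
		return;		/* driver took ownership of the skb */
	/* NETDEV_TX_BUSY / NETDEV_TX_LOCKED: the skb is still ours to handle */
	kfree_skb(skb);
}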
  125 
  126 /*
  127  *	Compute the worst case header length according to the protocols
  128  *	used.
  129  */
  130 
  131 #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
  132 # if defined(CONFIG_MAC80211_MESH)
  133 #  define LL_MAX_HEADER 128
  134 # else
  135 #  define LL_MAX_HEADER 96
  136 # endif
  137 #else
  138 # define LL_MAX_HEADER 32
  139 #endif
  140 
  141 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
  142     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
  143 #define MAX_HEADER LL_MAX_HEADER
  144 #else
  145 #define MAX_HEADER (LL_MAX_HEADER + 48)
  146 #endif
  147 
  148 /*
  149  *	Old network device statistics. Fields are native words
  150  *	(unsigned long) so they can be read and written atomically.
  151  */
  152 
  153 struct net_device_stats {
  154 	unsigned long	rx_packets;
  155 	unsigned long	tx_packets;
  156 	unsigned long	rx_bytes;
  157 	unsigned long	tx_bytes;
  158 	unsigned long	rx_errors;
  159 	unsigned long	tx_errors;
  160 	unsigned long	rx_dropped;
  161 	unsigned long	tx_dropped;
  162 	unsigned long	multicast;
  163 	unsigned long	collisions;
  164 	unsigned long	rx_length_errors;
  165 	unsigned long	rx_over_errors;
  166 	unsigned long	rx_crc_errors;
  167 	unsigned long	rx_frame_errors;
  168 	unsigned long	rx_fifo_errors;
  169 	unsigned long	rx_missed_errors;
  170 	unsigned long	tx_aborted_errors;
  171 	unsigned long	tx_carrier_errors;
  172 	unsigned long	tx_fifo_errors;
  173 	unsigned long	tx_heartbeat_errors;
  174 	unsigned long	tx_window_errors;
  175 	unsigned long	rx_compressed;
  176 	unsigned long	tx_compressed;
  177 };
  178 
  179 
  180 #include <linux/cache.h>
  181 #include <linux/skbuff.h>
  182 
  183 #ifdef CONFIG_RPS
  184 #include <linux/static_key.h>
  185 extern struct static_key rps_needed;
  186 #endif
  187 
  188 struct neighbour;
  189 struct neigh_parms;
  190 struct sk_buff;
  191 
  192 struct netdev_hw_addr {
  193 	struct list_head	list;
  194 	unsigned char		addr[MAX_ADDR_LEN];
  195 	unsigned char		type;
  196 #define NETDEV_HW_ADDR_T_LAN		1
  197 #define NETDEV_HW_ADDR_T_SAN		2
  198 #define NETDEV_HW_ADDR_T_SLAVE		3
  199 #define NETDEV_HW_ADDR_T_UNICAST	4
  200 #define NETDEV_HW_ADDR_T_MULTICAST	5
  201 	bool			global_use;
  202 	int			sync_cnt;
  203 	int			refcount;
  204 	int			synced;
  205 	struct rcu_head		rcu_head;
  206 };
  207 
  208 struct netdev_hw_addr_list {
  209 	struct list_head	list;
  210 	int			count;
  211 };
  212 
  213 #define netdev_hw_addr_list_count(l) ((l)->count)
  214 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
  215 #define netdev_hw_addr_list_for_each(ha, l) \
  216 	list_for_each_entry(ha, &(l)->list, list)
  217 
  218 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
  219 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
  220 #define netdev_for_each_uc_addr(ha, dev) \
  221 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
  222 
  223 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
  224 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
  225 #define netdev_for_each_mc_addr(ha, dev) \
  226 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
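/*
 * Illustrative sketch (hypothetical driver): walking the device multicast
 * list with the helpers above, e.g. from an ndo_set_rx_mode implementation.
 * example_program_mc_filter() is an assumed placeholder, not a kernel API.
 */
static void example_sync_mc_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		example_program_mc_filter(ha->addr);
}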
  227 
  228 struct hh_cache {
  229 	u16		hh_len;
  230 	u16		__pad;
  231 	seqlock_t	hh_lock;
  232 
  233 	/* cached hardware header; allow for machine alignment needs.        */
  234 #define HH_DATA_MOD	16
  235 #define HH_DATA_OFF(__len) \
  236 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  237 #define HH_DATA_ALIGN(__len) \
  238 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  239 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  240 };
  241 
  242 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  243  * Alternative is:
  244  *   dev->hard_header_len ? (dev->hard_header_len +
  245  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  246  *
  247  * We could use other alignment values, but we must maintain the
  248  * relationship HH alignment <= LL alignment.
  249  */
  250 #define LL_RESERVED_SPACE(dev) \
  251 	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  252 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  253 	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
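/*
 * Illustrative sketch (hypothetical allocation path): reserving the
 * HH_DATA_MOD-aligned link-layer headroom computed above before building a
 * packet for @dev, in the usual alloc_skb()/skb_reserve() pattern.
 */
static struct sk_buff *example_alloc_for_dev(struct net_device *dev, int len)
{
	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
	return skb;
}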
  254 
  255 struct header_ops {
  256 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
  257 			   unsigned short type, const void *daddr,
  258 			   const void *saddr, unsigned int len);
  259 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
  260 	int	(*rebuild)(struct sk_buff *skb);
  261 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
  262 	void	(*cache_update)(struct hh_cache *hh,
  263 				const struct net_device *dev,
  264 				const unsigned char *haddr);
  265 };
  266 
  267 /* These flag bits are private to the generic network queueing
  268  * layer, they may not be explicitly referenced by any other
  269  * code.
  270  */
  271 
  272 enum netdev_state_t {
  273 	__LINK_STATE_START,
  274 	__LINK_STATE_PRESENT,
  275 	__LINK_STATE_NOCARRIER,
  276 	__LINK_STATE_LINKWATCH_PENDING,
  277 	__LINK_STATE_DORMANT,
  278 };
  279 
  280 
  281 /*
  282  * This structure holds at boot time configured netdevice settings. They
  283  * are then used in the device probing.
  284  */
  285 struct netdev_boot_setup {
  286 	char name[IFNAMSIZ];
  287 	struct ifmap map;
  288 };
  289 #define NETDEV_BOOT_SETUP_MAX 8
  290 
  291 int __init netdev_boot_setup(char *str);
  292 
  293 /*
  294  * Structure for NAPI scheduling similar to tasklet but with weighting
  295  */
  296 struct napi_struct {
  297 	/* The poll_list must only be managed by the entity which
  298 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
  299 	 * whoever atomically sets that bit can add this napi_struct
  300 	 * to the per-cpu poll_list, and whoever clears that bit
  301 	 * can remove from the list right before clearing the bit.
  302 	 */
  303 	struct list_head	poll_list;
  304 
  305 	unsigned long		state;
  306 	int			weight;
  307 	unsigned int		gro_count;
  308 	int			(*poll)(struct napi_struct *, int);
  309 #ifdef CONFIG_NETPOLL
  310 	spinlock_t		poll_lock;
  311 	int			poll_owner;
  312 #endif
  313 	struct net_device	*dev;
  314 	struct sk_buff		*gro_list;
  315 	struct sk_buff		*skb;
  316 	struct list_head	dev_list;
  317 	struct hlist_node	napi_hash_node;
  318 	unsigned int		napi_id;
  319 };
  320 
  321 enum {
  322 	NAPI_STATE_SCHED,	/* Poll is scheduled */
  323 	NAPI_STATE_DISABLE,	/* Disable pending */
  324 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
  325 	NAPI_STATE_HASHED,	/* In NAPI hash */
  326 };
  327 
  328 enum gro_result {
  329 	GRO_MERGED,
  330 	GRO_MERGED_FREE,
  331 	GRO_HELD,
  332 	GRO_NORMAL,
  333 	GRO_DROP,
  334 };
  335 typedef enum gro_result gro_result_t;
  336 
  337 /*
  338  * enum rx_handler_result - Possible return values for rx_handlers.
  339  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
  340  * further.
  341  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
  342  * case skb->dev was changed by rx_handler.
  343  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
   344  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
  345  *
  346  * rx_handlers are functions called from inside __netif_receive_skb(), to do
  347  * special processing of the skb, prior to delivery to protocol handlers.
  348  *
  349  * Currently, a net_device can only have a single rx_handler registered. Trying
  350  * to register a second rx_handler will return -EBUSY.
  351  *
  352  * To register a rx_handler on a net_device, use netdev_rx_handler_register().
  353  * To unregister a rx_handler on a net_device, use
  354  * netdev_rx_handler_unregister().
  355  *
  356  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
  357  * do with the skb.
  358  *
   359  * If the rx_handler consumed the skb in some way, it should return
   360  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
   361  * the skb to be delivered in some other way.
  362  *
  363  * If the rx_handler changed skb->dev, to divert the skb to another
  364  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
  365  * new device will be called if it exists.
  366  *
   367  * If the rx_handler considers that the skb should be ignored, it should return
   368  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
   369  * are registered on the exact device (ptype->dev == skb->dev).
  370  *
   371  * If the rx_handler didn't change skb->dev, but wants the skb to be normally
  372  * delivered, it should return RX_HANDLER_PASS.
  373  *
  374  * A device without a registered rx_handler will behave as if rx_handler
  375  * returned RX_HANDLER_PASS.
  376  */
  377 
  378 enum rx_handler_result {
  379 	RX_HANDLER_CONSUMED,
  380 	RX_HANDLER_ANOTHER,
  381 	RX_HANDLER_EXACT,
  382 	RX_HANDLER_PASS,
  383 };
  384 typedef enum rx_handler_result rx_handler_result_t;
  385 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
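/*
 * Illustrative sketch (hypothetical handler, not part of this header): a
 * minimal rx_handler matching rx_handler_func_t that lets every frame
 * continue normal delivery.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!skb)
		return RX_HANDLER_PASS;
	/* inspect or redirect the skb here; see the return-value rules above */
	return RX_HANDLER_PASS;
}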
  386 
  387 void __napi_schedule(struct napi_struct *n);
  388 
  389 static inline bool napi_disable_pending(struct napi_struct *n)
  390 {
  391 	return test_bit(NAPI_STATE_DISABLE, &n->state);
  392 }
  393 
  394 /**
  395  *	napi_schedule_prep - check if napi can be scheduled
  396  *	@n: napi context
  397  *
  398  * Test if NAPI routine is already running, and if not mark
  399  * it as running.  This is used as a condition variable
   400  * to ensure only one NAPI poll instance runs.  We also make
  401  * sure there is no pending NAPI disable.
  402  */
  403 static inline bool napi_schedule_prep(struct napi_struct *n)
  404 {
  405 	return !napi_disable_pending(n) &&
  406 		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  407 }
  408 
  409 /**
  410  *	napi_schedule - schedule NAPI poll
  411  *	@n: napi context
  412  *
  413  * Schedule NAPI poll routine to be called if it is not already
  414  * running.
  415  */
  416 static inline void napi_schedule(struct napi_struct *n)
  417 {
  418 	if (napi_schedule_prep(n))
  419 		__napi_schedule(n);
  420 }
  421 
  422 /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
  423 static inline bool napi_reschedule(struct napi_struct *napi)
  424 {
  425 	if (napi_schedule_prep(napi)) {
  426 		__napi_schedule(napi);
  427 		return true;
  428 	}
  429 	return false;
  430 }
  431 
  432 /**
  433  *	napi_complete - NAPI processing complete
  434  *	@n: napi context
  435  *
  436  * Mark NAPI processing as complete.
  437  */
  438 void __napi_complete(struct napi_struct *n);
  439 void napi_complete(struct napi_struct *n);
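/*
 * Illustrative sketch (hypothetical driver, assumes <linux/interrupt.h>):
 * the usual NAPI pattern of scheduling the poll routine from the interrupt
 * handler and completing it when the budget is not exhausted.
 * example_clean_rx() is an assumed driver-internal helper.
 */
static irqreturn_t example_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule(napi);	/* no-op if already scheduled or disabled */
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_clean_rx(napi, budget);

	if (done < budget)
		napi_complete(napi);
	return done;
}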
  440 
  441 /**
  442  *	napi_by_id - lookup a NAPI by napi_id
  443  *	@napi_id: hashed napi_id
  444  *
  445  * lookup @napi_id in napi_hash table
  446  * must be called under rcu_read_lock()
  447  */
  448 struct napi_struct *napi_by_id(unsigned int napi_id);
  449 
  450 /**
  451  *	napi_hash_add - add a NAPI to global hashtable
  452  *	@napi: napi context
  453  *
  454  * generate a new napi_id and store a @napi under it in napi_hash
  455  */
  456 void napi_hash_add(struct napi_struct *napi);
  457 
  458 /**
  459  *	napi_hash_del - remove a NAPI from global table
  460  *	@napi: napi context
  461  *
  462  * Warning: caller must observe rcu grace period
  463  * before freeing memory containing @napi
  464  */
  465 void napi_hash_del(struct napi_struct *napi);
  466 
  467 /**
  468  *	napi_disable - prevent NAPI from scheduling
  469  *	@n: napi context
  470  *
  471  * Stop NAPI from being scheduled on this context.
  472  * Waits till any outstanding processing completes.
  473  */
  474 static inline void napi_disable(struct napi_struct *n)
  475 {
  476 	might_sleep();
  477 	set_bit(NAPI_STATE_DISABLE, &n->state);
  478 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  479 		msleep(1);
  480 	clear_bit(NAPI_STATE_DISABLE, &n->state);
  481 }
  482 
  483 /**
  484  *	napi_enable - enable NAPI scheduling
  485  *	@n: napi context
  486  *
  487  * Resume NAPI from being scheduled on this context.
  488  * Must be paired with napi_disable.
  489  */
  490 static inline void napi_enable(struct napi_struct *n)
  491 {
  492 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  493 	smp_mb__before_atomic();
  494 	clear_bit(NAPI_STATE_SCHED, &n->state);
  495 }
  496 
  497 #ifdef CONFIG_SMP
  498 /**
  499  *	napi_synchronize - wait until NAPI is not running
  500  *	@n: napi context
  501  *
  502  * Wait until NAPI is done being scheduled on this context.
  503  * Waits till any outstanding processing completes but
  504  * does not disable future activations.
  505  */
  506 static inline void napi_synchronize(const struct napi_struct *n)
  507 {
  508 	while (test_bit(NAPI_STATE_SCHED, &n->state))
  509 		msleep(1);
  510 }
  511 #else
  512 # define napi_synchronize(n)	barrier()
  513 #endif
  514 
  515 enum netdev_queue_state_t {
  516 	__QUEUE_STATE_DRV_XOFF,
  517 	__QUEUE_STATE_STACK_XOFF,
  518 	__QUEUE_STATE_FROZEN,
  519 };
  520 
  521 #define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
  522 #define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
  523 #define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)
  524 
  525 #define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
  526 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
  527 					QUEUE_STATE_FROZEN)
  528 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
  529 					QUEUE_STATE_FROZEN)
  530 
  531 /*
  532  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
  533  * netif_tx_* functions below are used to manipulate this flag.  The
  534  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
  535  * queue independently.  The netif_xmit_*stopped functions below are called
  536  * to check if the queue has been stopped by the driver or stack (either
  537  * of the XOFF bits are set in the state).  Drivers should not need to call
  538  * netif_xmit*stopped functions, they should only be using netif_tx_*.
  539  */
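/*
 * Illustrative sketch (hypothetical driver): flow control with the netif_tx_*
 * helpers referred to above. example_ring_full() and example_ring_has_room()
 * stand in for a driver's descriptor-ring accounting and are assumed names.
 */
static void example_tx_flow_control(struct netdev_queue *txq)
{
	if (example_ring_full(txq))
		netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */
	else if (example_ring_has_room(txq))
		netif_tx_wake_queue(txq);
}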
  540 
  541 struct netdev_queue {
  542 /*
  543  * read mostly part
  544  */
  545 	struct net_device	*dev;
  546 	struct Qdisc		*qdisc;
  547 	struct Qdisc		*qdisc_sleeping;
  548 #ifdef CONFIG_SYSFS
  549 	struct kobject		kobj;
  550 #endif
  551 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  552 	int			numa_node;
  553 #endif
  554 /*
  555  * write mostly part
  556  */
  557 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  558 	int			xmit_lock_owner;
  559 	/*
  560 	 * please use this field instead of dev->trans_start
  561 	 */
  562 	unsigned long		trans_start;
  563 
  564 	/*
  565 	 * Number of TX timeouts for this queue
  566 	 * (/sys/class/net/DEV/Q/trans_timeout)
  567 	 */
  568 	unsigned long		trans_timeout;
  569 
  570 	unsigned long		state;
  571 
  572 #ifdef CONFIG_BQL
  573 	struct dql		dql;
  574 #endif
  575 } ____cacheline_aligned_in_smp;
  576 
  577 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
  578 {
  579 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  580 	return q->numa_node;
  581 #else
  582 	return NUMA_NO_NODE;
  583 #endif
  584 }
  585 
  586 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
  587 {
  588 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  589 	q->numa_node = node;
  590 #endif
  591 }
  592 
  593 #ifdef CONFIG_RPS
  594 /*
  595  * This structure holds an RPS map which can be of variable length.  The
  596  * map is an array of CPUs.
  597  */
  598 struct rps_map {
  599 	unsigned int len;
  600 	struct rcu_head rcu;
  601 	u16 cpus[0];
  602 };
  603 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
  604 
  605 /*
  606  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
  607  * tail pointer for that CPU's input queue at the time of last enqueue, and
  608  * a hardware filter index.
  609  */
  610 struct rps_dev_flow {
  611 	u16 cpu;
  612 	u16 filter;
  613 	unsigned int last_qtail;
  614 };
  615 #define RPS_NO_FILTER 0xffff
  616 
  617 /*
  618  * The rps_dev_flow_table structure contains a table of flow mappings.
  619  */
  620 struct rps_dev_flow_table {
  621 	unsigned int mask;
  622 	struct rcu_head rcu;
  623 	struct rps_dev_flow flows[0];
  624 };
  625 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
  626     ((_num) * sizeof(struct rps_dev_flow)))
  627 
  628 /*
  629  * The rps_sock_flow_table contains mappings of flows to the last CPU
  630  * on which they were processed by the application (set in recvmsg).
  631  */
  632 struct rps_sock_flow_table {
  633 	unsigned int mask;
  634 	u16 ents[0];
  635 };
  636 #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
  637     ((_num) * sizeof(u16)))
  638 
  639 #define RPS_NO_CPU 0xffff
  640 
  641 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
  642 					u32 hash)
  643 {
  644 	if (table && hash) {
  645 		unsigned int cpu, index = hash & table->mask;
  646 
  647 		/* We only give a hint, preemption can change cpu under us */
  648 		cpu = raw_smp_processor_id();
  649 
  650 		if (table->ents[index] != cpu)
  651 			table->ents[index] = cpu;
  652 	}
  653 }
  654 
  655 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
  656 				       u32 hash)
  657 {
  658 	if (table && hash)
  659 		table->ents[hash & table->mask] = RPS_NO_CPU;
  660 }
  661 
  662 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
  663 
  664 #ifdef CONFIG_RFS_ACCEL
  665 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
  666 			 u16 filter_id);
  667 #endif
  668 #endif /* CONFIG_RPS */
  669 
  670 /* This structure contains an instance of an RX queue. */
  671 struct netdev_rx_queue {
  672 #ifdef CONFIG_RPS
  673 	struct rps_map __rcu		*rps_map;
  674 	struct rps_dev_flow_table __rcu	*rps_flow_table;
  675 #endif
  676 	struct kobject			kobj;
  677 	struct net_device		*dev;
  678 } ____cacheline_aligned_in_smp;
  679 
  680 /*
  681  * RX queue sysfs structures and functions.
  682  */
  683 struct rx_queue_attribute {
  684 	struct attribute attr;
  685 	ssize_t (*show)(struct netdev_rx_queue *queue,
  686 	    struct rx_queue_attribute *attr, char *buf);
  687 	ssize_t (*store)(struct netdev_rx_queue *queue,
  688 	    struct rx_queue_attribute *attr, const char *buf, size_t len);
  689 };
  690 
  691 #ifdef CONFIG_XPS
  692 /*
  693  * This structure holds an XPS map which can be of variable length.  The
  694  * map is an array of queues.
  695  */
  696 struct xps_map {
  697 	unsigned int len;
  698 	unsigned int alloc_len;
  699 	struct rcu_head rcu;
  700 	u16 queues[0];
  701 };
  702 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
  703 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
  704     / sizeof(u16))
  705 
  706 /*
  707  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
  708  */
  709 struct xps_dev_maps {
  710 	struct rcu_head rcu;
  711 	struct xps_map __rcu *cpu_map[0];
  712 };
  713 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
  714     (nr_cpu_ids * sizeof(struct xps_map *)))
  715 #endif /* CONFIG_XPS */
  716 
  717 #define TC_MAX_QUEUE	16
  718 #define TC_BITMASK	15
  719 /* HW offloaded queuing disciplines txq count and offset maps */
  720 struct netdev_tc_txq {
  721 	u16 count;
  722 	u16 offset;
  723 };
  724 
  725 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
  726 /*
  727  * This structure is to hold information about the device
  728  * configured to run FCoE protocol stack.
  729  */
  730 struct netdev_fcoe_hbainfo {
  731 	char	manufacturer[64];
  732 	char	serial_number[64];
  733 	char	hardware_version[64];
  734 	char	driver_version[64];
  735 	char	optionrom_version[64];
  736 	char	firmware_version[64];
  737 	char	model[256];
  738 	char	model_description[256];
  739 };
  740 #endif
  741 
  742 #define MAX_PHYS_PORT_ID_LEN 32
  743 
  744 /* This structure holds a unique identifier to identify the
  745  * physical port used by a netdevice.
  746  */
  747 struct netdev_phys_port_id {
  748 	unsigned char id[MAX_PHYS_PORT_ID_LEN];
  749 	unsigned char id_len;
  750 };
  751 
  752 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  753 				       struct sk_buff *skb);
  754 
  755 /*
  756  * This structure defines the management hooks for network devices.
  757  * The following hooks can be defined; unless noted otherwise, they are
  758  * optional and can be filled with a null pointer.
  759  *
  760  * int (*ndo_init)(struct net_device *dev);
   761  *     This function is called once when a network device is registered.
   762  *     The network device can use this for any late stage initialization
   763  *     or semantic validation. It can fail with an error code which will
   764  *     be propagated back to register_netdev.
  765  *
  766  * void (*ndo_uninit)(struct net_device *dev);
  767  *     This function is called when device is unregistered or when registration
  768  *     fails. It is not called if init fails.
  769  *
  770  * int (*ndo_open)(struct net_device *dev);
   771  *     This function is called when the network device transitions to the up
  772  *     state.
  773  *
  774  * int (*ndo_stop)(struct net_device *dev);
   775  *     This function is called when the network device transitions to the down
  776  *     state.
  777  *
  778  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  779  *                               struct net_device *dev);
  780  *	Called when a packet needs to be transmitted.
   781  *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY
   782  *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX).
   783  *	Required; can not be NULL.
  784  *
  785  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  786  *                         void *accel_priv, select_queue_fallback_t fallback);
   787  *	Called to decide which queue to use when device supports multiple
  788  *	transmit queues.
  789  *
  790  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  791  *	This function is called to allow device receiver to make
   792  *	changes to configuration when multicast or promiscuous mode is enabled.
  793  *
  794  * void (*ndo_set_rx_mode)(struct net_device *dev);
   795  *	This function is called when the device changes its address list filtering.
  796  *	If driver handles unicast address filtering, it should set
  797  *	IFF_UNICAST_FLT to its priv_flags.
  798  *
  799  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
   800  *	This function is called when the Media Access Control address
  801  *	needs to be changed. If this interface is not defined, the
  802  *	mac address can not be changed.
  803  *
  804  * int (*ndo_validate_addr)(struct net_device *dev);
  805  *	Test if Media Access Control address is valid for the device.
  806  *
  807  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
   808  *	Called when a user requests an ioctl which can't be handled by
   809  *	the generic interface code. If not defined, ioctls return a
   810  *	not-supported error code.
  811  *
  812  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
  813  *	Used to set network devices bus interface parameters. This interface
   814  *	is retained for legacy reasons; new devices should use the bus
  815  *	interface (PCI) for low level management.
  816  *
  817  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  818  *	Called when a user wants to change the Maximum Transfer Unit
   819  *	of a device. If not defined, any request to change the MTU
   820  *	will return an error.
  821  *
  822  * void (*ndo_tx_timeout)(struct net_device *dev);
   823  *	Callback used when the transmitter has not made any progress
  824  *	for dev->watchdog ticks.
  825  *
  826  * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  827  *                      struct rtnl_link_stats64 *storage);
  828  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  829  *	Called when a user wants to get the network device usage
  830  *	statistics. Drivers must do one of the following:
  831  *	1. Define @ndo_get_stats64 to fill in a zero-initialised
  832  *	   rtnl_link_stats64 structure passed by the caller.
  833  *	2. Define @ndo_get_stats to update a net_device_stats structure
  834  *	   (which should normally be dev->stats) and return a pointer to
  835  *	   it. The structure may be changed asynchronously only if each
  836  *	   field is written atomically.
  837  *	3. Update dev->stats asynchronously and atomically, and define
  838  *	   neither operation.
  839  *
   840  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
   841  *	If device supports VLAN filtering this function is called when a
  842  *	VLAN id is registered.
  843  *
  844  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
   845  *	If device supports VLAN filtering this function is called when a
  846  *	VLAN id is unregistered.
  847  *
  848  * void (*ndo_poll_controller)(struct net_device *dev);
  849  *
  850  *	SR-IOV management functions.
  851  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  852  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
  853  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
  854  *			  int max_tx_rate);
  855  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  856  * int (*ndo_get_vf_config)(struct net_device *dev,
  857  *			    int vf, struct ifla_vf_info *ivf);
  858  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
  859  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  860  *			  struct nlattr *port[]);
  861  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  862  * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
  863  * 	Called to setup 'tc' number of traffic classes in the net device. This
  864  * 	is always called from the stack with the rtnl lock held and netif tx
  865  * 	queues stopped. This allows the netdevice to perform queue management
  866  * 	safely.
  867  *
  868  *	Fiber Channel over Ethernet (FCoE) offload functions.
  869  * int (*ndo_fcoe_enable)(struct net_device *dev);
  870  *	Called when the FCoE protocol stack wants to start using LLD for FCoE
  871  *	so the underlying device can perform whatever needed configuration or
  872  *	initialization to support acceleration of FCoE traffic.
  873  *
  874  * int (*ndo_fcoe_disable)(struct net_device *dev);
  875  *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
  876  *	so the underlying device can perform whatever needed clean-ups to
  877  *	stop supporting acceleration of FCoE traffic.
  878  *
  879  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
  880  *			     struct scatterlist *sgl, unsigned int sgc);
  881  *	Called when the FCoE Initiator wants to initialize an I/O that
  882  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  883  *	perform necessary setup and returns 1 to indicate the device is set up
  884  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  885  *
  886  * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
  887  *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
  888  *	indicated by the FC exchange id 'xid', so the underlying device can
  889  *	clean up and reuse resources for later DDP requests.
  890  *
  891  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
  892  *			      struct scatterlist *sgl, unsigned int sgc);
  893  *	Called when the FCoE Target wants to initialize an I/O that
  894  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  895  *	perform necessary setup and returns 1 to indicate the device is set up
  896  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  897  *
  898  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
  899  *			       struct netdev_fcoe_hbainfo *hbainfo);
  900  *	Called when the FCoE Protocol stack wants information on the underlying
  901  *	device. This information is utilized by the FCoE protocol stack to
  902  *	register attributes with Fiber Channel management service as per the
  903  *	FC-GS Fabric Device Management Information(FDMI) specification.
  904  *
  905  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
  906  *	Called when the underlying device wants to override default World Wide
  907  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
  908  *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
  909  *	protocol stack to use.
  910  *
  911  *	RFS acceleration.
  912  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
  913  *			    u16 rxq_index, u32 flow_id);
  914  *	Set hardware filter for RFS.  rxq_index is the target queue index;
  915  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
  916  *	Return the filter ID on success, or a negative error code.
  917  *
  918  *	Slave management functions (for bridge, bonding, etc).
  919  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
  920  *	Called to make another netdev an underling.
  921  *
  922  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
  923  *	Called to release previously enslaved netdev.
  924  *
  925  *      Feature/offload setting functions.
  926  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  927  *		netdev_features_t features);
  928  *	Adjusts the requested feature flags according to device-specific
  929  *	constraints, and returns the resulting flags. Must not modify
  930  *	the device state.
  931  *
  932  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  933  *	Called to update device configuration to new features. Passed
   934  *	feature set might be less than what was returned by ndo_fix_features().
  935  *	Must return >0 or -errno if it changed dev->features itself.
  936  *
  937  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
  938  *		      struct net_device *dev,
  939  *		      const unsigned char *addr, u16 flags)
  940  *	Adds an FDB entry to dev for addr.
  941  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
  942  *		      struct net_device *dev,
  943  *		      const unsigned char *addr)
   944  *	Deletes the FDB entry from dev corresponding to addr.
  945  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
  946  *		       struct net_device *dev, int idx)
  947  *	Used to add FDB entries to dump requests. Implementers should add
  948  *	entries to skb and update idx with the number of entries.
  949  *
  950  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
  951  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
  952  *			     struct net_device *dev, u32 filter_mask)
  953  *
  954  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  955  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
  956  *	which do not represent real hardware may define this to allow their
  957  *	userspace components to manage their virtual carrier state. Devices
  958  *	that determine carrier state from physical hardware properties (eg
  959  *	network cables) or protocol-dependent mechanisms (eg
  960  *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
  961  *
  962  * int (*ndo_get_phys_port_id)(struct net_device *dev,
  963  *			       struct netdev_phys_port_id *ppid);
  964  *	Called to get ID of physical port of this device. If driver does
  965  *	not implement this, it is assumed that the hw is not able to have
   966  *	multiple net devices on a single physical port.
  967  *
  968  * void (*ndo_add_vxlan_port)(struct  net_device *dev,
  969  *			      sa_family_t sa_family, __be16 port);
   970  *	Called by vxlan to notify a driver about the UDP port and socket
   971  *	address family that vxlan is listening to. It is called only when
  972  *	a new port starts listening. The operation is protected by the
  973  *	vxlan_net->sock_lock.
  974  *
  975  * void (*ndo_del_vxlan_port)(struct  net_device *dev,
  976  *			      sa_family_t sa_family, __be16 port);
  977  *	Called by vxlan to notify the driver about a UDP port and socket
  978  *	address family that vxlan is not listening to anymore. The operation
  979  *	is protected by the vxlan_net->sock_lock.
  980  *
  981  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
  982  *				 struct net_device *dev)
  983  *	Called by upper layer devices to accelerate switching or other
   984  *	station functionality into hardware. 'pdev' is the lowerdev
  985  *	to use for the offload and 'dev' is the net device that will
  986  *	back the offload. Returns a pointer to the private structure
  987  *	the upper layer will maintain.
  988  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
  989  *	Called by upper layer device to delete the station created
  990  *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
  991  *	the station and priv is the structure returned by the add
  992  *	operation.
  993  * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
  994  *				      struct net_device *dev,
  995  *				      void *priv);
  996  *	Callback to use for xmit over the accelerated station. This
  997  *	is used in place of ndo_start_xmit on accelerated net
  998  *	devices.
  999  */
 1000 struct net_device_ops {
 1001 	int			(*ndo_init)(struct net_device *dev);
 1002 	void			(*ndo_uninit)(struct net_device *dev);
 1003 	int			(*ndo_open)(struct net_device *dev);
 1004 	int			(*ndo_stop)(struct net_device *dev);
 1005 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
 1006 						   struct net_device *dev);
 1007 	u16			(*ndo_select_queue)(struct net_device *dev,
 1008 						    struct sk_buff *skb,
 1009 						    void *accel_priv,
 1010 						    select_queue_fallback_t fallback);
 1011 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 1012 						       int flags);
 1013 	void			(*ndo_set_rx_mode)(struct net_device *dev);
 1014 	int			(*ndo_set_mac_address)(struct net_device *dev,
 1015 						       void *addr);
 1016 	int			(*ndo_validate_addr)(struct net_device *dev);
 1017 	int			(*ndo_do_ioctl)(struct net_device *dev,
 1018 					        struct ifreq *ifr, int cmd);
 1019 	int			(*ndo_set_config)(struct net_device *dev,
 1020 					          struct ifmap *map);
 1021 	int			(*ndo_change_mtu)(struct net_device *dev,
 1022 						  int new_mtu);
 1023 	int			(*ndo_neigh_setup)(struct net_device *dev,
 1024 						   struct neigh_parms *);
 1025 	void			(*ndo_tx_timeout) (struct net_device *dev);
 1026 
 1027 	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 1028 						     struct rtnl_link_stats64 *storage);
 1029 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 1030 
 1031 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
 1032 						       __be16 proto, u16 vid);
 1033 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
 1034 						        __be16 proto, u16 vid);
 1035 #ifdef CONFIG_NET_POLL_CONTROLLER
 1036 	void                    (*ndo_poll_controller)(struct net_device *dev);
 1037 	int			(*ndo_netpoll_setup)(struct net_device *dev,
 1038 						     struct netpoll_info *info);
 1039 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 1040 #endif
 1041 #ifdef CONFIG_NET_RX_BUSY_POLL
 1042 	int			(*ndo_busy_poll)(struct napi_struct *dev);
 1043 #endif
 1044 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 1045 						  int queue, u8 *mac);
 1046 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
 1047 						   int queue, u16 vlan, u8 qos);
 1048 	int			(*ndo_set_vf_rate)(struct net_device *dev,
 1049 						   int vf, int min_tx_rate,
 1050 						   int max_tx_rate);
 1051 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
 1052 						       int vf, bool setting);
 1053 	int			(*ndo_get_vf_config)(struct net_device *dev,
 1054 						     int vf,
 1055 						     struct ifla_vf_info *ivf);
 1056 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
 1057 							 int vf, int link_state);
 1058 	int			(*ndo_set_vf_port)(struct net_device *dev,
 1059 						   int vf,
 1060 						   struct nlattr *port[]);
 1061 	int			(*ndo_get_vf_port)(struct net_device *dev,
 1062 						   int vf, struct sk_buff *skb);
 1063 	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
 1064 #if IS_ENABLED(CONFIG_FCOE)
 1065 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 1066 	int			(*ndo_fcoe_disable)(struct net_device *dev);
 1067 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
 1068 						      u16 xid,
 1069 						      struct scatterlist *sgl,
 1070 						      unsigned int sgc);
 1071 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
 1072 						     u16 xid);
 1073 	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
 1074 						       u16 xid,
 1075 						       struct scatterlist *sgl,
 1076 						       unsigned int sgc);
 1077 	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 1078 							struct netdev_fcoe_hbainfo *hbainfo);
 1079 #endif
 1080 
 1081 #if IS_ENABLED(CONFIG_LIBFCOE)
 1082 #define NETDEV_FCOE_WWNN 0
 1083 #define NETDEV_FCOE_WWPN 1
 1084 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
 1085 						    u64 *wwn, int type);
 1086 #endif
 1087 
 1088 #ifdef CONFIG_RFS_ACCEL
 1089 	int			(*ndo_rx_flow_steer)(struct net_device *dev,
 1090 						     const struct sk_buff *skb,
 1091 						     u16 rxq_index,
 1092 						     u32 flow_id);
 1093 #endif
 1094 	int			(*ndo_add_slave)(struct net_device *dev,
 1095 						 struct net_device *slave_dev);
 1096 	int			(*ndo_del_slave)(struct net_device *dev,
 1097 						 struct net_device *slave_dev);
 1098 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
 1099 						    netdev_features_t features);
 1100 	int			(*ndo_set_features)(struct net_device *dev,
 1101 						    netdev_features_t features);
 1102 	int			(*ndo_neigh_construct)(struct neighbour *n);
 1103 	void			(*ndo_neigh_destroy)(struct neighbour *n);
 1104 
 1105 	int			(*ndo_fdb_add)(struct ndmsg *ndm,
 1106 					       struct nlattr *tb[],
 1107 					       struct net_device *dev,
 1108 					       const unsigned char *addr,
 1109 					       u16 flags);
 1110 	int			(*ndo_fdb_del)(struct ndmsg *ndm,
 1111 					       struct nlattr *tb[],
 1112 					       struct net_device *dev,
 1113 					       const unsigned char *addr);
 1114 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
 1115 						struct netlink_callback *cb,
 1116 						struct net_device *dev,
 1117 						int idx);
 1118 
 1119 	int			(*ndo_bridge_setlink)(struct net_device *dev,
 1120 						      struct nlmsghdr *nlh);
 1121 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
 1122 						      u32 pid, u32 seq,
 1123 						      struct net_device *dev,
 1124 						      u32 filter_mask);
 1125 	int			(*ndo_bridge_dellink)(struct net_device *dev,
 1126 						      struct nlmsghdr *nlh);
 1127 	int			(*ndo_change_carrier)(struct net_device *dev,
 1128 						      bool new_carrier);
 1129 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
 1130 							struct netdev_phys_port_id *ppid);
 1131 	void			(*ndo_add_vxlan_port)(struct  net_device *dev,
 1132 						      sa_family_t sa_family,
 1133 						      __be16 port);
 1134 	void			(*ndo_del_vxlan_port)(struct  net_device *dev,
 1135 						      sa_family_t sa_family,
 1136 						      __be16 port);
 1137 
 1138 	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
 1139 							struct net_device *dev);
 1140 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
 1141 							void *priv);
 1142 
 1143 	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
 1144 							struct net_device *dev,
 1145 							void *priv);
 1146 	int			(*ndo_get_lock_subclass)(struct net_device *dev);
 1147 };
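/*
 * Illustrative sketch (hypothetical driver, not from this header): a minimal
 * net_device_ops wiring up the mandatory transmit hook plus open/stop.
 * example_open/example_stop/example_start_xmit are assumed driver functions;
 * eth_validate_addr() comes from <linux/etherdevice.h>.
 */
static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,
	.ndo_stop	= example_stop,
	.ndo_start_xmit	= example_start_xmit,	/* required; must not be NULL */
	.ndo_validate_addr = eth_validate_addr,
};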
 1148 
 1149 /**
 1150  * enum net_device_priv_flags - &struct net_device priv_flags
 1151  *
  1152  * These are the &struct net_device priv_flags; they are only set internally
  1153  * by drivers and used in the kernel. These flags are invisible to
  1154  * userspace; this means that the order of these flags can change
 1155  * during any kernel release.
 1156  *
 1157  * You should have a pretty good reason to be extending these flags.
 1158  *
 1159  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 1160  * @IFF_EBRIDGE: Ethernet bridging device
 1161  * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
 1162  * @IFF_MASTER_8023AD: bonding master, 802.3ad
 1163  * @IFF_MASTER_ALB: bonding master, balance-alb
 1164  * @IFF_BONDING: bonding master or slave
 1165  * @IFF_SLAVE_NEEDARP: need ARPs for validation
 1166  * @IFF_ISATAP: ISATAP interface (RFC4214)
 1167  * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
 1168  * @IFF_WAN_HDLC: WAN HDLC device
 1169  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 1170  *	release skb->dst
 1171  * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 1172  * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 1173  * @IFF_MACVLAN_PORT: device used as macvlan port
 1174  * @IFF_BRIDGE_PORT: device used as bridge port
 1175  * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 1176  * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 1177  * @IFF_UNICAST_FLT: Supports unicast filtering
 1178  * @IFF_TEAM_PORT: device used as team port
 1179  * @IFF_SUPP_NOFCS: device supports sending custom FCS
 1180  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 1181  *	change when it's running
 1182  * @IFF_MACVLAN: Macvlan device
 1183  */
 1184 enum netdev_priv_flags {
 1185 	IFF_802_1Q_VLAN			= 1<<0,
 1186 	IFF_EBRIDGE			= 1<<1,
 1187 	IFF_SLAVE_INACTIVE		= 1<<2,
 1188 	IFF_MASTER_8023AD		= 1<<3,
 1189 	IFF_MASTER_ALB			= 1<<4,
 1190 	IFF_BONDING			= 1<<5,
 1191 	IFF_SLAVE_NEEDARP		= 1<<6,
 1192 	IFF_ISATAP			= 1<<7,
 1193 	IFF_MASTER_ARPMON		= 1<<8,
 1194 	IFF_WAN_HDLC			= 1<<9,
 1195 	IFF_XMIT_DST_RELEASE		= 1<<10,
 1196 	IFF_DONT_BRIDGE			= 1<<11,
 1197 	IFF_DISABLE_NETPOLL		= 1<<12,
 1198 	IFF_MACVLAN_PORT		= 1<<13,
 1199 	IFF_BRIDGE_PORT			= 1<<14,
 1200 	IFF_OVS_DATAPATH		= 1<<15,
 1201 	IFF_TX_SKB_SHARING		= 1<<16,
 1202 	IFF_UNICAST_FLT			= 1<<17,
 1203 	IFF_TEAM_PORT			= 1<<18,
 1204 	IFF_SUPP_NOFCS			= 1<<19,
 1205 	IFF_LIVE_ADDR_CHANGE		= 1<<20,
 1206 	IFF_MACVLAN			= 1<<21,
 1207 };
 1208 
 1209 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
 1210 #define IFF_EBRIDGE			IFF_EBRIDGE
 1211 #define IFF_SLAVE_INACTIVE		IFF_SLAVE_INACTIVE
 1212 #define IFF_MASTER_8023AD		IFF_MASTER_8023AD
 1213 #define IFF_MASTER_ALB			IFF_MASTER_ALB
 1214 #define IFF_BONDING			IFF_BONDING
 1215 #define IFF_SLAVE_NEEDARP		IFF_SLAVE_NEEDARP
 1216 #define IFF_ISATAP			IFF_ISATAP
 1217 #define IFF_MASTER_ARPMON		IFF_MASTER_ARPMON
 1218 #define IFF_WAN_HDLC			IFF_WAN_HDLC
 1219 #define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
 1220 #define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
 1221 #define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
 1222 #define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
 1223 #define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
 1224 #define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
 1225 #define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
 1226 #define IFF_UNICAST_FLT			IFF_UNICAST_FLT
 1227 #define IFF_TEAM_PORT			IFF_TEAM_PORT
 1228 #define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
 1229 #define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
 1230 #define IFF_MACVLAN			IFF_MACVLAN
 1231 
 1232 /*
 1233  *	The DEVICE structure.
 1234  *	Actually, this whole structure is a big mistake.  It mixes I/O
 1235  *	data with strictly "high-level" data, and it has to know about
 1236  *	almost every data structure used in the INET module.
 1237  *
 1238  *	FIXME: cleanup struct net_device such that network protocol info
 1239  *	moves out.
 1240  */
 1241 
 1242 struct net_device {
 1243 
 1244 	/*
 1245 	 * This is the first field of the "visible" part of this structure
 1246 	 * (i.e. as seen by users in the "Space.c" file).  It is the name
 1247 	 * of the interface.
 1248 	 */
 1249 	char			name[IFNAMSIZ];
 1250 
 1251 	/* device name hash chain, please keep it close to name[] */
 1252 	struct hlist_node	name_hlist;
 1253 
 1254 	/* snmp alias */
 1255 	char 			*ifalias;
 1256 
 1257 	/*
 1258 	 *	I/O specific fields
 1259 	 *	FIXME: Merge these and struct ifmap into one
 1260 	 */
 1261 	unsigned long		mem_end;	/* shared mem end	*/
 1262 	unsigned long		mem_start;	/* shared mem start	*/
 1263 	unsigned long		base_addr;	/* device I/O address	*/
 1264 	int			irq;		/* device IRQ number	*/
 1265 
 1266 	/*
 1267 	 *	Some hardware also needs these fields, but they are not
 1268 	 *	part of the usual set specified in Space.c.
 1269 	 */
 1270 
 1271 	unsigned long		state;
 1272 
 1273 	struct list_head	dev_list;
 1274 	struct list_head	napi_list;
 1275 	struct list_head	unreg_list;
 1276 	struct list_head	close_list;
 1277 
 1278 	/* directly linked devices, like slaves for bonding */
 1279 	struct {
 1280 		struct list_head upper;
 1281 		struct list_head lower;
 1282 	} adj_list;
 1283 
 1284 	/* all linked devices, *including* neighbours */
 1285 	struct {
 1286 		struct list_head upper;
 1287 		struct list_head lower;
 1288 	} all_adj_list;
 1289 
 1290 
 1291 	/* currently active device features */
 1292 	netdev_features_t	features;
 1293 	/* user-changeable features */
 1294 	netdev_features_t	hw_features;
 1295 	/* user-requested features */
 1296 	netdev_features_t	wanted_features;
 1297 	/* mask of features inheritable by VLAN devices */
 1298 	netdev_features_t	vlan_features;
 1299 	/* mask of features inherited by encapsulating devices
 1300 	 * This field indicates what encapsulation offloads
 1301 	 * the hardware is capable of doing, and drivers will
 1302 	 * need to set them appropriately.
 1303 	 */
 1304 	netdev_features_t	hw_enc_features;
  1305 	/* mask of features inheritable by MPLS */
 1306 	netdev_features_t	mpls_features;
 1307 
 1308 	/* Interface index. Unique device identifier	*/
 1309 	int			ifindex;
 1310 	int			iflink;
 1311 
 1312 	struct net_device_stats	stats;
 1313 
 1314 	/* dropped packets by core network, Do not use this in drivers */
 1315 	atomic_long_t		rx_dropped;
 1316 	atomic_long_t		tx_dropped;
 1317 
 1318 	/* Stats to monitor carrier on<->off transitions */
 1319 	atomic_t		carrier_changes;
 1320 
 1321 #ifdef CONFIG_WIRELESS_EXT
 1322 	/* List of functions to handle Wireless Extensions (instead of ioctl).
 1323 	 * See <net/iw_handler.h> for details. Jean II */
 1324 	const struct iw_handler_def *	wireless_handlers;
 1325 	/* Instance data managed by the core of Wireless Extensions. */
 1326 	struct iw_public_data *	wireless_data;
 1327 #endif
 1328 	/* Management operations */
 1329 	const struct net_device_ops *netdev_ops;
 1330 	const struct ethtool_ops *ethtool_ops;
 1331 	const struct forwarding_accel_ops *fwd_ops;
 1332 
 1333 	/* Hardware header description */
 1334 	const struct header_ops *header_ops;
 1335 
 1336 	unsigned int		flags;	/* interface flags (a la BSD)	*/
 1337 	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
 1338 					     * See if.h for definitions. */
 1339 	unsigned short		gflags;
 1340 	unsigned short		padded;	/* How much padding added by alloc_netdev() */
 1341 
 1342 	unsigned char		operstate; /* RFC2863 operstate */
 1343 	unsigned char		link_mode; /* mapping policy to operstate */
 1344 
 1345 	unsigned char		if_port;	/* Selectable AUI, TP,..*/
 1346 	unsigned char		dma;		/* DMA channel		*/
 1347 
 1348 	unsigned int		mtu;	/* interface MTU value		*/
 1349 	unsigned short		type;	/* interface hardware type	*/
 1350 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 1351 
 1352 	/* extra head- and tailroom the hardware may need, but not in all cases
 1353 	 * can this be guaranteed, especially tailroom. Some cases also use
 1354 	 * LL_MAX_HEADER instead to allocate the skb.
 1355 	 */
 1356 	unsigned short		needed_headroom;
 1357 	unsigned short		needed_tailroom;
 1358 
 1359 	/* Interface address info. */
 1360 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
 1361 	unsigned char		addr_assign_type; /* hw address assignment type */
 1362 	unsigned char		addr_len;	/* hardware address length	*/
 1363 	unsigned short		neigh_priv_len;
 1364 	unsigned short          dev_id;		/* Used to differentiate devices
 1365 						 * that share the same link
 1366 						 * layer address
 1367 						 */
 1368 	unsigned short          dev_port;	/* Used to differentiate
 1369 						 * devices that share the same
 1370 						 * function
 1371 						 */
 1372 	spinlock_t		addr_list_lock;
 1373 	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
 1374 	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
 1375 	struct netdev_hw_addr_list	dev_addrs; /* list of device
 1376 						    * hw addresses
 1377 						    */
 1378 #ifdef CONFIG_SYSFS
 1379 	struct kset		*queues_kset;
 1380 #endif
 1381 
 1382 	bool			uc_promisc;
 1383 	unsigned int		promiscuity;
 1384 	unsigned int		allmulti;
 1385 
 1386 
 1387 	/* Protocol specific pointers */
 1388 
 1389 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 1390 	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
 1391 #endif
 1392 #if IS_ENABLED(CONFIG_NET_DSA)
 1393 	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
 1394 #endif
 1395 #if IS_ENABLED(CONFIG_TIPC)
 1396 	struct tipc_bearer __rcu *tipc_ptr;	/* TIPC specific data */
 1397 #endif
 1398 	void 			*atalk_ptr;	/* AppleTalk link 	*/
 1399 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
 1400 	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
 1401 	struct inet6_dev __rcu	*ip6_ptr;       /* IPv6 specific data */
 1402 	void			*ax25_ptr;	/* AX.25 specific data */
 1403 	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
 1404 						   assign before registering */
 1405 
 1406 /*
 1407  * Cache lines mostly used on receive path (including eth_type_trans())
 1408  */
 1409 	unsigned long		last_rx;	/* Time of last Rx */
 1410 
 1411 	/* Interface address info used in eth_type_trans() */
 1412 	unsigned char		*dev_addr;	/* hw address, (before bcast
 1413 						   because most packets are
 1414 						   unicast) */
 1415 
 1416 
 1417 #ifdef CONFIG_SYSFS
 1418 	struct netdev_rx_queue	*_rx;
 1419 
 1420 	/* Number of RX queues allocated at register_netdev() time */
 1421 	unsigned int		num_rx_queues;
 1422 
 1423 	/* Number of RX queues currently active in device */
 1424 	unsigned int		real_num_rx_queues;
 1425 
 1426 #endif
 1427 
 1428 	rx_handler_func_t __rcu	*rx_handler;
 1429 	void __rcu		*rx_handler_data;
 1430 
 1431 	struct netdev_queue __rcu *ingress_queue;
 1432 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 1433 
 1434 
 1435 /*
 1436  * Cache lines mostly used on transmit path
 1437  */
 1438 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 1439 
 1440 	/* Number of TX queues allocated at alloc_netdev_mq() time  */
 1441 	unsigned int		num_tx_queues;
 1442 
 1443 	/* Number of TX queues currently active in device  */
 1444 	unsigned int		real_num_tx_queues;
 1445 
 1446 	/* root qdisc from userspace point of view */
 1447 	struct Qdisc		*qdisc;
 1448 
 1449 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 1450 	spinlock_t		tx_global_lock;
 1451 
 1452 #ifdef CONFIG_XPS
 1453 	struct xps_dev_maps __rcu *xps_maps;
 1454 #endif
 1455 #ifdef CONFIG_RFS_ACCEL
 1456 	/* CPU reverse-mapping for RX completion interrupts, indexed
 1457 	 * by RX queue number.  Assigned by driver.  This must only be
 1458 	 * set if the ndo_rx_flow_steer operation is defined. */
 1459 	struct cpu_rmap		*rx_cpu_rmap;
 1460 #endif
 1461 
 1462 	/* These may be needed for future network-power-down code. */
 1463 
 1464 	/*
 1465 	 * trans_start here is expensive for high speed devices on SMP,
 1466 	 * please use netdev_queue->trans_start instead.
 1467 	 */
 1468 	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/
 1469 
 1470 	int			watchdog_timeo; /* used by dev_watchdog() */
 1471 	struct timer_list	watchdog_timer;
 1472 
 1473 	/* Number of references to this device */
 1474 	int __percpu		*pcpu_refcnt;
 1475 
 1476 	/* delayed register/unregister */
 1477 	struct list_head	todo_list;
 1478 	/* device index hash chain */
 1479 	struct hlist_node	index_hlist;
 1480 
 1481 	struct list_head	link_watch_list;
 1482 
 1483 	/* register/unregister state machine */
 1484 	enum { NETREG_UNINITIALIZED=0,
 1485 	       NETREG_REGISTERED,	/* completed register_netdevice */
 1486 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 1487 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 1488 	       NETREG_RELEASED,		/* called free_netdev */
 1489 	       NETREG_DUMMY,		/* dummy device for NAPI poll */
 1490 	} reg_state:8;
 1491 
 1492 	bool dismantle; /* device is going to be freed */
 1493 
 1494 	enum {
 1495 		RTNL_LINK_INITIALIZED,
 1496 		RTNL_LINK_INITIALIZING,
 1497 	} rtnl_link_state:16;
 1498 
 1499 	/* Called from unregister, can be used to call free_netdev */
 1500 	void (*destructor)(struct net_device *dev);
 1501 
 1502 #ifdef CONFIG_NETPOLL
 1503 	struct netpoll_info __rcu	*npinfo;
 1504 #endif
 1505 
 1506 #ifdef CONFIG_NET_NS
 1507 	/* Network namespace this network device is inside */
 1508 	struct net		*nd_net;
 1509 #endif
 1510 
 1511 	/* mid-layer private */
 1512 	union {
 1513 		void				*ml_priv;
 1514 		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
 1515 		struct pcpu_sw_netstats __percpu	*tstats;
 1516 		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
 1517 		struct pcpu_vstats __percpu	*vstats; /* veth stats */
 1518 	};
 1519 	/* GARP */
 1520 	struct garp_port __rcu	*garp_port;
 1521 	/* MRP */
 1522 	struct mrp_port __rcu	*mrp_port;
 1523 
 1524 	/* class/net/name entry */
 1525 	struct device		dev;
 1526 	/* space for optional device, statistics, and wireless sysfs groups */
 1527 	const struct attribute_group *sysfs_groups[4];
 1528 	/* space for optional per-rx queue attributes */
 1529 	const struct attribute_group *sysfs_rx_queue_group;
 1530 
 1531 	/* rtnetlink link ops */
 1532 	const struct rtnl_link_ops *rtnl_link_ops;
 1533 
 1534 	/* for setting kernel sock attribute on TCP connection setup */
 1535 #define GSO_MAX_SIZE		65536
 1536 	unsigned int		gso_max_size;
 1537 #define GSO_MAX_SEGS		65535
 1538 	u16			gso_max_segs;
 1539 
 1540 #ifdef CONFIG_DCB
 1541 	/* Data Center Bridging netlink ops */
 1542 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 1543 #endif
 1544 	u8 num_tc;
 1545 	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
 1546 	u8 prio_tc_map[TC_BITMASK + 1];
 1547 
 1548 #if IS_ENABLED(CONFIG_FCOE)
 1549 	/* max exchange id for FCoE LRO by ddp */
 1550 	unsigned int		fcoe_ddp_xid;
 1551 #endif
 1552 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 1553 	struct netprio_map __rcu *priomap;
 1554 #endif
 1555 	/* phy device may attach itself for hardware timestamping */
 1556 	struct phy_device *phydev;
 1557 
 1558 	struct lock_class_key *qdisc_tx_busylock;
 1559 
 1560 	/* group the device belongs to */
 1561 	int group;
 1562 
 1563 	struct pm_qos_request	pm_qos_req;
 1564 };
 1565 #define to_net_dev(d) container_of(d, struct net_device, dev)
 1566 
 1567 #define	NETDEV_ALIGN		32
 1568 
 1569 static inline
 1570 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
 1571 {
 1572 	return dev->prio_tc_map[prio & TC_BITMASK];
 1573 }
 1574 
 1575 static inline
 1576 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
 1577 {
 1578 	if (tc >= dev->num_tc)
 1579 		return -EINVAL;
 1580 
 1581 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
 1582 	return 0;
 1583 }
 1584 
 1585 static inline
 1586 void netdev_reset_tc(struct net_device *dev)
 1587 {
 1588 	dev->num_tc = 0;
 1589 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
 1590 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
 1591 }
 1592 
 1593 static inline
 1594 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
 1595 {
 1596 	if (tc >= dev->num_tc)
 1597 		return -EINVAL;
 1598 
 1599 	dev->tc_to_txq[tc].count = count;
 1600 	dev->tc_to_txq[tc].offset = offset;
 1601 	return 0;
 1602 }
 1603 
 1604 static inline
 1605 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
 1606 {
 1607 	if (num_tc > TC_MAX_QUEUE)
 1608 		return -EINVAL;
 1609 
 1610 	dev->num_tc = num_tc;
 1611 	return 0;
 1612 }
 1613 
 1614 static inline
 1615 int netdev_get_num_tc(struct net_device *dev)
 1616 {
 1617 	return dev->num_tc;
 1618 }
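/*
 * Illustrative sketch, not part of the original header: how a multiqueue
 * driver might use the traffic-class helpers above.  The layout (2 traffic
 * classes, 4 TX queues each) and the priority mapping are made-up example
 * values; example_setup_tc() is a hypothetical name.
 */
static int example_setup_tc(struct net_device *dev)
{
	int err, prio;

	netdev_reset_tc(dev);

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	/* TC0 owns TX queues 0-3, TC1 owns TX queues 4-7 */
	err = netdev_set_tc_queue(dev, 0, 4, 0);
	if (!err)
		err = netdev_set_tc_queue(dev, 1, 4, 4);
	if (err)
		return err;

	/* priorities 0-3 map to TC0, 4 and above to TC1 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);

	return 0;
}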
 1619 
 1620 static inline
 1621 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 1622 					 unsigned int index)
 1623 {
 1624 	return &dev->_tx[index];
 1625 }
 1626 
 1627 static inline void netdev_for_each_tx_queue(struct net_device *dev,
 1628 					    void (*f)(struct net_device *,
 1629 						      struct netdev_queue *,
 1630 						      void *),
 1631 					    void *arg)
 1632 {
 1633 	unsigned int i;
 1634 
 1635 	for (i = 0; i < dev->num_tx_queues; i++)
 1636 		f(dev, &dev->_tx[i], arg);
 1637 }
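/*
 * Illustrative sketch, not part of the original header: iterating all TX
 * queues with netdev_for_each_tx_queue().  The callback resets the BQL
 * state of each queue using netdev_tx_reset_queue(), which is defined
 * further down in this header; the function names are hypothetical.
 */
static void example_reset_one_txq(struct net_device *dev,
				  struct netdev_queue *txq, void *unused)
{
	netdev_tx_reset_queue(txq);
}

static void example_reset_all_txqs(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, example_reset_one_txq, NULL);
}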
 1638 
 1639 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 1640 				    struct sk_buff *skb,
 1641 				    void *accel_priv);
 1642 
 1643 /*
 1644  * Net namespace inlines
 1645  */
 1646 static inline
 1647 struct net *dev_net(const struct net_device *dev)
 1648 {
 1649 	return read_pnet(&dev->nd_net);
 1650 }
 1651 
 1652 static inline
 1653 void dev_net_set(struct net_device *dev, struct net *net)
 1654 {
 1655 #ifdef CONFIG_NET_NS
 1656 	release_net(dev->nd_net);
 1657 	dev->nd_net = hold_net(net);
 1658 #endif
 1659 }
 1660 
 1661 static inline bool netdev_uses_dsa_tags(struct net_device *dev)
 1662 {
 1663 #ifdef CONFIG_NET_DSA_TAG_DSA
 1664 	if (dev->dsa_ptr != NULL)
 1665 		return dsa_uses_dsa_tags(dev->dsa_ptr);
 1666 #endif
 1667 
 1668 	return 0;
 1669 }
 1670 
 1671 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
 1672 {
 1673 #ifdef CONFIG_NET_DSA_TAG_TRAILER
 1674 	if (dev->dsa_ptr != NULL)
 1675 		return dsa_uses_trailer_tags(dev->dsa_ptr);
 1676 #endif
 1677 
 1678 	return 0;
 1679 }
 1680 
 1681 /**
 1682  *	netdev_priv - access network device private data
 1683  *	@dev: network device
 1684  *
 1685  * Get network device private data
 1686  */
 1687 static inline void *netdev_priv(const struct net_device *dev)
 1688 {
 1689 	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
 1690 }
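/*
 * Illustrative sketch, not part of the original header: the usual
 * netdev_priv() pattern.  "struct example_priv" is hypothetical; its size
 * is what a real driver would pass to alloc_netdev() so that the private
 * area directly follows struct net_device.
 */
struct example_priv {
	struct napi_struct napi;
	u32 msg_enable;
};

static void example_use_priv(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->msg_enable = 0;
}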
 1691 
 1692 /* Set the sysfs physical device reference for the network logical device.
 1693  * If set prior to registration, a symlink is created during initialization.
 1694  */
 1695 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 1696 
 1697 /* Set the sysfs device type for the network logical device to allow
 1698  * fine-grained identification of different network device types. For
 1699  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 1700  */
 1701 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
 1702 
 1703 /* Default NAPI poll() weight
 1704  * Device drivers are strongly advised not to use a bigger value
 1705  */
 1706 #define NAPI_POLL_WEIGHT 64
 1707 
 1708 /**
 1709  *	netif_napi_add - initialize a napi context
 1710  *	@dev:  network device
 1711  *	@napi: napi context
 1712  *	@poll: polling function
 1713  *	@weight: default weight
 1714  *
 1715  * netif_napi_add() must be used to initialize a napi context prior to calling
 1716  * *any* of the other napi related functions.
 1717  */
 1718 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 1719 		    int (*poll)(struct napi_struct *, int), int weight);
 1720 
 1721 /**
 1722  *  netif_napi_del - remove a napi context
 1723  *  @napi: napi context
 1724  *
 1725  *  netif_napi_del() removes a napi context from the network device napi list
 1726  */
 1727 void netif_napi_del(struct napi_struct *napi);
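/*
 * Illustrative sketch, not part of the original header: a minimal NAPI
 * poll routine and where netif_napi_add()/netif_napi_del() would be
 * called.  example_poll() is a hypothetical name; the RX cleanup itself
 * is driver specific and only indicated by a comment.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* work_done = number of RX packets processed, at most "budget" */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts here */
	}
	return work_done;
}

/* at probe time:   netif_napi_add(dev, &priv->napi, example_poll,
 *                                 NAPI_POLL_WEIGHT);
 * at remove time:  netif_napi_del(&priv->napi);
 */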
 1728 
 1729 struct napi_gro_cb {
 1730 	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
 1731 	void *frag0;
 1732 
 1733 	/* Length of frag0. */
 1734 	unsigned int frag0_len;
 1735 
 1736 	/* This indicates where we are processing relative to skb->data. */
 1737 	int data_offset;
 1738 
 1739 	/* This is non-zero if the packet cannot be merged with the new skb. */
 1740 	u16	flush;
 1741 
 1742 	/* Save the IP ID here and check when we get to the transport layer */
 1743 	u16	flush_id;
 1744 
 1745 	/* Number of segments aggregated. */
 1746 	u16	count;
 1747 
 1748 	/* This is non-zero if the packet may be of the same flow. */
 1749 	u8	same_flow;
 1750 
 1751 	/* Free the skb? */
 1752 	u8	free;
 1753 #define NAPI_GRO_FREE		  1
 1754 #define NAPI_GRO_FREE_STOLEN_HEAD 2
 1755 
 1756 	/* jiffies when first packet was created/queued */
 1757 	unsigned long age;
 1758 
 1759 	/* Used in ipv6_gro_receive() */
 1760 	u16	proto;
 1761 
 1762 	/* Used in udp_gro_receive */
 1763 	u16	udp_mark;
 1764 
 1765 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 1766 	__wsum	csum;
 1767 
 1768 	/* used in skb_gro_receive() slow path */
 1769 	struct sk_buff *last;
 1770 };
 1771 
 1772 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 1773 
 1774 struct packet_type {
 1775 	__be16			type;	/* This is really htons(ether_type). */
 1776 	struct net_device	*dev;	/* NULL is wildcarded here	     */
 1777 	int			(*func) (struct sk_buff *,
 1778 					 struct net_device *,
 1779 					 struct packet_type *,
 1780 					 struct net_device *);
 1781 	bool			(*id_match)(struct packet_type *ptype,
 1782 					    struct sock *sk);
 1783 	void			*af_packet_priv;
 1784 	struct list_head	list;
 1785 };
 1786 
 1787 struct offload_callbacks {
 1788 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 1789 						netdev_features_t features);
 1790 	int			(*gso_send_check)(struct sk_buff *skb);
 1791 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 1792 					       struct sk_buff *skb);
 1793 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 1794 };
 1795 
 1796 struct packet_offload {
 1797 	__be16			 type;	/* This is really htons(ether_type). */
 1798 	struct offload_callbacks callbacks;
 1799 	struct list_head	 list;
 1800 };
 1801 
 1802 struct udp_offload {
 1803 	__be16			 port;
 1804 	struct offload_callbacks callbacks;
 1805 };
 1806 
 1807 /* often modified stats are per cpu, other are shared (netdev->stats) */
 1808 struct pcpu_sw_netstats {
 1809 	u64     rx_packets;
 1810 	u64     rx_bytes;
 1811 	u64     tx_packets;
 1812 	u64     tx_bytes;
 1813 	struct u64_stats_sync   syncp;
 1814 };
 1815 
 1816 #define netdev_alloc_pcpu_stats(type)				\
 1817 ({								\
 1818 	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
 1819 	if (pcpu_stats)	{					\
 1820 		int i;						\
 1821 		for_each_possible_cpu(i) {			\
 1822 			typeof(type) *stat;			\
 1823 			stat = per_cpu_ptr(pcpu_stats, i);	\
 1824 			u64_stats_init(&stat->syncp);		\
 1825 		}						\
 1826 	}							\
 1827 	pcpu_stats;						\
 1828 })
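/*
 * Illustrative sketch, not part of the original header: allocating the
 * per-cpu stats with the macro above and updating them on receive.  The
 * tstats pointer matches the union member in struct net_device; the
 * function names are hypothetical.
 */
static int example_init_stats(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void example_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}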
 1829 
 1830 #include <linux/notifier.h>
 1831 
 1832 /* netdevice notifier chain. Please remember to update the rtnetlink
 1833  * notification exclusion list in rtnetlink_event() when adding new
 1834  * types.
 1835  */
 1836 #define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
 1837 #define NETDEV_DOWN	0x0002
 1838 #define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
 1839 				   detected a hardware crash and restarted
 1840 				   - we can use this eg to kick tcp sessions
 1841 				   once done */
 1842 #define NETDEV_CHANGE	0x0004	/* Notify device state change */
 1843 #define NETDEV_REGISTER 0x0005
 1844 #define NETDEV_UNREGISTER	0x0006
 1845 #define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
 1846 #define NETDEV_CHANGEADDR	0x0008
 1847 #define NETDEV_GOING_DOWN	0x0009
 1848 #define NETDEV_CHANGENAME	0x000A
 1849 #define NETDEV_FEAT_CHANGE	0x000B
 1850 #define NETDEV_BONDING_FAILOVER 0x000C
 1851 #define NETDEV_PRE_UP		0x000D
 1852 #define NETDEV_PRE_TYPE_CHANGE	0x000E
 1853 #define NETDEV_POST_TYPE_CHANGE	0x000F
 1854 #define NETDEV_POST_INIT	0x0010
 1855 #define NETDEV_UNREGISTER_FINAL 0x0011
 1856 #define NETDEV_RELEASE		0x0012
 1857 #define NETDEV_NOTIFY_PEERS	0x0013
 1858 #define NETDEV_JOIN		0x0014
 1859 #define NETDEV_CHANGEUPPER	0x0015
 1860 #define NETDEV_RESEND_IGMP	0x0016
 1861 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
 1862 
 1863 int register_netdevice_notifier(struct notifier_block *nb);
 1864 int unregister_netdevice_notifier(struct notifier_block *nb);
 1865 
 1866 struct netdev_notifier_info {
 1867 	struct net_device *dev;
 1868 };
 1869 
 1870 struct netdev_notifier_change_info {
 1871 	struct netdev_notifier_info info; /* must be first */
 1872 	unsigned int flags_changed;
 1873 };
 1874 
 1875 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
 1876 					     struct net_device *dev)
 1877 {
 1878 	info->dev = dev;
 1879 }
 1880 
 1881 static inline struct net_device *
 1882 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
 1883 {
 1884 	return info->dev;
 1885 }
 1886 
 1887 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
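/*
 * Illustrative sketch, not part of the original header: a netdevice
 * notifier reacting to NETDEV_UP/NETDEV_DOWN.  The names prefixed
 * "example_" are hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("%s is down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) at module init,
 * unregister_netdevice_notifier(&example_netdev_nb) at module exit. */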
 1888 
 1889 
 1890 extern rwlock_t				dev_base_lock;		/* Device list lock */
 1891 
 1892 #define for_each_netdev(net, d)		\
 1893 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
 1894 #define for_each_netdev_reverse(net, d)	\
 1895 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
 1896 #define for_each_netdev_rcu(net, d)		\
 1897 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
 1898 #define for_each_netdev_safe(net, d, n)	\
 1899 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
 1900 #define for_each_netdev_continue(net, d)		\
 1901 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
 1902 #define for_each_netdev_continue_rcu(net, d)		\
 1903 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 1904 #define for_each_netdev_in_bond_rcu(bond, slave)	\
 1905 		for_each_netdev_rcu(&init_net, slave)	\
 1906 			if (netdev_master_upper_dev_get_rcu(slave) == bond)
 1907 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
 1908 
 1909 static inline struct net_device *next_net_device(struct net_device *dev)
 1910 {
 1911 	struct list_head *lh;
 1912 	struct net *net;
 1913 
 1914 	net = dev_net(dev);
 1915 	lh = dev->dev_list.next;
 1916 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 1917 }
 1918 
 1919 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
 1920 {
 1921 	struct list_head *lh;
 1922 	struct net *net;
 1923 
 1924 	net = dev_net(dev);
 1925 	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
 1926 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 1927 }
 1928 
 1929 static inline struct net_device *first_net_device(struct net *net)
 1930 {
 1931 	return list_empty(&net->dev_base_head) ? NULL :
 1932 		net_device_entry(net->dev_base_head.next);
 1933 }
 1934 
 1935 static inline struct net_device *first_net_device_rcu(struct net *net)
 1936 {
 1937 	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
 1938 
 1939 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 1940 }
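/*
 * Illustrative sketch, not part of the original header: walking every
 * device of a namespace under RCU with the iterators above.
 */
static void example_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}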
 1941 
 1942 int netdev_boot_setup_check(struct net_device *dev);
 1943 unsigned long netdev_boot_base(const char *prefix, int unit);
 1944 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 1945 				       const char *hwaddr);
 1946 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 1947 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 1948 void dev_add_pack(struct packet_type *pt);
 1949 void dev_remove_pack(struct packet_type *pt);
 1950 void __dev_remove_pack(struct packet_type *pt);
 1951 void dev_add_offload(struct packet_offload *po);
 1952 void dev_remove_offload(struct packet_offload *po);
 1953 
 1954 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
 1955 					unsigned short mask);
 1956 struct net_device *dev_get_by_name(struct net *net, const char *name);
 1957 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
 1958 struct net_device *__dev_get_by_name(struct net *net, const char *name);
 1959 int dev_alloc_name(struct net_device *dev, const char *name);
 1960 int dev_open(struct net_device *dev);
 1961 int dev_close(struct net_device *dev);
 1962 void dev_disable_lro(struct net_device *dev);
 1963 int dev_loopback_xmit(struct sk_buff *newskb);
 1964 int dev_queue_xmit(struct sk_buff *skb);
 1965 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 1966 int register_netdevice(struct net_device *dev);
 1967 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 1968 void unregister_netdevice_many(struct list_head *head);
 1969 static inline void unregister_netdevice(struct net_device *dev)
 1970 {
 1971 	unregister_netdevice_queue(dev, NULL);
 1972 }
 1973 
 1974 int netdev_refcnt_read(const struct net_device *dev);
 1975 void free_netdev(struct net_device *dev);
 1976 void netdev_freemem(struct net_device *dev);
 1977 void synchronize_net(void);
 1978 int init_dummy_netdev(struct net_device *dev);
 1979 
 1980 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 1981 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 1982 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 1983 int netdev_get_name(struct net *net, char *name, int ifindex);
 1984 int dev_restart(struct net_device *dev);
 1985 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 1986 
 1987 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 1988 {
 1989 	return NAPI_GRO_CB(skb)->data_offset;
 1990 }
 1991 
 1992 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
 1993 {
 1994 	return skb->len - NAPI_GRO_CB(skb)->data_offset;
 1995 }
 1996 
 1997 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
 1998 {
 1999 	NAPI_GRO_CB(skb)->data_offset += len;
 2000 }
 2001 
 2002 static inline void *skb_gro_header_fast(struct sk_buff *skb,
 2003 					unsigned int offset)
 2004 {
 2005 	return NAPI_GRO_CB(skb)->frag0 + offset;
 2006 }
 2007 
 2008 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 2009 {
 2010 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
 2011 }
 2012 
 2013 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 2014 					unsigned int offset)
 2015 {
 2016 	if (!pskb_may_pull(skb, hlen))
 2017 		return NULL;
 2018 
 2019 	NAPI_GRO_CB(skb)->frag0 = NULL;
 2020 	NAPI_GRO_CB(skb)->frag0_len = 0;
 2021 	return skb->data + offset;
 2022 }
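/*
 * Illustrative sketch, not part of the original header: the usual way a
 * gro_receive callback maps its protocol header with the helpers above,
 * modelled on inet_gro_receive().  "struct example_hdr" is hypothetical.
 */
struct example_hdr {
	__be16 field;
};

static struct example_hdr *example_gro_header(struct sk_buff *skb)
{
	struct example_hdr *hdr;
	unsigned int off, hlen;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*hdr);
	hdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		hdr = skb_gro_header_slow(skb, hlen, off); /* may return NULL */

	return hdr;
}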
 2023 
 2024 static inline void *skb_gro_network_header(struct sk_buff *skb)
 2025 {
 2026 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
 2027 	       skb_network_offset(skb);
 2028 }
 2029 
 2030 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
 2031 					const void *start, unsigned int len)
 2032 {
 2033 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 2034 		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
 2035 						  csum_partial(start, len, 0));
 2036 }
 2037 
 2038 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 2039 				  unsigned short type,
 2040 				  const void *daddr, const void *saddr,
 2041 				  unsigned int len)
 2042 {
 2043 	if (!dev->header_ops || !dev->header_ops->create)
 2044 		return 0;
 2045 
 2046 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
 2047 }
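/*
 * Illustrative sketch, not part of the original header: building a link
 * layer header on an outgoing skb.  "daddr" stands for a destination
 * hardware address supplied by the caller; ETH_P_IP comes from
 * <linux/if_ether.h>.
 */
static int example_build_header(struct sk_buff *skb, struct net_device *dev,
				const unsigned char *daddr)
{
	return dev_hard_header(skb, dev, ETH_P_IP, daddr, dev->dev_addr,
			       skb->len);
}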
 2048 
 2049 static inline int dev_parse_header(const struct sk_buff *skb,
 2050 				   unsigned char *haddr)
 2051 {
 2052 	const struct net_device *dev = skb->dev;
 2053 
 2054 	if (!dev->header_ops || !dev->header_ops->parse)
 2055 		return 0;
 2056 	return dev->header_ops->parse(skb, haddr);
 2057 }
 2058 
 2059 static inline int dev_rebuild_header(struct sk_buff *skb)
 2060 {
 2061 	const struct net_device *dev = skb->dev;
 2062 
 2063 	if (!dev->header_ops || !dev->header_ops->rebuild)
 2064 		return 0;
 2065 	return dev->header_ops->rebuild(skb);
 2066 }
 2067 
 2068 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 2069 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 2070 static inline int unregister_gifconf(unsigned int family)
 2071 {
 2072 	return register_gifconf(family, NULL);
 2073 }
 2074 
 2075 #ifdef CONFIG_NET_FLOW_LIMIT
 2076 #define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
 2077 struct sd_flow_limit {
 2078 	u64			count;
 2079 	unsigned int		num_buckets;
 2080 	unsigned int		history_head;
 2081 	u16			history[FLOW_LIMIT_HISTORY];
 2082 	u8			buckets[];
 2083 };
 2084 
 2085 extern int netdev_flow_limit_table_len;
 2086 #endif /* CONFIG_NET_FLOW_LIMIT */
 2087 
 2088 /*
 2089  * Incoming packets are placed on per-cpu queues
 2090  */
 2091 struct softnet_data {
 2092 	struct Qdisc		*output_queue;
 2093 	struct Qdisc		**output_queue_tailp;
 2094 	struct list_head	poll_list;
 2095 	struct sk_buff		*completion_queue;
 2096 	struct sk_buff_head	process_queue;
 2097 
 2098 	/* stats */
 2099 	unsigned int		processed;
 2100 	unsigned int		time_squeeze;
 2101 	unsigned int		cpu_collision;
 2102 	unsigned int		received_rps;
 2103 
 2104 #ifdef CONFIG_RPS
 2105 	struct softnet_data	*rps_ipi_list;
 2106 
 2107 	/* Elements below can be accessed between CPUs for RPS */
 2108 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 2109 	struct softnet_data	*rps_ipi_next;
 2110 	unsigned int		cpu;
 2111 	unsigned int		input_queue_head;
 2112 	unsigned int		input_queue_tail;
 2113 #endif
 2114 	unsigned int		dropped;
 2115 	struct sk_buff_head	input_pkt_queue;
 2116 	struct napi_struct	backlog;
 2117 
 2118 #ifdef CONFIG_NET_FLOW_LIMIT
 2119 	struct sd_flow_limit __rcu *flow_limit;
 2120 #endif
 2121 };
 2122 
 2123 static inline void input_queue_head_incr(struct softnet_data *sd)
 2124 {
 2125 #ifdef CONFIG_RPS
 2126 	sd->input_queue_head++;
 2127 #endif
 2128 }
 2129 
 2130 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 2131 					      unsigned int *qtail)
 2132 {
 2133 #ifdef CONFIG_RPS
 2134 	*qtail = ++sd->input_queue_tail;
 2135 #endif
 2136 }
 2137 
 2138 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 2139 
 2140 void __netif_schedule(struct Qdisc *q);
 2141 
 2142 static inline void netif_schedule_queue(struct netdev_queue *txq)
 2143 {
 2144 	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
 2145 		__netif_schedule(txq->qdisc);
 2146 }
 2147 
 2148 static inline void netif_tx_schedule_all(struct net_device *dev)
 2149 {
 2150 	unsigned int i;
 2151 
 2152 	for (i = 0; i < dev->num_tx_queues; i++)
 2153 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 2154 }
 2155 
 2156 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 2157 {
 2158 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2159 }
 2160 
 2161 /**
 2162  *	netif_start_queue - allow transmit
 2163  *	@dev: network device
 2164  *
 2165  *	Allow upper layers to call the device hard_start_xmit routine.
 2166  */
 2167 static inline void netif_start_queue(struct net_device *dev)
 2168 {
 2169 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 2170 }
 2171 
 2172 static inline void netif_tx_start_all_queues(struct net_device *dev)
 2173 {
 2174 	unsigned int i;
 2175 
 2176 	for (i = 0; i < dev->num_tx_queues; i++) {
 2177 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2178 		netif_tx_start_queue(txq);
 2179 	}
 2180 }
 2181 
 2182 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 2183 {
 2184 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
 2185 		__netif_schedule(dev_queue->qdisc);
 2186 }
 2187 
 2188 /**
 2189  *	netif_wake_queue - restart transmit
 2190  *	@dev: network device
 2191  *
 2192  *	Allow upper layers to call the device hard_start_xmit routine.
 2193  *	Used for flow control when transmit resources are available.
 2194  */
 2195 static inline void netif_wake_queue(struct net_device *dev)
 2196 {
 2197 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 2198 }
 2199 
 2200 static inline void netif_tx_wake_all_queues(struct net_device *dev)
 2201 {
 2202 	unsigned int i;
 2203 
 2204 	for (i = 0; i < dev->num_tx_queues; i++) {
 2205 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2206 		netif_tx_wake_queue(txq);
 2207 	}
 2208 }
 2209 
 2210 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 2211 {
 2212 	if (WARN_ON(!dev_queue)) {
 2213 		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
 2214 		return;
 2215 	}
 2216 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2217 }
 2218 
 2219 /**
 2220  *	netif_stop_queue - stop the transmit queue
 2221  *	@dev: network device
 2222  *
 2223  *	Stop upper layers calling the device hard_start_xmit routine.
 2224  *	Used for flow control when transmit resources are unavailable.
 2225  */
 2226 static inline void netif_stop_queue(struct net_device *dev)
 2227 {
 2228 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 2229 }
 2230 
 2231 static inline void netif_tx_stop_all_queues(struct net_device *dev)
 2232 {
 2233 	unsigned int i;
 2234 
 2235 	for (i = 0; i < dev->num_tx_queues; i++) {
 2236 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2237 		netif_tx_stop_queue(txq);
 2238 	}
 2239 }
 2240 
 2241 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 2242 {
 2243 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2244 }
 2245 
 2246 /**
 2247  *	netif_queue_stopped - test if transmit queue is flow-blocked
 2248  *	@dev: network device
 2249  *
 2250  *	Test if transmit queue on device is currently unable to send.
 2251  */
 2252 static inline bool netif_queue_stopped(const struct net_device *dev)
 2253 {
 2254 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 2255 }
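/*
 * Illustrative sketch, not part of the original header: the classic
 * single-queue flow control pattern.  The driver stops the queue when its
 * TX ring fills up and wakes it from the TX completion path once there is
 * room again.  example_tx_ring_full()/example_tx_ring_has_room() are
 * hypothetical, driver-specific ring-state queries.
 */
static bool example_tx_ring_full(struct net_device *dev);
static bool example_tx_ring_has_room(struct net_device *dev);

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... post skb to the hardware TX ring ... */

	if (example_tx_ring_full(dev))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev)
{
	/* ... reclaim completed descriptors ... */

	if (netif_queue_stopped(dev) && example_tx_ring_has_room(dev))
		netif_wake_queue(dev);
}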
 2256 
 2257 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
 2258 {
 2259 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
 2260 }
 2261 
 2262 static inline bool
 2263 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
 2264 {
 2265 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 2266 }
 2267 
 2268 static inline bool
 2269 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
 2270 {
 2271 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
 2272 }
 2273 
 2274 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 2275 					unsigned int bytes)
 2276 {
 2277 #ifdef CONFIG_BQL
 2278 	dql_queued(&dev_queue->dql, bytes);
 2279 
 2280 	if (likely(dql_avail(&dev_queue->dql) >= 0))
 2281 		return;
 2282 
 2283 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2284 
 2285 	/*
 2286 	 * The XOFF flag must be set before checking the dql_avail below,
 2287 	 * because in netdev_tx_completed_queue we update the dql_completed
 2288 	 * before checking the XOFF flag.
 2289 	 */
 2290 	smp_mb();
 2291 
 2292 	/* check again in case another CPU has just made room avail */
 2293 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
 2294 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2295 #endif
 2296 }
 2297 
 2298 /**
 2299  * 	netdev_sent_queue - report the number of bytes queued to hardware
 2300  * 	@dev: network device
 2301  * 	@bytes: number of bytes queued to the hardware device queue
 2302  *
 2303  * 	Report the number of bytes queued for sending/completion to the network
 2304  * 	device hardware queue. @bytes should be a good approximation and should
 2305  * 	exactly match netdev_completed_queue() @bytes
 2306  */
 2307 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 2308 {
 2309 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
 2310 }
 2311 
 2312 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 2313 					     unsigned int pkts, unsigned int bytes)
 2314 {
 2315 #ifdef CONFIG_BQL
 2316 	if (unlikely(!bytes))
 2317 		return;
 2318 
 2319 	dql_completed(&dev_queue->dql, bytes);
 2320 
 2321 	/*
 2322 	 * Without the memory barrier there is a small possibility that
 2323 	 * netdev_tx_sent_queue will miss the update and cause the queue to
 2324 	 * be stopped forever
 2325 	 */
 2326 	smp_mb();
 2327 
 2328 	if (dql_avail(&dev_queue->dql) < 0)
 2329 		return;
 2330 
 2331 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
 2332 		netif_schedule_queue(dev_queue);
 2333 #endif
 2334 }
 2335 
 2336 /**
 2337  * 	netdev_completed_queue - report bytes and packets completed by device
 2338  * 	@dev: network device
 2339  * 	@pkts: actual number of packets sent over the medium
 2340  * 	@bytes: actual number of bytes sent over the medium
 2341  *
 2342  * 	Report the number of bytes and packets transmitted by the network device
 2343  * 	hardware queue over the physical medium, @bytes must exactly match the
 2344  * 	@bytes amount passed to netdev_sent_queue()
 2345  */
 2346 static inline void netdev_completed_queue(struct net_device *dev,
 2347 					  unsigned int pkts, unsigned int bytes)
 2348 {
 2349 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
 2350 }
 2351 
 2352 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
 2353 {
 2354 #ifdef CONFIG_BQL
 2355 	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
 2356 	dql_reset(&q->dql);
 2357 #endif
 2358 }
 2359 
 2360 /**
 2361  * 	netdev_reset_queue - reset the packet and byte counts of a network device
 2362  * 	@dev_queue: network device
 2363  *
 2364  * 	Reset the bytes and packet count of a network device and clear the
 2365  * 	software flow control OFF bit for this network device
 2366  */
 2367 static inline void netdev_reset_queue(struct net_device *dev_queue)
 2368 {
 2369 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 2370 }
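/*
 * Illustrative sketch, not part of the original header: Byte Queue Limits
 * accounting with the helpers above.  netdev_sent_queue() is called when a
 * frame is handed to the hardware, netdev_completed_queue() from the TX
 * cleanup path; the byte totals passed to the two must match.  The
 * function names are hypothetical.
 */
static void example_bql_on_xmit(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);
}

static void example_bql_on_clean(struct net_device *dev,
				 unsigned int pkts, unsigned int bytes)
{
	/* pkts/bytes describe the descriptors reclaimed in this pass */
	netdev_completed_queue(dev, pkts, bytes);
}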
 2371 
 2372 /**
 2373  * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 2374  * 	@dev: network device
 2375  * 	@queue_index: given tx queue index
 2376  *
 2377  * 	Returns 0 if given tx queue index >= number of device tx queues,
 2378  * 	otherwise returns the originally passed tx queue index.
 2379  */
 2380 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
 2381 {
 2382 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 2383 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
 2384 				     dev->name, queue_index,
 2385 				     dev->real_num_tx_queues);
 2386 		return 0;
 2387 	}
 2388 
 2389 	return queue_index;
 2390 }
 2391 
 2392 /**
 2393  *	netif_running - test if up
 2394  *	@dev: network device
 2395  *
 2396  *	Test if the device has been brought up.
 2397  */
 2398 static inline bool netif_running(const struct net_device *dev)
 2399 {
 2400 	return test_bit(__LINK_STATE_START, &dev->state);
 2401 }
 2402 
 2403 /*
 2404  * Routines to manage the subqueues on a device.  We only need start
 2405  * stop, and a check if it's stopped.  All other device management is
 2406  * done at the overall netdevice level.
 2407  * Also test the device if we're multiqueue.
 2408  */
 2409 
 2410 /**
 2411  *	netif_start_subqueue - allow sending packets on subqueue
 2412  *	@dev: network device
 2413  *	@queue_index: sub queue index
 2414  *
 2415  * Start individual transmit queue of a device with multiple transmit queues.
 2416  */
 2417 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 2418 {
 2419 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2420 
 2421 	netif_tx_start_queue(txq);
 2422 }
 2423 
 2424 /**
 2425  *	netif_stop_subqueue - stop sending packets on subqueue
 2426  *	@dev: network device
 2427  *	@queue_index: sub queue index
 2428  *
 2429  * Stop individual transmit queue of a device with multiple transmit queues.
 2430  */
 2431 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 2432 {
 2433 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2434 	netif_tx_stop_queue(txq);
 2435 }
 2436 
 2437 /**
 2438  *	netif_subqueue_stopped - test status of subqueue
 2439  *	@dev: network device
 2440  *	@queue_index: sub queue index
 2441  *
 2442  * Check individual transmit queue of a device with multiple transmit queues.
 2443  */
 2444 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
 2445 					    u16 queue_index)
 2446 {
 2447 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2448 
 2449 	return netif_tx_queue_stopped(txq);
 2450 }
 2451 
 2452 static inline bool netif_subqueue_stopped(const struct net_device *dev,
 2453 					  struct sk_buff *skb)
 2454 {
 2455 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 2456 }
 2457 
 2458 /**
 2459  *	netif_wake_subqueue - allow sending packets on subqueue
 2460  *	@dev: network device
 2461  *	@queue_index: sub queue index
 2462  *
 2463  * Resume individual transmit queue of a device with multiple transmit queues.
 2464  */
 2465 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 2466 {
 2467 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 2468 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
 2469 		__netif_schedule(txq->qdisc);
 2470 }
 2471 
 2472 #ifdef CONFIG_XPS
 2473 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 2474 			u16 index);
 2475 #else
 2476 static inline int netif_set_xps_queue(struct net_device *dev,
 2477 				      const struct cpumask *mask,
 2478 				      u16 index)
 2479 {
 2480 	return 0;
 2481 }
 2482 #endif
 2483 
 2484 /*
 2485  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 2486  * as a distribution range limit for the returned value.
 2487  */
 2488 static inline u16 skb_tx_hash(const struct net_device *dev,
 2489 			      const struct sk_buff *skb)
 2490 {
 2491 	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
 2492 }
 2493 
 2494 /**
 2495  *	netif_is_multiqueue - test if device has multiple transmit queues
 2496  *	@dev: network device
 2497  *
 2498  * Check if device has multiple transmit queues
 2499  */
 2500 static inline bool netif_is_multiqueue(const struct net_device *dev)
 2501 {
 2502 	return dev->num_tx_queues > 1;
 2503 }
 2504 
 2505 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 2506 
 2507 #ifdef CONFIG_SYSFS
 2508 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 2509 #else
 2510 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
 2511 						unsigned int rxq)
 2512 {
 2513 	return 0;
 2514 }
 2515 #endif
 2516 
 2517 static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 2518 					     const struct net_device *from_dev)
 2519 {
 2520 	int err;
 2521 
 2522 	err = netif_set_real_num_tx_queues(to_dev,
 2523 					   from_dev->real_num_tx_queues);
 2524 	if (err)
 2525 		return err;
 2526 #ifdef CONFIG_SYSFS
 2527 	return netif_set_real_num_rx_queues(to_dev,
 2528 					    from_dev->real_num_rx_queues);
 2529 #else
 2530 	return 0;
 2531 #endif
 2532 }
 2533 
 2534 #ifdef CONFIG_SYSFS
 2535 static inline unsigned int get_netdev_rx_queue_index(
 2536 		struct netdev_rx_queue *queue)
 2537 {
 2538 	struct net_device *dev = queue->dev;
 2539 	int index = queue - dev->_rx;
 2540 
 2541 	BUG_ON(index >= dev->num_rx_queues);
 2542 	return index;
 2543 }
 2544 #endif
 2545 
 2546 #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
 2547 int netif_get_num_default_rss_queues(void);
 2548 
 2549 enum skb_free_reason {
 2550 	SKB_REASON_CONSUMED,
 2551 	SKB_REASON_DROPPED,
 2552 };
 2553 
 2554 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
 2555 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
 2556 
 2557 /*
 2558  * It is not allowed to call kfree_skb() or consume_skb() from hardware
 2559  * interrupt context or with hardware interrupts being disabled.
 2560  * (in_irq() || irqs_disabled())
 2561  *
 2562  * We provide four helpers that can be used in following contexts :
 2563  *
 2564  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 2565  *  replacing kfree_skb(skb)
 2566  *
 2567  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 2568  *  Typically used in place of consume_skb(skb) in TX completion path
 2569  *
 2570  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 2571  *  replacing kfree_skb(skb)
 2572  *
 2573  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 2574  *  and consumed a packet. Used in place of consume_skb(skb)
 2575  */
 2576 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 2577 {
 2578 	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
 2579 }
 2580 
 2581 static inline void dev_consume_skb_irq(struct sk_buff *skb)
 2582 {
 2583 	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
 2584 }
 2585 
 2586 static inline void dev_kfree_skb_any(struct sk_buff *skb)
 2587 {
 2588 	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
 2589 }
 2590 
 2591 static inline void dev_consume_skb_any(struct sk_buff *skb)
 2592 {
 2593 	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 2594 }
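/*
 * Illustrative sketch, not part of the original header: picking the right
 * helper from the four above in a TX completion path.  A successfully
 * transmitted packet is "consumed"; a packet dropped on error is "freed";
 * the _any variants work in any context.
 */
static void example_tx_done(struct sk_buff *skb, bool transmit_ok)
{
	if (transmit_ok)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* accounted as a drop */
}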
 2595 
 2596 int netif_rx(struct sk_buff *skb);
 2597 int netif_rx_ni(struct sk_buff *skb);
 2598 int netif_receive_skb(struct sk_buff *skb);
 2599 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 2600 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 2601 struct sk_buff *napi_get_frags(struct napi_struct *napi);
 2602 gro_result_t napi_gro_frags(struct napi_struct *napi);
 2603 struct packet_offload *gro_find_receive_by_type(__be16 type);
 2604 struct packet_offload *gro_find_complete_by_type(__be16 type);
 2605 
 2606 static inline void napi_free_frags(struct napi_struct *napi)
 2607 {
 2608 	kfree_skb(napi->skb);
 2609 	napi->skb = NULL;
 2610 }
 2611 
 2612 int netdev_rx_handler_register(struct net_device *dev,
 2613 			       rx_handler_func_t *rx_handler,
 2614 			       void *rx_handler_data);
 2615 void netdev_rx_handler_unregister(struct net_device *dev);
 2616 
 2617 bool dev_valid_name(const char *name);
 2618 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 2619 int dev_ethtool(struct net *net, struct ifreq *);
 2620 unsigned int dev_get_flags(const struct net_device *);
 2621 int __dev_change_flags(struct net_device *, unsigned int flags);
 2622 int dev_change_flags(struct net_device *, unsigned int);
 2623 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
 2624 			unsigned int gchanges);
 2625 int dev_change_name(struct net_device *, const char *);
 2626 int dev_set_alias(struct net_device *, const char *, size_t);
 2627 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 2628 int dev_set_mtu(struct net_device *, int);
 2629 void dev_set_group(struct net_device *, int);
 2630 int dev_set_mac_address(struct net_device *, struct sockaddr *);
 2631 int dev_change_carrier(struct net_device *, bool new_carrier);
 2632 int dev_get_phys_port_id(struct net_device *dev,
 2633 			 struct netdev_phys_port_id *ppid);
 2634 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 2635 			struct netdev_queue *txq);
 2636 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 2637 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 2638 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
 2639 
 2640 extern int		netdev_budget;
 2641 
 2642 /* Called by rtnetlink.c:rtnl_unlock() */
 2643 void netdev_run_todo(void);
 2644 
 2645 /**
 2646  *	dev_put - release reference to device
 2647  *	@dev: network device
 2648  *
 2649  * Release reference to device to allow it to be freed.
 2650  */
 2651 static inline void dev_put(struct net_device *dev)
 2652 {
 2653 	this_cpu_dec(*dev->pcpu_refcnt);
 2654 }
 2655 
 2656 /**
 2657  *	dev_hold - get reference to device
 2658  *	@dev: network device
 2659  *
 2660  * Hold reference to device to keep it from being freed.
 2661  */
 2662 static inline void dev_hold(struct net_device *dev)
 2663 {
 2664 	this_cpu_inc(*dev->pcpu_refcnt);
 2665 }
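/*
 * Illustrative sketch, not part of the original header: the usual
 * dev_hold()/dev_put() pairing around a stored device pointer.
 * "struct example_ctx" is a hypothetical structure keeping a long-lived
 * reference.
 */
struct example_ctx {
	struct net_device *dev;
};

static void example_ctx_attach(struct example_ctx *ctx, struct net_device *dev)
{
	dev_hold(dev);
	ctx->dev = dev;
}

static void example_ctx_detach(struct example_ctx *ctx)
{
	dev_put(ctx->dev);
	ctx->dev = NULL;
}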
 2666 
 2667 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
 2668  * and _off may be called from IRQ context, but it is the caller
 2669  * who is responsible for serialization of these calls.
 2670  *
 2671  * The name carrier is inappropriate, these functions should really be
 2672  * called netif_lowerlayer_*() because they represent the state of any
 2673  * kind of lower layer not just hardware media.
 2674  */
 2675 
 2676 void linkwatch_init_dev(struct net_device *dev);
 2677 void linkwatch_fire_event(struct net_device *dev);
 2678 void linkwatch_forget_dev(struct net_device *dev);
 2679 
 2680 /**
 2681  *	netif_carrier_ok - test if carrier present
 2682  *	@dev: network device
 2683  *
 2684  * Check if carrier is present on device
 2685  */
 2686 static inline bool netif_carrier_ok(const struct net_device *dev)
 2687 {
 2688 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 2689 }
 2690 
 2691 unsigned long dev_trans_start(struct net_device *dev);
 2692 
 2693 void __netdev_watchdog_up(struct net_device *dev);
 2694 
 2695 void netif_carrier_on(struct net_device *dev);
 2696 
 2697 void netif_carrier_off(struct net_device *dev);
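/*
 * Illustrative sketch, not part of the original header: a link state
 * handler propagating hardware link status to the stack with
 * netif_carrier_on()/netif_carrier_off().  example_link_is_up() is a
 * hypothetical, driver-specific PHY/hardware query.
 */
static bool example_link_is_up(struct net_device *dev);

static void example_link_change(struct net_device *dev)
{
	if (example_link_is_up(dev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}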
 2698 
 2699 /**
 2700  *	netif_dormant_on - mark device as dormant.
 2701  *	@dev: network device
 2702  *
 2703  * Mark device as dormant (as per RFC2863).
 2704  *
 2705  * The dormant state indicates that the relevant interface is not
 2706  * actually in a condition to pass packets (i.e., it is not 'up') but is
 2707  * in a "pending" state, waiting for some external event.  For "on-
 2708  * demand" interfaces, this new state identifies the situation where the
 2709  * interface is waiting for events to place it in the up state.
 2710  *
 2711  */
 2712 static inline void netif_dormant_on(struct net_device *dev)
 2713 {
 2714 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
 2715 		linkwatch_fire_event(dev);
 2716 }
 2717 
 2718 /**
 2719  *	netif_dormant_off - set device as not dormant.
 2720  *	@dev: network device
 2721  *
 2722  * Device is not in dormant state.
 2723  */
 2724 static inline void netif_dormant_off(struct net_device *dev)
 2725 {
 2726 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
 2727 		linkwatch_fire_event(dev);
 2728 }
 2729 
 2730 /**
 2731  *	netif_dormant - test if device is dormant
 2732  *	@dev: network device
 2733  *
 2734  * Check if the device is in the dormant state
 2735  */
 2736 static inline bool netif_dormant(const struct net_device *dev)
 2737 {
 2738 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
 2739 }
 2740 
 2741 
 2742 /**
 2743  *	netif_oper_up - test if device is operational
 2744  *	@dev: network device
 2745  *
 2746  * Check if carrier is operational
 2747  */
 2748 static inline bool netif_oper_up(const struct net_device *dev)
 2749 {
 2750 	return (dev->operstate == IF_OPER_UP ||
 2751 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 2752 }
 2753 
 2754 /**
 2755  *	netif_device_present - is device available or removed
 2756  *	@dev: network device
 2757  *
 2758  * Check if device has not been removed from system.
 2759  */
 2760 static inline bool netif_device_present(struct net_device *dev)
 2761 {
 2762 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 2763 }
 2764 
 2765 void netif_device_detach(struct net_device *dev);
 2766 
 2767 void netif_device_attach(struct net_device *dev);
 2768 
 2769 /*
 2770  * Network interface message level settings
 2771  */
 2772 
 2773 enum {
 2774 	NETIF_MSG_DRV		= 0x0001,
 2775 	NETIF_MSG_PROBE		= 0x0002,
 2776 	NETIF_MSG_LINK		= 0x0004,
 2777 	NETIF_MSG_TIMER		= 0x0008,
 2778 	NETIF_MSG_IFDOWN	= 0x0010,
 2779 	NETIF_MSG_IFUP		= 0x0020,
 2780 	NETIF_MSG_RX_ERR	= 0x0040,
 2781 	NETIF_MSG_TX_ERR	= 0x0080,
 2782 	NETIF_MSG_TX_QUEUED	= 0x0100,
 2783 	NETIF_MSG_INTR		= 0x0200,
 2784 	NETIF_MSG_TX_DONE	= 0x0400,
 2785 	NETIF_MSG_RX_STATUS	= 0x0800,
 2786 	NETIF_MSG_PKTDATA	= 0x1000,
 2787 	NETIF_MSG_HW		= 0x2000,
 2788 	NETIF_MSG_WOL		= 0x4000,
 2789 };
 2790 
 2791 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
 2792 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
 2793 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
 2794 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
 2795 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
 2796 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
 2797 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
 2798 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
 2799 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
 2800 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
 2801 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
 2802 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
 2803 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
 2804 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
 2805 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
 2806 
 2807 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 2808 {
 2809 	/* use default */
 2810 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
 2811 		return default_msg_enable_bits;
 2812 	if (debug_value == 0)	/* no output */
 2813 		return 0;
 2814 	/* set low N bits */
 2815 	return (1 << debug_value) - 1;
 2816 }
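/*
 * Illustrative sketch, not part of the original header: drivers usually
 * store the result of netif_msg_init() in a msg_enable field of their
 * private struct and test it with the netif_msg_*() macros above.
 * "debug" would typically be a module parameter; -1 selects the defaults.
 */
static u32 example_msg_enable(int debug)
{
	return netif_msg_init(debug,
			      NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
}

/* later:  if (netif_msg_link(priv))
 *                 netdev_info(dev, "link is up\n");
 */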
 2817 
 2818 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 2819 {
 2820 	spin_lock(&txq->_xmit_lock);
 2821 	txq->xmit_lock_owner = cpu;
 2822 }
 2823 
 2824 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 2825 {
 2826 	spin_lock_bh(&txq->_xmit_lock);
 2827 	txq->xmit_lock_owner = smp_processor_id();
 2828 }
 2829 
 2830 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 2831 {
 2832 	bool ok = spin_trylock(&txq->_xmit_lock);
 2833 	if (likely(ok))
 2834 		txq->xmit_lock_owner = smp_processor_id();
 2835 	return ok;
 2836 }
 2837 
 2838 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 2839 {
 2840 	txq->xmit_lock_owner = -1;
 2841 	spin_unlock(&txq->_xmit_lock);
 2842 }
 2843 
 2844 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 2845 {
 2846 	txq->xmit_lock_owner = -1;
 2847 	spin_unlock_bh(&txq->_xmit_lock);
 2848 }
 2849 
 2850 static inline void txq_trans_update(struct netdev_queue *txq)
 2851 {
 2852 	if (txq->xmit_lock_owner != -1)
 2853 		txq->trans_start = jiffies;
 2854 }
 2855 
 2856 /**
 2857  *	netif_tx_lock - grab network device transmit lock
 2858  *	@dev: network device
 2859  *
 2860  * Get network device transmit lock
 2861  */
 2862 static inline void netif_tx_lock(struct net_device *dev)
 2863 {
 2864 	unsigned int i;
 2865 	int cpu;
 2866 
 2867 	spin_lock(&dev->tx_global_lock);
 2868 	cpu = smp_processor_id();
 2869 	for (i = 0; i < dev->num_tx_queues; i++) {
 2870 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2871 
 2872 		/* We are the only thread of execution doing a
 2873 		 * freeze, but we have to grab the _xmit_lock in
 2874 		 * order to synchronize with threads which are in
 2875 		 * the ->hard_start_xmit() handler and already
 2876 		 * checked the frozen bit.
 2877 		 */
 2878 		__netif_tx_lock(txq, cpu);
 2879 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 2880 		__netif_tx_unlock(txq);
 2881 	}
 2882 }
 2883 
 2884 static inline void netif_tx_lock_bh(struct net_device *dev)
 2885 {
 2886 	local_bh_disable();
 2887 	netif_tx_lock(dev);
 2888 }
 2889 
 2890 static inline void netif_tx_unlock(struct net_device *dev)
 2891 {
 2892 	unsigned int i;
 2893 
 2894 	for (i = 0; i < dev->num_tx_queues; i++) {
 2895 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2896 
 2897 		/* No need to grab the _xmit_lock here.  If the
 2898 		 * queue is not stopped for another reason, we
 2899 		 * force a schedule.
 2900 		 */
 2901 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
 2902 		netif_schedule_queue(txq);
 2903 	}
 2904 	spin_unlock(&dev->tx_global_lock);
 2905 }
 2906 
 2907 static inline void netif_tx_unlock_bh(struct net_device *dev)
 2908 {
 2909 	netif_tx_unlock(dev);
 2910 	local_bh_enable();
 2911 }
 2912 
 2913 #define HARD_TX_LOCK(dev, txq, cpu) {			\
 2914 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 2915 		__netif_tx_lock(txq, cpu);		\
 2916 	}						\
 2917 }
 2918 
 2919 #define HARD_TX_TRYLOCK(dev, txq)			\
 2920 	(((dev->features & NETIF_F_LLTX) == 0) ?	\
 2921 		__netif_tx_trylock(txq) :		\
 2922 		true )
 2923 
 2924 #define HARD_TX_UNLOCK(dev, txq) {			\
 2925 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 2926 		__netif_tx_unlock(txq);			\
 2927 	}						\
 2928 }
 2929 
 2930 static inline void netif_tx_disable(struct net_device *dev)
 2931 {
 2932 	unsigned int i;
 2933 	int cpu;
 2934 
 2935 	local_bh_disable();
 2936 	cpu = smp_processor_id();
 2937 	for (i = 0; i < dev->num_tx_queues; i++) {
 2938 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2939 
 2940 		__netif_tx_lock(txq, cpu);
 2941 		netif_tx_stop_queue(txq);
 2942 		__netif_tx_unlock(txq);
 2943 	}
 2944 	local_bh_enable();
 2945 }
 2946 
 2947 static inline void netif_addr_lock(struct net_device *dev)
 2948 {
 2949 	spin_lock(&dev->addr_list_lock);
 2950 }
 2951 
 2952 static inline void netif_addr_lock_nested(struct net_device *dev)
 2953 {
 2954 	int subclass = SINGLE_DEPTH_NESTING;
 2955 
 2956 	if (dev->netdev_ops->ndo_get_lock_subclass)
 2957 		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
 2958 
 2959 	spin_lock_nested(&dev->addr_list_lock, subclass);
 2960 }
 2961 
 2962 static inline void netif_addr_lock_bh(struct net_device *dev)
 2963 {
 2964 	spin_lock_bh(&dev->addr_list_lock);
 2965 }
 2966 
 2967 static inline void netif_addr_unlock(struct net_device *dev)
 2968 {
 2969 	spin_unlock(&dev->addr_list_lock);
 2970 }
 2971 
 2972 static inline void netif_addr_unlock_bh(struct net_device *dev)
 2973 {
 2974 	spin_unlock_bh(&dev->addr_list_lock);
 2975 }
 2976 
 2977 /*
 2978  * dev_addrs walker. Should be used only for read access. Call with
 2979  * rcu_read_lock held.
 2980  */
 2981 #define for_each_dev_addr(dev, ha) \
 2982 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
 2983 
 2984 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 2985 
 2986 void ether_setup(struct net_device *dev);
 2987 
 2988 /* Support for loadable net-drivers */
 2989 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 2990 				    void (*setup)(struct net_device *),
 2991 				    unsigned int txqs, unsigned int rxqs);
 2992 #define alloc_netdev(sizeof_priv, name, setup) \
 2993 	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
 2994 
 2995 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
 2996 	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
 2997 
 2998 int register_netdev(struct net_device *dev);
 2999 void unregister_netdev(struct net_device *dev);
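/*
 * Illustrative sketch, not part of the original header: allocating,
 * registering and tearing down an Ethernet-style device with the helpers
 * above.  "struct example_priv" and the "example%d" name template are
 * hypothetical; ether_setup() is the standard Ethernet setup callback.
 */
struct example_priv {
	u32 msg_enable;		/* hypothetical driver private data */
};

static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct example_priv), "example%d",
			   ether_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}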
 3000 
 3001 /* General hardware address lists handling functions */
 3002 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
 3003 		   struct netdev_hw_addr_list *from_list, int addr_len);
 3004 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
 3005 		      struct netdev_hw_addr_list *from_list, int addr_len);
 3006 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
 3007 		       struct net_device *dev,
 3008 		       int (*sync)(struct net_device *, const unsigned char *),
 3009 		       int (*unsync)(struct net_device *,
 3010 				     const unsigned char *));
 3011 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
 3012 			  struct net_device *dev,
 3013 			  int (*unsync)(struct net_device *,
 3014 					const unsigned char *));
 3015 void __hw_addr_init(struct netdev_hw_addr_list *list);
 3016 
 3017 /* Functions used for device addresses handling */
 3018 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
 3019 		 unsigned char addr_type);
 3020 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
 3021 		 unsigned char addr_type);
 3022 void dev_addr_flush(struct net_device *dev);
 3023 int dev_addr_init(struct net_device *dev);
 3024 
 3025 /* Functions used for unicast addresses handling */
 3026 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
 3027 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
 3028 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
 3029 int dev_uc_sync(struct net_device *to, struct net_device *from);
 3030 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
 3031 void dev_uc_unsync(struct net_device *to, struct net_device *from);
 3032 void dev_uc_flush(struct net_device *dev);
 3033 void dev_uc_init(struct net_device *dev);
 3034 
 3035 /**
 3036  *  __dev_uc_sync - Synchronize device's unicast list
 3037  *  @dev:  device to sync
 3038  *  @sync: function to call if address should be added
 3039  *  @unsync: function to call if address should be removed
 3040  *
 3041  *  Add newly added addresses to the interface, and release
 3042  *  addresses that have been deleted.
 3043  **/
 3044 static inline int __dev_uc_sync(struct net_device *dev,
 3045 				int (*sync)(struct net_device *,
 3046 					    const unsigned char *),
 3047 				int (*unsync)(struct net_device *,
 3048 					      const unsigned char *))
 3049 {
 3050 	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
 3051 }
 3052 
 3053 /**
 3054  *  __dev_uc_unsync - Remove synchronized addresses from device
 3055  *  @dev:  device to sync
 3056  *  @unsync: function to call if address should be removed
 3057  *
 3058  *  Remove all addresses that were added to the device by dev_uc_sync().
 3059  **/
 3060 static inline void __dev_uc_unsync(struct net_device *dev,
 3061 				   int (*unsync)(struct net_device *,
 3062 						 const unsigned char *))
 3063 {
 3064 	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
 3065 }
 3066 
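/*
 * Illustrative sketch (not part of the original header): driver-side
 * sync/unsync callbacks with the signature expected by __dev_uc_sync().
 * The callbacks are hypothetical; a real driver would program its
 * unicast filter hardware here.
 */
static int my_uc_sync(struct net_device *dev, const unsigned char *addr)
{
	/* add addr to the hardware unicast filter */
	return 0;
}

static int my_uc_unsync(struct net_device *dev, const unsigned char *addr)
{
	/* remove addr from the hardware unicast filter */
	return 0;
}

/* typically called from the driver's ndo_set_rx_mode callback */
static void my_sync_uc(struct net_device *dev)
{
	__dev_uc_sync(dev, my_uc_sync, my_uc_unsync);
}
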
 3067 /* Functions used for multicast addresses handling */
 3068 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
 3069 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
 3070 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
 3071 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
 3072 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
 3073 int dev_mc_sync(struct net_device *to, struct net_device *from);
 3074 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
 3075 void dev_mc_unsync(struct net_device *to, struct net_device *from);
 3076 void dev_mc_flush(struct net_device *dev);
 3077 void dev_mc_init(struct net_device *dev);
 3078 
 3079 /**
 3080  *  __dev_mc_sync - Synchronize device's multicast list
 3081  *  @dev:  device to sync
 3082  *  @sync: function to call if address should be added
 3083  *  @unsync: function to call if address should be removed
 3084  *
 3085  *  Add newly added addresses to the interface, and release
 3086  *  addresses that have been deleted.
 3087  **/
 3088 static inline int __dev_mc_sync(struct net_device *dev,
 3089 				int (*sync)(struct net_device *,
 3090 					    const unsigned char *),
 3091 				int (*unsync)(struct net_device *,
 3092 					      const unsigned char *))
 3093 {
 3094 	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
 3095 }
 3096 
 3097 /**
 3098  *  __dev_mc_unsync - Remove synchronized addresses from device
 3099  *  @dev:  device to sync
 3100  *  @unsync: function to call if address should be removed
 3101  *
 3102  *  Remove all addresses that were added to the device by dev_mc_sync().
 3103  **/
 3104 static inline void __dev_mc_unsync(struct net_device *dev,
 3105 				   int (*unsync)(struct net_device *,
 3106 						 const unsigned char *))
 3107 {
 3108 	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
 3109 }
 3110 
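/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * ndo_set_rx_mode implementation keeping the multicast filter in sync,
 * using callbacks shaped like the unicast ones sketched above.
 */
static int my_mc_sync(struct net_device *dev, const unsigned char *addr)
{
	/* add addr to the hardware multicast filter */
	return 0;
}

static int my_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
	/* remove addr from the hardware multicast filter */
	return 0;
}

static void my_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, my_mc_sync, my_mc_unsync);
}
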
 3111 /* Functions used for secondary unicast and multicast support */
 3112 void dev_set_rx_mode(struct net_device *dev);
 3113 void __dev_set_rx_mode(struct net_device *dev);
 3114 int dev_set_promiscuity(struct net_device *dev, int inc);
 3115 int dev_set_allmulti(struct net_device *dev, int inc);
 3116 void netdev_state_change(struct net_device *dev);
 3117 void netdev_notify_peers(struct net_device *dev);
 3118 void netdev_features_change(struct net_device *dev);
 3119 /* Load a device via the kmod */
 3120 void dev_load(struct net *net, const char *name);
 3121 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 3122 					struct rtnl_link_stats64 *storage);
 3123 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 3124 			     const struct net_device_stats *netdev_stats);
 3125 
 3126 extern int		netdev_max_backlog;
 3127 extern int		netdev_tstamp_prequeue;
 3128 extern int		weight_p;
 3129 extern int		bpf_jit_enable;
 3130 
 3131 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 3132 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 3133 						     struct list_head **iter);
 3134 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 3135 						     struct list_head **iter);
 3136 
 3137 /* iterate through upper list, must be called under RCU read lock */
 3138 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
 3139 	for (iter = &(dev)->adj_list.upper, \
 3140 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
 3141 	     updev; \
 3142 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
 3143 
 3144 /* iterate through upper list, must be called under RCU read lock */
 3145 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
 3146 	for (iter = &(dev)->all_adj_list.upper, \
 3147 	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
 3148 	     updev; \
 3149 	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
 3150 
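/*
 * Illustrative sketch (not part of the original header): iterating the
 * upper devices under the RCU read lock, as required by the comments
 * above.  "count_uppers" is a hypothetical helper.
 */
static int count_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		n++;
	rcu_read_unlock();
	return n;
}
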
 3151 void *netdev_lower_get_next_private(struct net_device *dev,
 3152 				    struct list_head **iter);
 3153 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 3154 					struct list_head **iter);
 3155 
 3156 #define netdev_for_each_lower_private(dev, priv, iter) \
 3157 	for (iter = (dev)->adj_list.lower.next, \
 3158 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
 3159 	     priv; \
 3160 	     priv = netdev_lower_get_next_private(dev, &(iter)))
 3161 
 3162 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
 3163 	for (iter = &(dev)->adj_list.lower, \
 3164 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
 3165 	     priv; \
 3166 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 3167 
 3168 void *netdev_lower_get_next(struct net_device *dev,
 3169 				struct list_head **iter);
 3170 #define netdev_for_each_lower_dev(dev, ldev, iter) \
 3171 	for (iter = &(dev)->adj_list.lower, \
 3172 	     ldev = netdev_lower_get_next(dev, &(iter)); \
 3173 	     ldev; \
 3174 	     ldev = netdev_lower_get_next(dev, &(iter)))
 3175 
 3176 void *netdev_adjacent_get_private(struct list_head *adj_list);
 3177 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 3178 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
 3179 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
 3180 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
 3181 int netdev_master_upper_dev_link(struct net_device *dev,
 3182 				 struct net_device *upper_dev);
 3183 int netdev_master_upper_dev_link_private(struct net_device *dev,
 3184 					 struct net_device *upper_dev,
 3185 					 void *private);
 3186 void netdev_upper_dev_unlink(struct net_device *dev,
 3187 			     struct net_device *upper_dev);
 3188 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 3189 void *netdev_lower_dev_get_private(struct net_device *dev,
 3190 				   struct net_device *lower_dev);
 3191 int dev_get_nest_level(struct net_device *dev,
 3192 		       bool (*type_check)(struct net_device *dev));
 3193 int skb_checksum_help(struct sk_buff *skb);
 3194 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 3195 				  netdev_features_t features, bool tx_path);
 3196 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 3197 				    netdev_features_t features);
 3198 
 3199 static inline
 3200 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 3201 {
 3202 	return __skb_gso_segment(skb, features, true);
 3203 }
 3204 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
 3205 
 3206 static inline bool can_checksum_protocol(netdev_features_t features,
 3207 					 __be16 protocol)
 3208 {
 3209 	return ((features & NETIF_F_GEN_CSUM) ||
 3210 		((features & NETIF_F_V4_CSUM) &&
 3211 		 protocol == htons(ETH_P_IP)) ||
 3212 		((features & NETIF_F_V6_CSUM) &&
 3213 		 protocol == htons(ETH_P_IPV6)) ||
 3214 		((features & NETIF_F_FCOE_CRC) &&
 3215 		 protocol == htons(ETH_P_FCOE)));
 3216 }
 3217 
 3218 #ifdef CONFIG_BUG
 3219 void netdev_rx_csum_fault(struct net_device *dev);
 3220 #else
 3221 static inline void netdev_rx_csum_fault(struct net_device *dev)
 3222 {
 3223 }
 3224 #endif
 3225 /* rx skb timestamps */
 3226 void net_enable_timestamp(void);
 3227 void net_disable_timestamp(void);
 3228 
 3229 #ifdef CONFIG_PROC_FS
 3230 int __init dev_proc_init(void);
 3231 #else
 3232 #define dev_proc_init() 0
 3233 #endif
 3234 
 3235 int netdev_class_create_file_ns(struct class_attribute *class_attr,
 3236 				const void *ns);
 3237 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
 3238 				 const void *ns);
 3239 
 3240 static inline int netdev_class_create_file(struct class_attribute *class_attr)
 3241 {
 3242 	return netdev_class_create_file_ns(class_attr, NULL);
 3243 }
 3244 
 3245 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
 3246 {
 3247 	netdev_class_remove_file_ns(class_attr, NULL);
 3248 }
 3249 
 3250 extern struct kobj_ns_type_operations net_ns_type_operations;
 3251 
 3252 const char *netdev_drivername(const struct net_device *dev);
 3253 
 3254 void linkwatch_run_queue(void);
 3255 
 3256 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
 3257 							  netdev_features_t f2)
 3258 {
 3259 	if (f1 & NETIF_F_GEN_CSUM)
 3260 		f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 3261 	if (f2 & NETIF_F_GEN_CSUM)
 3262 		f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 3263 	f1 &= f2;
 3264 	if (f1 & NETIF_F_GEN_CSUM)
 3265 		f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 3266 
 3267 	return f1;
 3268 }
 3269 
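/*
 * Illustrative sketch (not part of the original header): intersecting
 * the feature masks of two lower devices, e.g. when deciding what a
 * stacked device may advertise.  The helper name is hypothetical.
 */
static netdev_features_t my_stacked_features(const struct net_device *a,
					     const struct net_device *b)
{
	return netdev_intersect_features(a->features, b->features);
}
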
 3270 static inline netdev_features_t netdev_get_wanted_features(
 3271 	struct net_device *dev)
 3272 {
 3273 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
 3274 }
 3275 netdev_features_t netdev_increment_features(netdev_features_t all,
 3276 	netdev_features_t one, netdev_features_t mask);
 3277 
 3278 /* Allow TSO to be used on stacked devices:
 3279  * performing the GSO segmentation before the last device
 3280  * is a performance improvement.
 3281  */
 3282 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
 3283 							netdev_features_t mask)
 3284 {
 3285 	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
 3286 }
 3287 
 3288 int __netdev_update_features(struct net_device *dev);
 3289 void netdev_update_features(struct net_device *dev);
 3290 void netdev_change_features(struct net_device *dev);
 3291 
 3292 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 3293 					struct net_device *dev);
 3294 
 3295 netdev_features_t netif_skb_features(struct sk_buff *skb);
 3296 
 3297 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 3298 {
 3299 	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
 3300 
 3301 	/* check flags correspondence */
 3302 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 3303 	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
 3304 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 3305 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 3306 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 3307 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 3308 	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
 3309 	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
 3310 	BUILD_BUG_ON(SKB_GSO_IPIP    != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
 3311 	BUILD_BUG_ON(SKB_GSO_SIT     != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
 3312 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
 3313 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
 3314 	BUILD_BUG_ON(SKB_GSO_MPLS    != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
 3315 
 3316 	return (features & feature) == feature;
 3317 }
 3318 
 3319 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 3320 {
 3321 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 3322 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 3323 }
 3324 
 3325 static inline bool netif_needs_gso(struct sk_buff *skb,
 3326 				   netdev_features_t features)
 3327 {
 3328 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
 3329 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
 3330 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
 3331 }
 3332 
 3333 static inline void netif_set_gso_max_size(struct net_device *dev,
 3334 					  unsigned int size)
 3335 {
 3336 	dev->gso_max_size = size;
 3337 }
 3338 
 3339 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
 3340 					int pulled_hlen, u16 mac_offset,
 3341 					int mac_len)
 3342 {
 3343 	skb->protocol = protocol;
 3344 	skb->encapsulation = 1;
 3345 	skb_push(skb, pulled_hlen);
 3346 	skb_reset_transport_header(skb);
 3347 	skb->mac_header = mac_offset;
 3348 	skb->network_header = skb->mac_header + mac_len;
 3349 	skb->mac_len = mac_len;
 3350 }
 3351 
 3352 static inline bool netif_is_macvlan(struct net_device *dev)
 3353 {
 3354 	return dev->priv_flags & IFF_MACVLAN;
 3355 }
 3356 
 3357 static inline bool netif_is_bond_master(struct net_device *dev)
 3358 {
 3359 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
 3360 }
 3361 
 3362 static inline bool netif_is_bond_slave(struct net_device *dev)
 3363 {
 3364 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 3365 }
 3366 
 3367 static inline bool netif_supports_nofcs(struct net_device *dev)
 3368 {
 3369 	return dev->priv_flags & IFF_SUPP_NOFCS;
 3370 }
 3371 
 3372 extern struct pernet_operations __net_initdata loopback_net_ops;
 3373 
 3374 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 3375 
 3376 /* netdev_printk helpers, similar to dev_printk */
 3377 
 3378 static inline const char *netdev_name(const struct net_device *dev)
 3379 {
 3380 	if (dev->reg_state != NETREG_REGISTERED)
 3381 		return "(unregistered net_device)";
 3382 	return dev->name;
 3383 }
 3384 
 3385 __printf(3, 4)
 3386 int netdev_printk(const char *level, const struct net_device *dev,
 3387 		  const char *format, ...);
 3388 __printf(2, 3)
 3389 int netdev_emerg(const struct net_device *dev, const char *format, ...);
 3390 __printf(2, 3)
 3391 int netdev_alert(const struct net_device *dev, const char *format, ...);
 3392 __printf(2, 3)
 3393 int netdev_crit(const struct net_device *dev, const char *format, ...);
 3394 __printf(2, 3)
 3395 int netdev_err(const struct net_device *dev, const char *format, ...);
 3396 __printf(2, 3)
 3397 int netdev_warn(const struct net_device *dev, const char *format, ...);
 3398 __printf(2, 3)
 3399 int netdev_notice(const struct net_device *dev, const char *format, ...);
 3400 __printf(2, 3)
 3401 int netdev_info(const struct net_device *dev, const char *format, ...);
 3402 
 3403 #define MODULE_ALIAS_NETDEV(device) \
 3404 	MODULE_ALIAS("netdev-" device)
 3405 
 3406 #if defined(CONFIG_DYNAMIC_DEBUG)
 3407 #define netdev_dbg(__dev, format, args...)			\
 3408 do {								\
 3409 	dynamic_netdev_dbg(__dev, format, ##args);		\
 3410 } while (0)
 3411 #elif defined(DEBUG)
 3412 #define netdev_dbg(__dev, format, args...)			\
 3413 	netdev_printk(KERN_DEBUG, __dev, format, ##args)
 3414 #else
 3415 #define netdev_dbg(__dev, format, args...)			\
 3416 ({								\
 3417 	if (0)							\
 3418 		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
 3419 	0;							\
 3420 })
 3421 #endif
 3422 
 3423 #if defined(VERBOSE_DEBUG)
 3424 #define netdev_vdbg	netdev_dbg
 3425 #else
 3426 
 3427 #define netdev_vdbg(dev, format, args...)			\
 3428 ({								\
 3429 	if (0)							\
 3430 		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
 3431 	0;							\
 3432 })
 3433 #endif
 3434 
 3435 /*
 3436  * netdev_WARN() acts like dev_printk(), but with the key difference
 3437  * of using a WARN/WARN_ON to get the message out, including the
 3438  * file/line information and a backtrace.
 3439  */
 3440 #define netdev_WARN(dev, format, args...)			\
 3441 	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
 3442 
 3443 /* netif printk helpers, similar to netdev_printk */
 3444 
 3445 #define netif_printk(priv, type, level, dev, fmt, args...)	\
 3446 do {					  			\
 3447 	if (netif_msg_##type(priv))				\
 3448 		netdev_printk(level, (dev), fmt, ##args);	\
 3449 } while (0)
 3450 
 3451 #define netif_level(level, priv, type, dev, fmt, args...)	\
 3452 do {								\
 3453 	if (netif_msg_##type(priv))				\
 3454 		netdev_##level(dev, fmt, ##args);		\
 3455 } while (0)
 3456 
 3457 #define netif_emerg(priv, type, dev, fmt, args...)		\
 3458 	netif_level(emerg, priv, type, dev, fmt, ##args)
 3459 #define netif_alert(priv, type, dev, fmt, args...)		\
 3460 	netif_level(alert, priv, type, dev, fmt, ##args)
 3461 #define netif_crit(priv, type, dev, fmt, args...)		\
 3462 	netif_level(crit, priv, type, dev, fmt, ##args)
 3463 #define netif_err(priv, type, dev, fmt, args...)		\
 3464 	netif_level(err, priv, type, dev, fmt, ##args)
 3465 #define netif_warn(priv, type, dev, fmt, args...)		\
 3466 	netif_level(warn, priv, type, dev, fmt, ##args)
 3467 #define netif_notice(priv, type, dev, fmt, args...)		\
 3468 	netif_level(notice, priv, type, dev, fmt, ##args)
 3469 #define netif_info(priv, type, dev, fmt, args...)		\
 3470 	netif_level(info, priv, type, dev, fmt, ##args)
 3471 
 3472 #if defined(CONFIG_DYNAMIC_DEBUG)
 3473 #define netif_dbg(priv, type, netdev, format, args...)		\
 3474 do {								\
 3475 	if (netif_msg_##type(priv))				\
 3476 		dynamic_netdev_dbg(netdev, format, ##args);	\
 3477 } while (0)
 3478 #elif defined(DEBUG)
 3479 #define netif_dbg(priv, type, dev, format, args...)		\
 3480 	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
 3481 #else
 3482 #define netif_dbg(priv, type, dev, format, args...)			\
 3483 ({									\
 3484 	if (0)								\
 3485 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 3486 	0;								\
 3487 })
 3488 #endif
 3489 
 3490 #if defined(VERBOSE_DEBUG)
 3491 #define netif_vdbg	netif_dbg
 3492 #else
 3493 #define netif_vdbg(priv, type, dev, format, args...)		\
 3494 ({								\
 3495 	if (0)							\
 3496 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 3497 	0;							\
 3498 })
 3499 #endif
 3500 
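/*
 * Illustrative sketch (not part of the original header): the netif_*
 * message helpers keyed off a driver-private msg_enable bitmap.  The
 * private structure and messages below are hypothetical; msg_enable is
 * typically initialised via netif_msg_init().
 */
struct my_msg_priv {
	u32 msg_enable;
};

static void my_report_link(struct my_msg_priv *priv, struct net_device *dev)
{
	netif_info(priv, link, dev, "link is up\n");
	netif_dbg(priv, link, dev, "link details follow\n");
}
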
 3501 /*
 3502  *	The list of packet types we will receive (as opposed to discard)
 3503  *	and the routines to invoke.
 3504  *
 3505  *	Why 16. Because with 16 the only overlap we get on a hash of the
 3506  *	low nibble of the protocol value is RARP/SNAP/X.25.
 3507  *
 3508  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 3509  *             sure which should go first, but I bet it won't make much
 3510  *             difference if we are running VLANs.  The good news is that
 3511  *             this protocol won't be in the list unless compiled in, so
 3512  *             the average user (w/out VLANs) will not be adversely affected.
 3513  *             --BLG
 3514  *
 3515  *		0800	IP
 3516  *		8100    802.1Q VLAN
 3517  *		0001	802.3
 3518  *		0002	AX.25
 3519  *		0004	802.2
 3520  *		8035	RARP
 3521  *		0005	SNAP
 3522  *		0805	X.25
 3523  *		0806	ARP
 3524  *		8137	IPX
 3525  *		0009	Localtalk
 3526  *		86DD	IPv6
 3527  */
 3528 #define PTYPE_HASH_SIZE	(16)
 3529 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
 3530 
 3531 #endif	/* _LINUX_NETDEVICE_H */
    1 /*
    2  *	pci.h
    3  *
    4  *	PCI defines and function prototypes
    5  *	Copyright 1994, Drew Eckhardt
    6  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
    7  *
    8  *	For more information, please consult the following manuals (look at
    9  *	http://www.pcisig.com/ for how to get them):
   10  *
   11  *	PCI BIOS Specification
   12  *	PCI Local Bus Specification
   13  *	PCI to PCI Bridge Specification
   14  *	PCI System Design Guide
   15  */
   16 #ifndef LINUX_PCI_H
   17 #define LINUX_PCI_H
   18 
   19 
   20 #include <linux/mod_devicetable.h>
   21 
   22 #include <linux/types.h>
   23 #include <linux/init.h>
   24 #include <linux/ioport.h>
   25 #include <linux/list.h>
   26 #include <linux/compiler.h>
   27 #include <linux/errno.h>
   28 #include <linux/kobject.h>
   29 #include <linux/atomic.h>
   30 #include <linux/device.h>
   31 #include <linux/io.h>
   32 #include <uapi/linux/pci.h>
   33 
   34 #include <linux/pci_ids.h>
   35 
   36 /*
   37  * The PCI interface treats multi-function devices as independent
   38  * devices.  The slot/function address of each device is encoded
   39  * in a single byte as follows:
   40  *
   41  *	7:3 = slot
   42  *	2:0 = function
   43  *
   44  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
   45  * In the interest of not exposing interfaces to user-space unnecessarily,
   46  * the following kernel-only defines are being added here.
   47  */
   48 #define PCI_DEVID(bus, devfn)  ((((u16)bus) << 8) | devfn)
   49 /* return bus from PCI devid = ((u16)bus_number << 8) | devfn */
   50 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
   51 
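/*
 * Illustrative sketch (not part of the original header): packing and
 * unpacking the kernel-only bus/devfn identifier defined above.  The
 * helper name is hypothetical.
 */
static inline u16 example_devid(u8 bus, unsigned int devfn)
{
	u16 devid = PCI_DEVID(bus, devfn);

	/* PCI_BUS_NUM(devid) recovers the bus number again;
	 * PCI_SLOT()/PCI_FUNC() from uapi/linux/pci.h split devfn. */
	return devid;
}
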
   52 /* pci_slot represents a physical slot */
   53 struct pci_slot {
   54 	struct pci_bus *bus;		/* The bus this slot is on */
   55 	struct list_head list;		/* node in list of slots on this bus */
   56 	struct hotplug_slot *hotplug;	/* Hotplug info (migrate over time) */
   57 	unsigned char number;		/* PCI_SLOT(pci_dev->devfn) */
   58 	struct kobject kobj;
   59 };
   60 
   61 static inline const char *pci_slot_name(const struct pci_slot *slot)
   62 {
   63 	return kobject_name(&slot->kobj);
   64 }
   65 
   66 /* File state for mmap()s on /proc/bus/pci/X/Y */
   67 enum pci_mmap_state {
   68 	pci_mmap_io,
   69 	pci_mmap_mem
   70 };
   71 
   72 /* This defines the direction arg to the DMA mapping routines. */
   73 #define PCI_DMA_BIDIRECTIONAL	0
   74 #define PCI_DMA_TODEVICE	1
   75 #define PCI_DMA_FROMDEVICE	2
   76 #define PCI_DMA_NONE		3
   77 
   78 /*
   79  *  For PCI devices, the region numbers are assigned this way:
   80  */
   81 enum {
   82 	/* #0-5: standard PCI resources */
   83 	PCI_STD_RESOURCES,
   84 	PCI_STD_RESOURCE_END = 5,
   85 
   86 	/* #6: expansion ROM resource */
   87 	PCI_ROM_RESOURCE,
   88 
   89 	/* device specific resources */
   90 #ifdef CONFIG_PCI_IOV
   91 	PCI_IOV_RESOURCES,
   92 	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
   93 #endif
   94 
   95 	/* resources assigned to buses behind the bridge */
   96 #define PCI_BRIDGE_RESOURCE_NUM 4
   97 
   98 	PCI_BRIDGE_RESOURCES,
   99 	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
  100 				  PCI_BRIDGE_RESOURCE_NUM - 1,
  101 
  102 	/* total resources associated with a PCI device */
  103 	PCI_NUM_RESOURCES,
  104 
  105 	/* preserve this for compatibility */
  106 	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
  107 };
  108 
  109 typedef int __bitwise pci_power_t;
  110 
  111 #define PCI_D0		((pci_power_t __force) 0)
  112 #define PCI_D1		((pci_power_t __force) 1)
  113 #define PCI_D2		((pci_power_t __force) 2)
  114 #define PCI_D3hot	((pci_power_t __force) 3)
  115 #define PCI_D3cold	((pci_power_t __force) 4)
  116 #define PCI_UNKNOWN	((pci_power_t __force) 5)
  117 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
  118 
  119 /* Remember to update this when the list above changes! */
  120 extern const char *pci_power_names[];
  121 
  122 static inline const char *pci_power_name(pci_power_t state)
  123 {
  124 	return pci_power_names[1 + (int) state];
  125 }
  126 
  127 #define PCI_PM_D2_DELAY		200
  128 #define PCI_PM_D3_WAIT		10
  129 #define PCI_PM_D3COLD_WAIT	100
  130 #define PCI_PM_BUS_WAIT		50
  131 
  132 /** The pci_channel state describes connectivity between the CPU and
  133  *  the pci device.  If some PCI bus between here and the pci device
  134  *  has crashed or locked up, this info is reflected here.
  135  */
  136 typedef unsigned int __bitwise pci_channel_state_t;
  137 
  138 enum pci_channel_state {
  139 	/* I/O channel is in normal state */
  140 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
  141 
  142 	/* I/O to channel is blocked */
  143 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
  144 
  145 	/* PCI card is dead */
  146 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
  147 };
  148 
  149 typedef unsigned int __bitwise pcie_reset_state_t;
  150 
  151 enum pcie_reset_state {
  152 	/* Reset is NOT asserted (Use to deassert reset) */
  153 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
  154 
  155 	/* Use #PERST to reset PCIe device */
  156 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
  157 
  158 	/* Use PCIe Hot Reset to reset device */
  159 	pcie_hot_reset = (__force pcie_reset_state_t) 3
  160 };
  161 
  162 typedef unsigned short __bitwise pci_dev_flags_t;
  163 enum pci_dev_flags {
  164 	/* INTX_DISABLE in PCI_COMMAND register disables MSI
  165 	 * generation too.
  166 	 */
  167 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
  168 	/* Device configuration is irrevocably lost if disabled into D3 */
  169 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
  170 	/* Provide indication device is assigned by a Virtual Machine Manager */
  171 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
  172 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
  173 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
  174 	/* Flag to indicate the device uses dma_alias_devfn */
  175 	PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
  176 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
  177 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
  178 };
  179 
  180 enum pci_irq_reroute_variant {
  181 	INTEL_IRQ_REROUTE_VARIANT = 1,
  182 	MAX_IRQ_REROUTE_VARIANTS = 3
  183 };
  184 
  185 typedef unsigned short __bitwise pci_bus_flags_t;
  186 enum pci_bus_flags {
  187 	PCI_BUS_FLAGS_NO_MSI   = (__force pci_bus_flags_t) 1,
  188 	PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
  189 };
  190 
  191 /* These values come from the PCI Express Spec */
  192 enum pcie_link_width {
  193 	PCIE_LNK_WIDTH_RESRV	= 0x00,
  194 	PCIE_LNK_X1		= 0x01,
  195 	PCIE_LNK_X2		= 0x02,
  196 	PCIE_LNK_X4		= 0x04,
  197 	PCIE_LNK_X8		= 0x08,
  198 	PCIE_LNK_X12		= 0x0C,
  199 	PCIE_LNK_X16		= 0x10,
  200 	PCIE_LNK_X32		= 0x20,
  201 	PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
  202 };
  203 
  204 /* Based on the PCI Hotplug Spec, but some values are made up by us */
  205 enum pci_bus_speed {
  206 	PCI_SPEED_33MHz			= 0x00,
  207 	PCI_SPEED_66MHz			= 0x01,
  208 	PCI_SPEED_66MHz_PCIX		= 0x02,
  209 	PCI_SPEED_100MHz_PCIX		= 0x03,
  210 	PCI_SPEED_133MHz_PCIX		= 0x04,
  211 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
  212 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
  213 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
  214 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
  215 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
  216 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
  217 	AGP_UNKNOWN			= 0x0c,
  218 	AGP_1X				= 0x0d,
  219 	AGP_2X				= 0x0e,
  220 	AGP_4X				= 0x0f,
  221 	AGP_8X				= 0x10,
  222 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
  223 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
  224 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
  225 	PCIE_SPEED_2_5GT		= 0x14,
  226 	PCIE_SPEED_5_0GT		= 0x15,
  227 	PCIE_SPEED_8_0GT		= 0x16,
  228 	PCI_SPEED_UNKNOWN		= 0xff,
  229 };
  230 
  231 struct pci_cap_saved_data {
  232 	u16 cap_nr;
  233 	bool cap_extended;
  234 	unsigned int size;
  235 	u32 data[0];
  236 };
  237 
  238 struct pci_cap_saved_state {
  239 	struct hlist_node next;
  240 	struct pci_cap_saved_data cap;
  241 };
  242 
  243 struct pcie_link_state;
  244 struct pci_vpd;
  245 struct pci_sriov;
  246 struct pci_ats;
  247 
  248 /*
  249  * The pci_dev structure is used to describe PCI devices.
  250  */
  251 struct pci_dev {
  252 	struct list_head bus_list;	/* node in per-bus list */
  253 	struct pci_bus	*bus;		/* bus this device is on */
  254 	struct pci_bus	*subordinate;	/* bus this device bridges to */
  255 
  256 	void		*sysdata;	/* hook for sys-specific extension */
  257 	struct proc_dir_entry *procent;	/* device entry in /proc/bus/pci */
  258 	struct pci_slot	*slot;		/* Physical slot this device is in */
  259 
  260 	unsigned int	devfn;		/* encoded device & function index */
  261 	unsigned short	vendor;
  262 	unsigned short	device;
  263 	unsigned short	subsystem_vendor;
  264 	unsigned short	subsystem_device;
  265 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
  266 	u8		revision;	/* PCI revision, low byte of class word */
  267 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
  268 	u8		pcie_cap;	/* PCIe capability offset */
  269 	u8		msi_cap;	/* MSI capability offset */
  270 	u8		msix_cap;	/* MSI-X capability offset */
  271 	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
  272 	u8		rom_base_reg;	/* which config register controls the ROM */
  273 	u8		pin;		/* which interrupt pin this device uses */
  274 	u16		pcie_flags_reg;	/* cached PCIe Capabilities Register */
  275 	u8		dma_alias_devfn;/* devfn of DMA alias, if any */
  276 
  277 	struct pci_driver *driver;	/* which driver has allocated this device */
  278 	u64		dma_mask;	/* Mask of the bits of bus address this
  279 					   device implements.  Normally this is
  280 					   0xffffffff.  You only need to change
  281 					   this if your device has broken DMA
  282 					   or supports 64-bit transfers.  */
  283 
  284 	struct device_dma_parameters dma_parms;
  285 
  286 	pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
  287 					   this is D0-D3, D0 being fully functional,
  288 					   and D3 being off. */
  289 	u8		pm_cap;		/* PM capability offset */
  290 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
  291 					   can be generated */
  292 	unsigned int	pme_interrupt:1;
  293 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
  294 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
  295 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
  296 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
  297 	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
  298 	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
  299 	unsigned int	mmio_always_on:1;	/* disallow turning off io/mem
  300 						   decoding during bar sizing */
  301 	unsigned int	wakeup_prepared:1;
  302 	unsigned int	runtime_d3cold:1;	/* whether go through runtime
  303 						   D3cold, not set for devices
  304 						   powered on/off by the
  305 						   corresponding bridge */
  306 	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
  307 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
  308 
  309 #ifdef CONFIG_PCIEASPM
  310 	struct pcie_link_state	*link_state;	/* ASPM link state */
  311 #endif
  312 
  313 	pci_channel_state_t error_state;	/* current connectivity state */
  314 	struct	device	dev;		/* Generic device interface */
  315 
  316 	int		cfg_size;	/* Size of configuration space */
  317 
  318 	/*
  319 	 * Instead of touching interrupt line and base address registers
  320 	 * directly, use the values stored here. They might be different!
  321 	 */
  322 	unsigned int	irq;
  323 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
  324 
  325 	bool match_driver;		/* Skip attaching driver */
  326 	/* These fields are used by common fixups */
  327 	unsigned int	transparent:1;	/* Subtractive decode PCI bridge */
  328 	unsigned int	multifunction:1;/* Part of multi-function device */
  329 	/* keep track of device state */
  330 	unsigned int	is_added:1;
  331 	unsigned int	is_busmaster:1; /* device is busmaster */
  332 	unsigned int	no_msi:1;	/* device may not use msi */
  333 	unsigned int	block_cfg_access:1;	/* config space access is blocked */
  334 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
  335 	unsigned int	irq_reroute_variant:2;	/* device needs IRQ rerouting variant */
  336 	unsigned int	msi_enabled:1;
  337 	unsigned int	msix_enabled:1;
  338 	unsigned int	ari_enabled:1;	/* ARI forwarding */
  339 	unsigned int	is_managed:1;
  340 	unsigned int    needs_freset:1; /* Dev requires fundamental reset */
  341 	unsigned int	state_saved:1;
  342 	unsigned int	is_physfn:1;
  343 	unsigned int	is_virtfn:1;
  344 	unsigned int	reset_fn:1;
  345 	unsigned int    is_hotplug_bridge:1;
  346 	unsigned int    __aer_firmware_first_valid:1;
  347 	unsigned int	__aer_firmware_first:1;
  348 	unsigned int	broken_intx_masking:1;
  349 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
  350 	pci_dev_flags_t dev_flags;
  351 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
  352 
  353 	u32		saved_config_space[16]; /* config space saved at suspend time */
  354 	struct hlist_head saved_cap_space;
  355 	struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
  356 	int rom_attr_enabled;		/* has display of the rom attribute been enabled? */
  357 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
  358 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
  359 #ifdef CONFIG_PCI_MSI
  360 	struct list_head msi_list;
  361 	const struct attribute_group **msi_irq_groups;
  362 #endif
  363 	struct pci_vpd *vpd;
  364 #ifdef CONFIG_PCI_ATS
  365 	union {
  366 		struct pci_sriov *sriov;	/* SR-IOV capability related */
  367 		struct pci_dev *physfn;	/* the PF this VF is associated with */
  368 	};
  369 	struct pci_ats	*ats;	/* Address Translation Service */
  370 #endif
  371 	phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
  372 	size_t romlen; /* Length of ROM if it's not from the BAR */
  373 	char *driver_override; /* Driver name to force a match */
  374 };
  375 
  376 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
  377 {
  378 #ifdef CONFIG_PCI_IOV
  379 	if (dev->is_virtfn)
  380 		dev = dev->physfn;
  381 #endif
  382 	return dev;
  383 }
  384 
  385 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
  386 
  387 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
  388 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
  389 
  390 static inline int pci_channel_offline(struct pci_dev *pdev)
  391 {
  392 	return (pdev->error_state != pci_channel_io_normal);
  393 }
  394 
  395 struct pci_host_bridge_window {
  396 	struct list_head list;
  397 	struct resource *res;		/* host bridge aperture (CPU address) */
  398 	resource_size_t offset;		/* bus address + offset = CPU address */
  399 };
  400 
  401 struct pci_host_bridge {
  402 	struct device dev;
  403 	struct pci_bus *bus;		/* root bus */
  404 	struct list_head windows;	/* pci_host_bridge_windows */
  405 	void (*release_fn)(struct pci_host_bridge *);
  406 	void *release_data;
  407 };
  408 
  409 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
  410 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
  411 		     void (*release_fn)(struct pci_host_bridge *),
  412 		     void *release_data);
  413 
  414 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
  415 
  416 /*
  417  * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
  418  * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
  419  * buses below host bridges or subtractive decode bridges) go in the list.
  420  * Use pci_bus_for_each_resource() to iterate through all the resources.
  421  */
  422 
  423 /*
  424  * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
  425  * and there's no way to program the bridge with the details of the window.
  426  * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
  427  * decode bit set, because they are explicit and can be programmed with _SRS.
  428  */
  429 #define PCI_SUBTRACTIVE_DECODE	0x1
  430 
  431 struct pci_bus_resource {
  432 	struct list_head list;
  433 	struct resource *res;
  434 	unsigned int flags;
  435 };
  436 
  437 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
  438 
  439 struct pci_bus {
  440 	struct list_head node;		/* node in list of buses */
  441 	struct pci_bus	*parent;	/* parent bus this bridge is on */
  442 	struct list_head children;	/* list of child buses */
  443 	struct list_head devices;	/* list of devices on this bus */
  444 	struct pci_dev	*self;		/* bridge device as seen by parent */
  445 	struct list_head slots;		/* list of slots on this bus */
  446 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
  447 	struct list_head resources;	/* address space routed to this bus */
  448 	struct resource busn_res;	/* bus numbers routed to this bus */
  449 
  450 	struct pci_ops	*ops;		/* configuration access functions */
  451 	struct msi_chip	*msi;		/* MSI controller */
  452 	void		*sysdata;	/* hook for sys-specific extension */
  453 	struct proc_dir_entry *procdir;	/* directory entry in /proc/bus/pci */
  454 
  455 	unsigned char	number;		/* bus number */
  456 	unsigned char	primary;	/* number of primary bridge */
  457 	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
  458 	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
  459 
  460 	char		name[48];
  461 
  462 	unsigned short  bridge_ctl;	/* manage NO_ISA/FBB/et al behaviors */
  463 	pci_bus_flags_t bus_flags;	/* inherited by child buses */
  464 	struct device		*bridge;
  465 	struct device		dev;
  466 	struct bin_attribute	*legacy_io; /* legacy I/O for this bus */
  467 	struct bin_attribute	*legacy_mem; /* legacy mem */
  468 	unsigned int		is_added:1;
  469 };
  470 
  471 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
  472 
  473 /*
  474  * Returns true if the PCI bus is root (behind host-PCI bridge),
  475  * false otherwise
  476  *
  477  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
  478  * This is incorrect because "virtual" buses added for SR-IOV (via
  479  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
  480  */
  481 static inline bool pci_is_root_bus(struct pci_bus *pbus)
  482 {
  483 	return !(pbus->parent);
  484 }
  485 
  486 /**
  487  * pci_is_bridge - check if the PCI device is a bridge
  488  * @dev: PCI device
  489  *
  490  * Return true if the PCI device is a bridge, whether or not it has a
  491  * subordinate bus.
  492  */
  493 static inline bool pci_is_bridge(struct pci_dev *dev)
  494 {
  495 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
  496 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
  497 }
  498 
  499 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
  500 {
  501 	dev = pci_physfn(dev);
  502 	if (pci_is_root_bus(dev->bus))
  503 		return NULL;
  504 
  505 	return dev->bus->self;
  506 }
  507 
  508 #ifdef CONFIG_PCI_MSI
  509 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
  510 {
  511 	return pci_dev->msi_enabled || pci_dev->msix_enabled;
  512 }
  513 #else
  514 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
  515 #endif
  516 
  517 /*
  518  * Error values that may be returned by PCI functions.
  519  */
  520 #define PCIBIOS_SUCCESSFUL		0x00
  521 #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
  522 #define PCIBIOS_BAD_VENDOR_ID		0x83
  523 #define PCIBIOS_DEVICE_NOT_FOUND	0x86
  524 #define PCIBIOS_BAD_REGISTER_NUMBER	0x87
  525 #define PCIBIOS_SET_FAILED		0x88
  526 #define PCIBIOS_BUFFER_TOO_SMALL	0x89
  527 
  528 /*
  529  * Translate above to generic errno for passing back through non-PCI code.
  530  */
  531 static inline int pcibios_err_to_errno(int err)
  532 {
  533 	if (err <= PCIBIOS_SUCCESSFUL)
  534 		return err; /* Assume already errno */
  535 
  536 	switch (err) {
  537 	case PCIBIOS_FUNC_NOT_SUPPORTED:
  538 		return -ENOENT;
  539 	case PCIBIOS_BAD_VENDOR_ID:
  540 		return -ENOTTY;
  541 	case PCIBIOS_DEVICE_NOT_FOUND:
  542 		return -ENODEV;
  543 	case PCIBIOS_BAD_REGISTER_NUMBER:
  544 		return -EFAULT;
  545 	case PCIBIOS_SET_FAILED:
  546 		return -EIO;
  547 	case PCIBIOS_BUFFER_TOO_SMALL:
  548 		return -ENOSPC;
  549 	}
  550 
  551 	return -ERANGE;
  552 }
  553 
  554 /* Low-level architecture-dependent routines */
  555 
  556 struct pci_ops {
  557 	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
  558 	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
  559 };
  560 
  561 /*
  562  * ACPI needs to be able to access PCI config space before we've done a
  563  * PCI bus scan and created pci_bus structures.
  564  */
  565 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
  566 		 int reg, int len, u32 *val);
  567 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
  568 		  int reg, int len, u32 val);
  569 
  570 struct pci_bus_region {
  571 	dma_addr_t start;
  572 	dma_addr_t end;
  573 };
  574 
  575 struct pci_dynids {
  576 	spinlock_t lock;            /* protects list, index */
  577 	struct list_head list;      /* for IDs added at runtime */
  578 };
  579 
  580 
  581 /*
  582  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
  583  * a set of callbacks in struct pci_error_handlers, that device driver
  584  * will be notified of PCI bus errors, and will be driven to recovery
  585  * when an error occurs.
  586  */
  587 
  588 typedef unsigned int __bitwise pci_ers_result_t;
  589 
  590 enum pci_ers_result {
  591 	/* no result/none/not supported in device driver */
  592 	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
  593 
  594 	/* Device driver can recover without slot reset */
  595 	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
  596 
  597 	/* Device driver wants slot to be reset. */
  598 	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
  599 
  600 	/* Device has completely failed, is unrecoverable */
  601 	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
  602 
  603 	/* Device driver is fully recovered and operational */
  604 	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
  605 
  606 	/* No AER capabilities registered for the driver */
  607 	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
  608 };
  609 
  610 /* PCI bus error event callbacks */
  611 struct pci_error_handlers {
  612 	/* PCI bus error detected on this device */
  613 	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
  614 					   enum pci_channel_state error);
  615 
  616 	/* MMIO has been re-enabled, but not DMA */
  617 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
  618 
  619 	/* PCI Express link has been reset */
  620 	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
  621 
  622 	/* PCI slot has been reset */
  623 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
  624 
  625 	/* PCI function reset prepare or completed */
  626 	void (*reset_notify)(struct pci_dev *dev, bool prepare);
  627 
  628 	/* Device driver may resume normal operations */
  629 	void (*resume)(struct pci_dev *dev);
  630 };
  631 
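/*
 * Illustrative sketch (not part of the original header): a minimal set
 * of PCI-ERS callbacks a driver might point pci_driver.err_handler at.
 * The recovery policy shown (request a slot reset unless the failure is
 * permanent) is hypothetical.
 */
static pci_ers_result_t my_error_detected(struct pci_dev *dev,
					  enum pci_channel_state error)
{
	if (error == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *dev)
{
	/* re-initialise the device here */
	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
};
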
  632 
  633 struct module;
  634 struct pci_driver {
  635 	struct list_head node;
  636 	const char *name;
  637 	const struct pci_device_id *id_table;	/* must be non-NULL for probe to be called */
  638 	int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
  639 	void (*remove) (struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
  640 	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
  641 	int  (*suspend_late) (struct pci_dev *dev, pm_message_t state);
  642 	int  (*resume_early) (struct pci_dev *dev);
  643 	int  (*resume) (struct pci_dev *dev);	                /* Device woken up */
  644 	void (*shutdown) (struct pci_dev *dev);
  645 	int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
  646 	const struct pci_error_handlers *err_handler;
  647 	struct device_driver	driver;
  648 	struct pci_dynids dynids;
  649 };
  650 
  651 #define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
  652 
  653 /**
  654  * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
  655  * @_table: device table name
  656  *
  657  * This macro is deprecated and should not be used in new code.
  658  */
  659 #define DEFINE_PCI_DEVICE_TABLE(_table) \
  660 	const struct pci_device_id _table[]
  661 
  662 /**
  663  * PCI_DEVICE - macro used to describe a specific pci device
  664  * @vend: the 16 bit PCI Vendor ID
  665  * @dev: the 16 bit PCI Device ID
  666  *
  667  * This macro is used to create a struct pci_device_id that matches a
  668  * specific device.  The subvendor and subdevice fields will be set to
  669  * PCI_ANY_ID.
  670  */
  671 #define PCI_DEVICE(vend,dev) \
  672 	.vendor = (vend), .device = (dev), \
  673 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  674 
  675 /**
  676  * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
  677  * @vend: the 16 bit PCI Vendor ID
  678  * @dev: the 16 bit PCI Device ID
  679  * @subvend: the 16 bit PCI Subvendor ID
  680  * @subdev: the 16 bit PCI Subdevice ID
  681  *
  682  * This macro is used to create a struct pci_device_id that matches a
  683  * specific device with subsystem information.
  684  */
  685 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
  686 	.vendor = (vend), .device = (dev), \
  687 	.subvendor = (subvend), .subdevice = (subdev)
  688 
  689 /**
  690  * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
  691  * @dev_class: the class, subclass, prog-if triple for this device
  692  * @dev_class_mask: the class mask for this device
  693  *
  694  * This macro is used to create a struct pci_device_id that matches a
  695  * specific PCI class.  The vendor, device, subvendor, and subdevice
  696  * fields will be set to PCI_ANY_ID.
  697  */
  698 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
  699 	.class = (dev_class), .class_mask = (dev_class_mask), \
  700 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
  701 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  702 
  703 /**
  704  * PCI_VDEVICE - macro used to describe a specific pci device in short form
  705  * @vend: the vendor name
  706  * @dev: the 16 bit PCI Device ID
  707  *
  708  * This macro is used to create a struct pci_device_id that matches a
  709  * specific PCI device.  The subvendor, and subdevice fields will be set
  710  * to PCI_ANY_ID. The macro allows the next field to follow as the device
  711  * private data.
  712  */
  713 
  714 #define PCI_VDEVICE(vend, dev) \
  715 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
  716 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
  717 
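/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * ID table and driver structure built with the PCI_DEVICE() helper.
 * Vendor 0x1234 / device 0x5678 and the probe/remove bodies are made up;
 * registration (pci_register_driver() or module_pci_driver()) is assumed
 * to be declared elsewhere in this header.
 */
static const struct pci_device_id my_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ }	/* terminating entry */
};

static int my_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	return 0;
}

static void my_remove(struct pci_dev *dev)
{
}

static struct pci_driver my_pci_driver = {
	.name		= "my_pci_driver",
	.id_table	= my_ids,
	.probe		= my_probe,
	.remove		= my_remove,
};
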
  718 /* these external functions are only available when PCI support is enabled */
  719 #ifdef CONFIG_PCI
  720 
  721 void pcie_bus_configure_settings(struct pci_bus *bus);
  722 
  723 enum pcie_bus_config_types {
  724 	PCIE_BUS_TUNE_OFF,
  725 	PCIE_BUS_SAFE,
  726 	PCIE_BUS_PERFORMANCE,
  727 	PCIE_BUS_PEER2PEER,
  728 };
  729 
  730 extern enum pcie_bus_config_types pcie_bus_config;
  731 
  732 extern struct bus_type pci_bus_type;
  733 
  734 /* Do NOT directly access these two variables, unless you are arch-specific PCI
  735  * code, or PCI core code. */
  736 extern struct list_head pci_root_buses;	/* list of all known PCI buses */
  737 /* Some device drivers need to know if PCI is initialized */
  738 int no_pci_devices(void);
  739 
  740 void pcibios_resource_survey_bus(struct pci_bus *bus);
  741 void pcibios_add_bus(struct pci_bus *bus);
  742 void pcibios_remove_bus(struct pci_bus *bus);
  743 void pcibios_fixup_bus(struct pci_bus *);
  744 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
  745 /* Architecture-specific versions may override this (weak) */
  746 char *pcibios_setup(char *str);
  747 
  748 /* Used only when drivers/pci/setup.c is used */
  749 resource_size_t pcibios_align_resource(void *, const struct resource *,
  750 				resource_size_t,
  751 				resource_size_t);
  752 void pcibios_update_irq(struct pci_dev *, int irq);
  753 
  754 /* Weak but can be overridden by arch */
  755 void pci_fixup_cardbus(struct pci_bus *);
  756 
  757 /* Generic PCI functions used internally */
  758 
  759 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
  760 			     struct resource *res);
  761 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
  762 			     struct pci_bus_region *region);
  763 void pcibios_scan_specific_bus(int busn);
  764 struct pci_bus *pci_find_bus(int domain, int busnr);
  765 void pci_bus_add_devices(const struct pci_bus *bus);
  766 struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
  767 				      struct pci_ops *ops, void *sysdata);
  768 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
  769 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
  770 				    struct pci_ops *ops, void *sysdata,
  771 				    struct list_head *resources);
  772 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
  773 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
  774 void pci_bus_release_busn_res(struct pci_bus *b);
  775 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
  776 					     struct pci_ops *ops, void *sysdata,
  777 					     struct list_head *resources);
  778 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
  779 				int busnr);
  780 void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
  781 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
  782 				 const char *name,
  783 				 struct hotplug_slot *hotplug);
  784 void pci_destroy_slot(struct pci_slot *slot);
  785 int pci_scan_slot(struct pci_bus *bus, int devfn);
  786 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
  787 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
  788 unsigned int pci_scan_child_bus(struct pci_bus *bus);
  789 void pci_bus_add_device(struct pci_dev *dev);
  790 void pci_read_bridge_bases(struct pci_bus *child);
  791 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
  792 					  struct resource *res);
  793 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
  794 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
  795 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
  796 struct pci_dev *pci_dev_get(struct pci_dev *dev);
  797 void pci_dev_put(struct pci_dev *dev);
  798 void pci_remove_bus(struct pci_bus *b);
  799 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
  800 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
  801 void pci_stop_root_bus(struct pci_bus *bus);
  802 void pci_remove_root_bus(struct pci_bus *bus);
  803 void pci_setup_cardbus(struct pci_bus *bus);
  804 void pci_sort_breadthfirst(void);
  805 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
  806 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
  807 #define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
  808 
  809 /* Generic PCI functions exported to card drivers */
  810 
  811 enum pci_lost_interrupt_reason {
  812 	PCI_LOST_IRQ_NO_INFORMATION = 0,
  813 	PCI_LOST_IRQ_DISABLE_MSI,
  814 	PCI_LOST_IRQ_DISABLE_MSIX,
  815 	PCI_LOST_IRQ_DISABLE_ACPI,
  816 };
  817 enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
  818 int pci_find_capability(struct pci_dev *dev, int cap);
  819 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
  820 int pci_find_ext_capability(struct pci_dev *dev, int cap);
  821 int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
  822 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
  823 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
  824 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
  825 
  826 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
  827 				struct pci_dev *from);
  828 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
  829 				unsigned int ss_vendor, unsigned int ss_device,
  830 				struct pci_dev *from);
  831 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
  832 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
  833 					    unsigned int devfn);
  834 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
  835 						   unsigned int devfn)
  836 {
  837 	return pci_get_domain_bus_and_slot(0, bus, devfn);
  838 }
  839 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
  840 int pci_dev_present(const struct pci_device_id *ids);
  841 
  842 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
  843 			     int where, u8 *val);
  844 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
  845 			     int where, u16 *val);
  846 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
  847 			      int where, u32 *val);
  848 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
  849 			      int where, u8 val);
  850 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
  851 			      int where, u16 val);
  852 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
  853 			       int where, u32 val);
  854 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
  855 
  856 static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
  857 {
  858 	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
  859 }
  860 static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
  861 {
  862 	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
  863 }
  864 static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
  865 					u32 *val)
  866 {
  867 	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
  868 }
  869 static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
  870 {
  871 	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
  872 }
  873 static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
  874 {
  875 	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
  876 }
  877 static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
  878 					 u32 val)
  879 {
  880 	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
  881 }
  882 
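/*
 * Illustrative sketch (not part of the original header): reading the
 * vendor/device IDs back through the config accessors above.
 * PCI_VENDOR_ID / PCI_DEVICE_ID are the standard register offsets from
 * uapi/linux/pci_regs.h; the helper name is hypothetical.
 */
static void my_dump_ids(struct pci_dev *dev)
{
	u16 vendor, device;

	pci_read_config_word(dev, PCI_VENDOR_ID, &vendor);
	pci_read_config_word(dev, PCI_DEVICE_ID, &device);
	dev_info(&dev->dev, "vendor %04x device %04x\n", vendor, device);
}
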
  883 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
  884 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
  885 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
  886 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
  887 int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
  888 				       u16 clear, u16 set);
  889 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
  890 					u32 clear, u32 set);
  891 
  892 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
  893 					   u16 set)
  894 {
  895 	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
  896 }
  897 
  898 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
  899 					    u32 set)
  900 {
  901 	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
  902 }
  903 
  904 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
  905 					     u16 clear)
  906 {
  907 	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
  908 }
  909 
  910 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
  911 					      u32 clear)
  912 {
  913 	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
  914 }
  915 
  916 /* user-space driven config access */
  917 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
  918 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
  919 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
  920 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
  921 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
  922 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
  923 
  924 int __must_check pci_enable_device(struct pci_dev *dev);
  925 int __must_check pci_enable_device_io(struct pci_dev *dev);
  926 int __must_check pci_enable_device_mem(struct pci_dev *dev);
  927 int __must_check pci_reenable_device(struct pci_dev *);
  928 int __must_check pcim_enable_device(struct pci_dev *pdev);
  929 void pcim_pin_device(struct pci_dev *pdev);
  930 
  931 static inline int pci_is_enabled(struct pci_dev *pdev)
  932 {
  933 	return (atomic_read(&pdev->enable_cnt) > 0);
  934 }
  935 
  936 static inline int pci_is_managed(struct pci_dev *pdev)
  937 {
  938 	return pdev->is_managed;
  939 }
  940 
  941 void pci_disable_device(struct pci_dev *dev);
  942 
  943 extern unsigned int pcibios_max_latency;
  944 void pci_set_master(struct pci_dev *dev);
  945 void pci_clear_master(struct pci_dev *dev);
  946 
  947 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
  948 int pci_set_cacheline_size(struct pci_dev *dev);
  949 #define HAVE_PCI_SET_MWI
  950 int __must_check pci_set_mwi(struct pci_dev *dev);
  951 int pci_try_set_mwi(struct pci_dev *dev);
  952 void pci_clear_mwi(struct pci_dev *dev);
  953 void pci_intx(struct pci_dev *dev, int enable);
  954 bool pci_intx_mask_supported(struct pci_dev *dev);
  955 bool pci_check_and_mask_intx(struct pci_dev *dev);
  956 bool pci_check_and_unmask_intx(struct pci_dev *dev);
  957 void pci_msi_off(struct pci_dev *dev);
  958 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
  959 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
  960 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
  961 int pci_wait_for_pending_transaction(struct pci_dev *dev);
  962 int pcix_get_max_mmrbc(struct pci_dev *dev);
  963 int pcix_get_mmrbc(struct pci_dev *dev);
  964 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
  965 int pcie_get_readrq(struct pci_dev *dev);
  966 int pcie_set_readrq(struct pci_dev *dev, int rq);
  967 int pcie_get_mps(struct pci_dev *dev);
  968 int pcie_set_mps(struct pci_dev *dev, int mps);
  969 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
  970 			  enum pcie_link_width *width);
  971 int __pci_reset_function(struct pci_dev *dev);
  972 int __pci_reset_function_locked(struct pci_dev *dev);
  973 int pci_reset_function(struct pci_dev *dev);
  974 int pci_try_reset_function(struct pci_dev *dev);
  975 int pci_probe_reset_slot(struct pci_slot *slot);
  976 int pci_reset_slot(struct pci_slot *slot);
  977 int pci_try_reset_slot(struct pci_slot *slot);
  978 int pci_probe_reset_bus(struct pci_bus *bus);
  979 int pci_reset_bus(struct pci_bus *bus);
  980 int pci_try_reset_bus(struct pci_bus *bus);
  981 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
  982 void pci_update_resource(struct pci_dev *dev, int resno);
  983 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
  984 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
  985 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
  986 bool pci_device_is_present(struct pci_dev *pdev);
  987 
  988 /* ROM control related routines */
  989 int pci_enable_rom(struct pci_dev *pdev);
  990 void pci_disable_rom(struct pci_dev *pdev);
  991 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
  992 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
  993 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
  994 void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
  995 
  996 /* Power management related routines */
  997 int pci_save_state(struct pci_dev *dev);
  998 void pci_restore_state(struct pci_dev *dev);
  999 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
 1000 int pci_load_and_free_saved_state(struct pci_dev *dev,
 1001 				  struct pci_saved_state **state);
 1002 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
 1003 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
 1004 						   u16 cap);
 1005 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
 1006 int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
 1007 				u16 cap, unsigned int size);
 1008 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
 1009 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 1010 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 1011 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
 1012 void pci_pme_active(struct pci_dev *dev, bool enable);
 1013 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1014 		      bool runtime, bool enable);
 1015 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
 1016 int pci_prepare_to_sleep(struct pci_dev *dev);
 1017 int pci_back_from_sleep(struct pci_dev *dev);
 1018 bool pci_dev_run_wake(struct pci_dev *dev);
 1019 bool pci_check_pme_status(struct pci_dev *dev);
 1020 void pci_pme_wakeup_bus(struct pci_bus *bus);
 1021 
 1022 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1023 				  bool enable)
 1024 {
 1025 	return __pci_enable_wake(dev, state, false, enable);
 1026 }
 1027 
 1028 /* PCI Virtual Channel */
 1029 int pci_save_vc_state(struct pci_dev *dev);
 1030 void pci_restore_vc_state(struct pci_dev *dev);
 1031 void pci_allocate_vc_save_buffers(struct pci_dev *dev);
 1032 
 1033 /* For use by arch with custom probe code */
 1034 void set_pcie_port_type(struct pci_dev *pdev);
 1035 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
 1036 
 1037 /* Functions for PCI Hotplug drivers to use */
 1038 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
 1039 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
 1040 unsigned int pci_rescan_bus(struct pci_bus *bus);
 1041 void pci_lock_rescan_remove(void);
 1042 void pci_unlock_rescan_remove(void);
 1043 
 1044 /* Vital product data routines */
 1045 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 1046 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
 1047 
 1048 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 1049 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
 1050 void pci_bus_assign_resources(const struct pci_bus *bus);
 1051 void pci_bus_size_bridges(struct pci_bus *bus);
 1052 int pci_claim_resource(struct pci_dev *, int);
 1053 void pci_assign_unassigned_resources(void);
 1054 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
 1055 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
 1056 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
 1057 void pdev_enable_device(struct pci_dev *);
 1058 int pci_enable_resources(struct pci_dev *, int mask);
 1059 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
 1060 		    int (*)(const struct pci_dev *, u8, u8));
 1061 #define HAVE_PCI_REQ_REGIONS	2
 1062 int __must_check pci_request_regions(struct pci_dev *, const char *);
 1063 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
 1064 void pci_release_regions(struct pci_dev *);
 1065 int __must_check pci_request_region(struct pci_dev *, int, const char *);
 1066 int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
 1067 void pci_release_region(struct pci_dev *, int);
 1068 int pci_request_selected_regions(struct pci_dev *, int, const char *);
 1069 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
 1070 void pci_release_selected_regions(struct pci_dev *, int);
 1071 
 1072 /* drivers/pci/bus.c */
 1073 struct pci_bus *pci_bus_get(struct pci_bus *bus);
 1074 void pci_bus_put(struct pci_bus *bus);
 1075 void pci_add_resource(struct list_head *resources, struct resource *res);
 1076 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
 1077 			     resource_size_t offset);
 1078 void pci_free_resource_list(struct list_head *resources);
 1079 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags);
 1080 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
 1081 void pci_bus_remove_resources(struct pci_bus *bus);
 1082 
 1083 #define pci_bus_for_each_resource(bus, res, i)				\
 1084 	for (i = 0;							\
 1085 	    (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
 1086 	     i++)
 1087 
 1088 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
 1089 			struct resource *res, resource_size_t size,
 1090 			resource_size_t align, resource_size_t min,
 1091 			unsigned long type_mask,
 1092 			resource_size_t (*alignf)(void *,
 1093 						  const struct resource *,
 1094 						  resource_size_t,
 1095 						  resource_size_t),
 1096 			void *alignf_data);
 1097 
 1098 static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
 1099 {
 1100 	struct pci_bus_region region;
 1101 
  1102 	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
 1103 	return region.start;
 1104 }
 1105 
 1106 /* Proper probing supporting hot-pluggable devices */
 1107 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
 1108 				       const char *mod_name);
 1109 
 1110 /*
 1111  * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
 1112  */
 1113 #define pci_register_driver(driver)		\
 1114 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
 1115 
 1116 void pci_unregister_driver(struct pci_driver *dev);
 1117 
 1118 /**
 1119  * module_pci_driver() - Helper macro for registering a PCI driver
 1120  * @__pci_driver: pci_driver struct
 1121  *
 1122  * Helper macro for PCI drivers which do not do anything special in module
 1123  * init/exit. This eliminates a lot of boilerplate. Each module may only
 1124  * use this macro once, and calling it replaces module_init() and module_exit()
 1125  */
 1126 #define module_pci_driver(__pci_driver) \
 1127 	module_driver(__pci_driver, pci_register_driver, \
 1128 		       pci_unregister_driver)
 1129 
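/*
 * Illustrative sketch of a minimal driver built on module_pci_driver();
 * the vendor/device IDs and the example_* names are hypothetical and the
 * callbacks do only the bare minimum.
 */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder IDs */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_pci_driver(example_driver);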
 1130 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
 1131 int pci_add_dynid(struct pci_driver *drv,
 1132 		  unsigned int vendor, unsigned int device,
 1133 		  unsigned int subvendor, unsigned int subdevice,
 1134 		  unsigned int class, unsigned int class_mask,
 1135 		  unsigned long driver_data);
 1136 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
 1137 					 struct pci_dev *dev);
 1138 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
 1139 		    int pass);
 1140 
 1141 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
 1142 		  void *userdata);
 1143 int pci_cfg_space_size(struct pci_dev *dev);
 1144 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
 1145 void pci_setup_bridge(struct pci_bus *bus);
 1146 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
 1147 					 unsigned long type);
 1148 
 1149 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
 1150 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
 1151 
 1152 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
 1153 		      unsigned int command_bits, u32 flags);
 1154 /* kmem_cache style wrapper around pci_alloc_consistent() */
 1155 
 1156 #include <linux/pci-dma.h>
 1157 #include <linux/dmapool.h>
 1158 
 1159 #define	pci_pool dma_pool
 1160 #define pci_pool_create(name, pdev, size, align, allocation) \
 1161 		dma_pool_create(name, &pdev->dev, size, align, allocation)
 1162 #define	pci_pool_destroy(pool) dma_pool_destroy(pool)
 1163 #define	pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
 1164 #define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
 1165 
 1166 enum pci_dma_burst_strategy {
 1167 	PCI_DMA_BURST_INFINITY,	/* make bursts as large as possible,
 1168 				   strategy_parameter is N/A */
 1169 	PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
 1170 				   byte boundaries */
 1171 	PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
 1172 				   strategy_parameter byte boundaries */
 1173 };
 1174 
 1175 struct msix_entry {
 1176 	u32	vector;	/* kernel uses to write allocated vector */
 1177 	u16	entry;	/* driver uses to specify entry, OS writes */
 1178 };
 1179 
 1180 
 1181 #ifdef CONFIG_PCI_MSI
 1182 int pci_msi_vec_count(struct pci_dev *dev);
 1183 void pci_msi_shutdown(struct pci_dev *dev);
 1184 void pci_disable_msi(struct pci_dev *dev);
 1185 int pci_msix_vec_count(struct pci_dev *dev);
 1186 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 1187 void pci_msix_shutdown(struct pci_dev *dev);
 1188 void pci_disable_msix(struct pci_dev *dev);
 1189 void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 1190 void pci_restore_msi_state(struct pci_dev *dev);
 1191 int pci_msi_enabled(void);
 1192 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
 1193 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
 1194 {
 1195 	int rc = pci_enable_msi_range(dev, nvec, nvec);
 1196 	if (rc < 0)
 1197 		return rc;
 1198 	return 0;
 1199 }
 1200 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 1201 			  int minvec, int maxvec);
 1202 static inline int pci_enable_msix_exact(struct pci_dev *dev,
 1203 					struct msix_entry *entries, int nvec)
 1204 {
 1205 	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
 1206 	if (rc < 0)
 1207 		return rc;
 1208 	return 0;
 1209 }
 1210 #else
 1211 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 1212 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
 1213 static inline void pci_disable_msi(struct pci_dev *dev) { }
 1214 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 1215 static inline int pci_enable_msix(struct pci_dev *dev,
 1216 				  struct msix_entry *entries, int nvec)
 1217 { return -ENOSYS; }
 1218 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
 1219 static inline void pci_disable_msix(struct pci_dev *dev) { }
 1220 static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { }
 1221 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
 1222 static inline int pci_msi_enabled(void) { return 0; }
 1223 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
 1224 				       int maxvec)
 1225 { return -ENOSYS; }
 1226 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
 1227 { return -ENOSYS; }
 1228 static inline int pci_enable_msix_range(struct pci_dev *dev,
 1229 		      struct msix_entry *entries, int minvec, int maxvec)
 1230 { return -ENOSYS; }
 1231 static inline int pci_enable_msix_exact(struct pci_dev *dev,
 1232 		      struct msix_entry *entries, int nvec)
 1233 { return -ENOSYS; }
 1234 #endif
 1235 
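/*
 * Illustrative sketch of the pci_enable_msix_range() contract: ask for
 * between 1 and 4 vectors and fall back to legacy INTx if MSI-X cannot be
 * enabled.  The vector count and the example_* name are hypothetical.
 */
static int example_setup_irqs(struct pci_dev *pdev)
{
	struct msix_entry entries[4];
	int i, nvec;

	for (i = 0; i < 4; i++)
		entries[i].entry = i;	/* driver fills .entry, kernel fills .vector */

	nvec = pci_enable_msix_range(pdev, entries, 1, 4);
	if (nvec < 0)
		return 0;		/* fall back to pdev->irq (INTx) */
	return nvec;			/* entries[0..nvec-1].vector are now valid */
}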
 1236 #ifdef CONFIG_PCIEPORTBUS
 1237 extern bool pcie_ports_disabled;
 1238 extern bool pcie_ports_auto;
 1239 #else
 1240 #define pcie_ports_disabled	true
 1241 #define pcie_ports_auto		false
 1242 #endif
 1243 
 1244 #ifdef CONFIG_PCIEASPM
 1245 bool pcie_aspm_support_enabled(void);
 1246 #else
 1247 static inline bool pcie_aspm_support_enabled(void) { return false; }
 1248 #endif
 1249 
 1250 #ifdef CONFIG_PCIEAER
 1251 void pci_no_aer(void);
 1252 bool pci_aer_available(void);
 1253 #else
 1254 static inline void pci_no_aer(void) { }
 1255 static inline bool pci_aer_available(void) { return false; }
 1256 #endif
 1257 
 1258 #ifdef CONFIG_PCIE_ECRC
 1259 void pcie_set_ecrc_checking(struct pci_dev *dev);
 1260 void pcie_ecrc_get_policy(char *str);
 1261 #else
 1262 static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
 1263 static inline void pcie_ecrc_get_policy(char *str) { }
 1264 #endif
 1265 
 1266 #define pci_enable_msi(pdev)	pci_enable_msi_exact(pdev, 1)
 1267 
 1268 #ifdef CONFIG_HT_IRQ
 1269 /* The functions a driver should call */
 1270 int  ht_create_irq(struct pci_dev *dev, int idx);
 1271 void ht_destroy_irq(unsigned int irq);
 1272 #endif /* CONFIG_HT_IRQ */
 1273 
 1274 void pci_cfg_access_lock(struct pci_dev *dev);
 1275 bool pci_cfg_access_trylock(struct pci_dev *dev);
 1276 void pci_cfg_access_unlock(struct pci_dev *dev);
 1277 
 1278 /*
 1279  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
 1280  * a PCI domain is defined to be a set of PCI buses which share
 1281  * configuration space.
 1282  */
 1283 #ifdef CONFIG_PCI_DOMAINS
 1284 extern int pci_domains_supported;
 1285 #else
 1286 enum { pci_domains_supported = 0 };
 1287 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 1288 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
 1289 #endif /* CONFIG_PCI_DOMAINS */
 1290 
 1291 /* some architectures require additional setup to direct VGA traffic */
 1292 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
 1293 		      unsigned int command_bits, u32 flags);
 1294 void pci_register_set_vga_state(arch_set_vga_state_t func);
 1295 
 1296 #else /* CONFIG_PCI is not enabled */
 1297 
 1298 /*
 1299  *  If the system does not have PCI, clearly these return errors.  Define
 1300  *  these as simple inline functions to avoid hair in drivers.
 1301  */
 1302 
 1303 #define _PCI_NOP(o, s, t) \
 1304 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
 1305 						int where, t val) \
 1306 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
 1307 
 1308 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
 1309 				_PCI_NOP(o, word, u16 x) \
 1310 				_PCI_NOP(o, dword, u32 x)
 1311 _PCI_NOP_ALL(read, *)
 1312 _PCI_NOP_ALL(write,)
 1313 
 1314 static inline struct pci_dev *pci_get_device(unsigned int vendor,
 1315 					     unsigned int device,
 1316 					     struct pci_dev *from)
 1317 { return NULL; }
 1318 
 1319 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
 1320 					     unsigned int device,
 1321 					     unsigned int ss_vendor,
 1322 					     unsigned int ss_device,
 1323 					     struct pci_dev *from)
 1324 { return NULL; }
 1325 
 1326 static inline struct pci_dev *pci_get_class(unsigned int class,
 1327 					    struct pci_dev *from)
 1328 { return NULL; }
 1329 
 1330 #define pci_dev_present(ids)	(0)
 1331 #define no_pci_devices()	(1)
 1332 #define pci_dev_put(dev)	do { } while (0)
 1333 
 1334 static inline void pci_set_master(struct pci_dev *dev) { }
 1335 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
 1336 static inline void pci_disable_device(struct pci_dev *dev) { }
 1337 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 1338 { return -EIO; }
 1339 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
 1340 { return -EIO; }
 1341 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
 1342 					unsigned int size)
 1343 { return -EIO; }
 1344 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
 1345 					unsigned long mask)
 1346 { return -EIO; }
 1347 static inline int pci_assign_resource(struct pci_dev *dev, int i)
 1348 { return -EBUSY; }
 1349 static inline int __pci_register_driver(struct pci_driver *drv,
 1350 					struct module *owner)
 1351 { return 0; }
 1352 static inline int pci_register_driver(struct pci_driver *drv)
 1353 { return 0; }
 1354 static inline void pci_unregister_driver(struct pci_driver *drv) { }
 1355 static inline int pci_find_capability(struct pci_dev *dev, int cap)
 1356 { return 0; }
 1357 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
 1358 					   int cap)
 1359 { return 0; }
 1360 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
 1361 { return 0; }
 1362 
 1363 /* Power management related routines */
 1364 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
 1365 static inline void pci_restore_state(struct pci_dev *dev) { }
 1366 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 1367 { return 0; }
 1368 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
 1369 { return 0; }
 1370 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
 1371 					   pm_message_t state)
 1372 { return PCI_D0; }
 1373 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1374 				  int enable)
 1375 { return 0; }
 1376 
 1377 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
 1378 { return -EIO; }
 1379 static inline void pci_release_regions(struct pci_dev *dev) { }
 1380 
 1381 #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
 1382 
 1383 static inline void pci_block_cfg_access(struct pci_dev *dev) { }
 1384 static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
 1385 { return 0; }
 1386 static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
 1387 
 1388 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
 1389 { return NULL; }
 1390 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
 1391 						unsigned int devfn)
 1392 { return NULL; }
 1393 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
 1394 						unsigned int devfn)
 1395 { return NULL; }
 1396 
 1397 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 1398 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
 1399 
 1400 #define dev_is_pci(d) (false)
 1401 #define dev_is_pf(d) (false)
 1402 #define dev_num_vf(d) (0)
 1403 #endif /* CONFIG_PCI */
 1404 
 1405 /* Include architecture-dependent settings and functions */
 1406 
 1407 #include <asm/pci.h>
 1408 
 1409 /* these helpers provide future and backwards compatibility
 1410  * for accessing popular PCI BAR info */
 1411 #define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
 1412 #define pci_resource_end(dev, bar)	((dev)->resource[(bar)].end)
 1413 #define pci_resource_flags(dev, bar)	((dev)->resource[(bar)].flags)
 1414 #define pci_resource_len(dev,bar) \
 1415 	((pci_resource_start((dev), (bar)) == 0 &&	\
 1416 	  pci_resource_end((dev), (bar)) ==		\
 1417 	  pci_resource_start((dev), (bar))) ? 0 :	\
 1418 							\
 1419 	 (pci_resource_end((dev), (bar)) -		\
 1420 	  pci_resource_start((dev), (bar)) + 1))
 1421 
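/*
 * Illustrative sketch using the BAR helpers above: map BAR 0 if it is a
 * memory resource.  The BAR index is an assumption; real drivers use the
 * BAR documented for their hardware.
 */
static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
		return NULL;
	return ioremap(pci_resource_start(pdev, 0),
		       pci_resource_len(pdev, 0));
}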
 1422 /* Similar to the helpers above, these manipulate per-pci_dev
 1423  * driver-specific data.  They are really just a wrapper around
 1424  * the generic device structure functions of these calls.
 1425  */
 1426 static inline void *pci_get_drvdata(struct pci_dev *pdev)
 1427 {
 1428 	return dev_get_drvdata(&pdev->dev);
 1429 }
 1430 
 1431 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
 1432 {
 1433 	dev_set_drvdata(&pdev->dev, data);
 1434 }
 1435 
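/*
 * Illustrative sketch of the drvdata wrappers: store a hypothetical
 * per-device structure in probe and retrieve it again in remove
 * (assumes <linux/slab.h> for kzalloc/kfree).
 */
struct example_priv {
	void __iomem *regs;
};

static int example_drv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	pci_set_drvdata(pdev, priv);
	return 0;
}

static void example_drv_remove(struct pci_dev *pdev)
{
	struct example_priv *priv = pci_get_drvdata(pdev);

	kfree(priv);
}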
 1436 /* If you want to know what to call your pci_dev, ask this function.
 1437  * Again, it's a wrapper around the generic device.
 1438  */
 1439 static inline const char *pci_name(const struct pci_dev *pdev)
 1440 {
 1441 	return dev_name(&pdev->dev);
 1442 }
 1443 
 1444 
 1445 /* Some archs don't want to expose struct resource to userland as-is
 1446  * in sysfs and /proc
 1447  */
 1448 #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
 1449 static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
 1450 		const struct resource *rsrc, resource_size_t *start,
 1451 		resource_size_t *end)
 1452 {
 1453 	*start = rsrc->start;
 1454 	*end = rsrc->end;
 1455 }
 1456 #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
 1457 
 1458 
 1459 /*
 1460  *  The world is not perfect and supplies us with broken PCI devices.
 1461  *  For at least a part of these bugs we need a work-around, so both
 1462  *  generic (drivers/pci/quirks.c) and per-architecture code can define
 1463  *  fixup hooks to be called for particular buggy devices.
 1464  */
 1465 
 1466 struct pci_fixup {
 1467 	u16 vendor;		/* You can use PCI_ANY_ID here of course */
 1468 	u16 device;		/* You can use PCI_ANY_ID here of course */
 1469 	u32 class;		/* You can use PCI_ANY_ID here too */
 1470 	unsigned int class_shift;	/* should be 0, 8, 16 */
 1471 	void (*hook)(struct pci_dev *dev);
 1472 };
 1473 
 1474 enum pci_fixup_pass {
 1475 	pci_fixup_early,	/* Before probing BARs */
 1476 	pci_fixup_header,	/* After reading configuration header */
 1477 	pci_fixup_final,	/* Final phase of device fixups */
 1478 	pci_fixup_enable,	/* pci_enable_device() time */
 1479 	pci_fixup_resume,	/* pci_device_resume() */
 1480 	pci_fixup_suspend,	/* pci_device_suspend */
 1481 	pci_fixup_resume_early, /* pci_device_resume_early() */
 1482 };
 1483 
 1484 /* Anonymous variables would be nice... */
 1485 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
 1486 				  class_shift, hook)			\
 1487 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
 1488 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
 1489 		= { vendor, device, class, class_shift, hook };
 1490 
 1491 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
 1492 					 class_shift, hook)		\
 1493 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
 1494 		hook, vendor, device, class, class_shift, hook)
 1495 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
 1496 					 class_shift, hook)		\
 1497 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
 1498 		hook, vendor, device, class, class_shift, hook)
 1499 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
 1500 					 class_shift, hook)		\
 1501 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
 1502 		hook, vendor, device, class, class_shift, hook)
 1503 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
 1504 					 class_shift, hook)		\
 1505 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
 1506 		hook, vendor, device, class, class_shift, hook)
 1507 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
 1508 					 class_shift, hook)		\
 1509 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 1510 		resume##hook, vendor, device, class,	\
 1511 		class_shift, hook)
 1512 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
 1513 					 class_shift, hook)		\
 1514 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 1515 		resume_early##hook, vendor, device,	\
 1516 		class, class_shift, hook)
 1517 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
 1518 					 class_shift, hook)		\
 1519 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 1520 		suspend##hook, vendor, device, class,	\
 1521 		class_shift, hook)
 1522 
 1523 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
 1524 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
 1525 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1526 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
 1527 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
 1528 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1529 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
 1530 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
 1531 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1532 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
 1533 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
 1534 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1535 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
 1536 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 1537 		resume##hook, vendor, device,		\
 1538 		PCI_ANY_ID, 0, hook)
 1539 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
 1540 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 1541 		resume_early##hook, vendor, device,	\
 1542 		PCI_ANY_ID, 0, hook)
 1543 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
 1544 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 1545 		suspend##hook, vendor, device,		\
 1546 		PCI_ANY_ID, 0, hook)
 1547 
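/*
 * Illustrative sketch of a header-stage quirk: force the cache line size
 * register of a hypothetical broken device.  Vendor/device IDs and the
 * example_* name are placeholders.
 */
static void example_quirk_clsize(struct pci_dev *dev)
{
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_quirk_clsize);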
 1548 #ifdef CONFIG_PCI_QUIRKS
 1549 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
 1550 struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
 1551 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
 1552 void pci_dev_specific_enable_acs(struct pci_dev *dev);
 1553 #else
 1554 static inline void pci_fixup_device(enum pci_fixup_pass pass,
 1555 				    struct pci_dev *dev) { }
 1556 static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
 1557 {
 1558 	return pci_dev_get(dev);
 1559 }
 1560 static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
 1561 					       u16 acs_flags)
 1562 {
 1563 	return -ENOTTY;
 1564 }
 1565 static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { }
 1566 #endif
 1567 
 1568 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
 1569 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
 1570 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
 1571 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
 1572 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
 1573 				   const char *name);
 1574 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
 1575 
 1576 extern int pci_pci_problems;
 1577 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
 1578 #define PCIPCI_TRITON		2
 1579 #define PCIPCI_NATOMA		4
 1580 #define PCIPCI_VIAETBF		8
 1581 #define PCIPCI_VSFX		16
 1582 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
 1583 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
 1584 
 1585 extern unsigned long pci_cardbus_io_size;
 1586 extern unsigned long pci_cardbus_mem_size;
 1587 extern u8 pci_dfl_cache_line_size;
 1588 extern u8 pci_cache_line_size;
 1589 
 1590 extern unsigned long pci_hotplug_io_size;
 1591 extern unsigned long pci_hotplug_mem_size;
 1592 
 1593 /* Architecture-specific versions may override these (weak) */
 1594 void pcibios_disable_device(struct pci_dev *dev);
 1595 void pcibios_set_master(struct pci_dev *dev);
 1596 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
 1597 				 enum pcie_reset_state state);
 1598 int pcibios_add_device(struct pci_dev *dev);
 1599 void pcibios_release_device(struct pci_dev *dev);
 1600 void pcibios_penalize_isa_irq(int irq, int active);
 1601 
 1602 #ifdef CONFIG_HIBERNATE_CALLBACKS
 1603 extern struct dev_pm_ops pcibios_pm_ops;
 1604 #endif
 1605 
 1606 #ifdef CONFIG_PCI_MMCONFIG
 1607 void __init pci_mmcfg_early_init(void);
 1608 void __init pci_mmcfg_late_init(void);
 1609 #else
 1610 static inline void pci_mmcfg_early_init(void) { }
 1611 static inline void pci_mmcfg_late_init(void) { }
 1612 #endif
 1613 
 1614 int pci_ext_cfg_avail(void);
 1615 
 1616 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
 1617 
 1618 #ifdef CONFIG_PCI_IOV
 1619 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
 1620 void pci_disable_sriov(struct pci_dev *dev);
 1621 int pci_num_vf(struct pci_dev *dev);
 1622 int pci_vfs_assigned(struct pci_dev *dev);
 1623 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
 1624 int pci_sriov_get_totalvfs(struct pci_dev *dev);
 1625 #else
 1626 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
 1627 { return -ENODEV; }
 1628 static inline void pci_disable_sriov(struct pci_dev *dev) { }
 1629 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
 1630 static inline int pci_vfs_assigned(struct pci_dev *dev)
 1631 { return 0; }
 1632 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
 1633 { return 0; }
 1634 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
 1635 { return 0; }
 1636 #endif
 1637 
 1638 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
 1639 void pci_hp_create_module_link(struct pci_slot *pci_slot);
 1640 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
 1641 #endif
 1642 
 1643 /**
 1644  * pci_pcie_cap - get the saved PCIe capability offset
 1645  * @dev: PCI device
 1646  *
 1647  * PCIe capability offset is calculated at PCI device initialization
 1648  * time and saved in the data structure. This function returns saved
 1649  * PCIe capability offset. Using this instead of pci_find_capability()
 1650  * reduces unnecessary search in the PCI configuration space. If you
 1651  * need to calculate PCIe capability offset from raw device for some
 1652  * reasons, please use pci_find_capability() instead.
 1653  */
 1654 static inline int pci_pcie_cap(struct pci_dev *dev)
 1655 {
 1656 	return dev->pcie_cap;
 1657 }
 1658 
 1659 /**
 1660  * pci_is_pcie - check if the PCI device is PCI Express capable
 1661  * @dev: PCI device
 1662  *
 1663  * Returns: true if the PCI device is PCI Express capable, false otherwise.
 1664  */
 1665 static inline bool pci_is_pcie(struct pci_dev *dev)
 1666 {
 1667 	return pci_pcie_cap(dev);
 1668 }
 1669 
 1670 /**
 1671  * pcie_caps_reg - get the PCIe Capabilities Register
 1672  * @dev: PCI device
 1673  */
 1674 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
 1675 {
 1676 	return dev->pcie_flags_reg;
 1677 }
 1678 
 1679 /**
 1680  * pci_pcie_type - get the PCIe device/port type
 1681  * @dev: PCI device
 1682  */
 1683 static inline int pci_pcie_type(const struct pci_dev *dev)
 1684 {
 1685 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
 1686 }
 1687 
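/*
 * Illustrative sketch combining the accessors above: only touch a
 * PCIe-specific setting on endpoints.  The MPS value is an assumption.
 */
static void example_tune_mps(struct pci_dev *dev)
{
	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
		return;
	pcie_set_mps(dev, 256);
}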
 1688 void pci_request_acs(void);
 1689 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
 1690 bool pci_acs_path_enabled(struct pci_dev *start,
 1691 			  struct pci_dev *end, u16 acs_flags);
 1692 
 1693 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
 1694 #define PCI_VPD_LRDT_ID(x)		(x | PCI_VPD_LRDT)
 1695 
 1696 /* Large Resource Data Type Tag Item Names */
 1697 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
 1698 #define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
 1699 #define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
 1700 
 1701 #define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
 1702 #define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
 1703 #define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
 1704 
 1705 /* Small Resource Data Type Tag Item Names */
 1706 #define PCI_VPD_STIN_END		0x78	/* End */
 1707 
 1708 #define PCI_VPD_SRDT_END		PCI_VPD_STIN_END
 1709 
 1710 #define PCI_VPD_SRDT_TIN_MASK		0x78
 1711 #define PCI_VPD_SRDT_LEN_MASK		0x07
 1712 
 1713 #define PCI_VPD_LRDT_TAG_SIZE		3
 1714 #define PCI_VPD_SRDT_TAG_SIZE		1
 1715 
 1716 #define PCI_VPD_INFO_FLD_HDR_SIZE	3
 1717 
 1718 #define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
 1719 #define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
 1720 #define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
 1721 #define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
 1722 
 1723 /**
 1724  * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
 1725  * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
 1726  *
 1727  * Returns the extracted Large Resource Data Type length.
 1728  */
 1729 static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
 1730 {
 1731 	return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
 1732 }
 1733 
 1734 /**
 1735  * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
  1736  * @srdt: Pointer to the beginning of the Small Resource Data Type tag
 1737  *
 1738  * Returns the extracted Small Resource Data Type length.
 1739  */
 1740 static inline u8 pci_vpd_srdt_size(const u8 *srdt)
 1741 {
 1742 	return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
 1743 }
 1744 
 1745 /**
 1746  * pci_vpd_info_field_size - Extracts the information field length
  1747  * @info_field: Pointer to the beginning of an information field header
 1748  *
 1749  * Returns the extracted information field length.
 1750  */
 1751 static inline u8 pci_vpd_info_field_size(const u8 *info_field)
 1752 {
 1753 	return info_field[2];
 1754 }
 1755 
 1756 /**
 1757  * pci_vpd_find_tag - Locates the Resource Data Type tag provided
 1758  * @buf: Pointer to buffered vpd data
 1759  * @off: The offset into the buffer at which to begin the search
 1760  * @len: The length of the vpd buffer
 1761  * @rdt: The Resource Data Type to search for
 1762  *
 1763  * Returns the index where the Resource Data Type was found or
 1764  * -ENOENT otherwise.
 1765  */
 1766 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
 1767 
 1768 /**
 1769  * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
 1770  * @buf: Pointer to buffered vpd data
 1771  * @off: The offset into the buffer at which to begin the search
 1772  * @len: The length of the buffer area, relative to off, in which to search
 1773  * @kw: The keyword to search for
 1774  *
 1775  * Returns the index where the information field keyword was found or
 1776  * -ENOENT otherwise.
 1777  */
 1778 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 1779 			      unsigned int len, const char *kw);
 1780 
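/*
 * Illustrative sketch of the VPD helpers above: read some VPD, locate the
 * read-only section and look up the part number keyword.  The buffer size
 * and the example_* name are assumptions.
 */
static int example_find_partno(struct pci_dev *dev, u8 *buf, size_t len)
{
	ssize_t n = pci_read_vpd(dev, 0, len, buf);
	int ro;

	if (n <= 0)
		return -ENODEV;
	ro = pci_vpd_find_tag(buf, 0, n, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;
	return pci_vpd_find_info_keyword(buf, ro + PCI_VPD_LRDT_TAG_SIZE,
					 pci_vpd_lrdt_size(&buf[ro]),
					 PCI_VPD_RO_KEYWORD_PARTNO);
}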
 1781 /* PCI <-> OF binding helpers */
 1782 #ifdef CONFIG_OF
 1783 struct device_node;
 1784 void pci_set_of_node(struct pci_dev *dev);
 1785 void pci_release_of_node(struct pci_dev *dev);
 1786 void pci_set_bus_of_node(struct pci_bus *bus);
 1787 void pci_release_bus_of_node(struct pci_bus *bus);
 1788 
 1789 /* Arch may override this (weak) */
 1790 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 1791 
 1792 static inline struct device_node *
 1793 pci_device_to_OF_node(const struct pci_dev *pdev)
 1794 {
 1795 	return pdev ? pdev->dev.of_node : NULL;
 1796 }
 1797 
 1798 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
 1799 {
 1800 	return bus ? bus->dev.of_node : NULL;
 1801 }
 1802 
 1803 #else /* CONFIG_OF */
 1804 static inline void pci_set_of_node(struct pci_dev *dev) { }
 1805 static inline void pci_release_of_node(struct pci_dev *dev) { }
 1806 static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
 1807 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
 1808 #endif  /* CONFIG_OF */
 1809 
 1810 #ifdef CONFIG_EEH
 1811 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
 1812 {
 1813 	return pdev->dev.archdata.edev;
 1814 }
 1815 #endif
 1816 
 1817 int pci_for_each_dma_alias(struct pci_dev *pdev,
 1818 			   int (*fn)(struct pci_dev *pdev,
 1819 				     u16 alias, void *data), void *data);
 1820 
 1821 /**
 1822  * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
 1823  * @pdev: the PCI device
 1824  *
 1825  * if the device is PCIE, return NULL
 1826  * if the device isn't connected to a PCIe bridge (that is its parent is a
 1827  * legacy PCI bridge and the bridge is directly connected to bus 0), return its
 1828  * parent
 1829  */
 1830 struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
 1831 
  1832 #endif /* LINUX_PCI_H */
     1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
   121  * Despite its name it doesn't necessarily have to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with a LOAD inside this section.
  124  * spin_lock() is the one-way barrier, this LOAD can not escape out
  125  * of the region. So the default implementation simply ensures that
  126  * a STORE can not move into the critical section, smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
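/*
 * Illustrative sketch of the pattern described above: a waker publishes a
 * condition before taking a lock, and the barrier keeps that STORE from
 * being reordered into the critical section.  ex_cond/ex_lock are
 * hypothetical names.
 */
static int ex_cond;
static DEFINE_SPINLOCK(ex_lock);

static void ex_publish_and_wake(void)
{
	ex_cond = 1;			/* STORE that must stay before the lock */
	smp_mb__before_spinlock();
	spin_lock(&ex_lock);
	/* wake up sleepers that re-check ex_cond under ex_lock */
	spin_unlock(&ex_lock);
}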
  132 
  133 /*
  134  * Place this after a lock-acquisition primitive to guarantee that
  135  * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
  136  * if the UNLOCK and LOCK are executed by the same CPU or if the
  137  * UNLOCK and LOCK operate on the same lock variable.
  138  */
  139 #ifndef smp_mb__after_unlock_lock
  140 #define smp_mb__after_unlock_lock()	do { } while (0)
  141 #endif
  142 
  143 /**
  144  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  145  * @lock: the spinlock in question.
  146  */
  147 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  148 
  149 #ifdef CONFIG_DEBUG_SPINLOCK
  150  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  152  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  153  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  154 #else
  155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  156 {
  157 	__acquire(lock);
  158 	arch_spin_lock(&lock->raw_lock);
  159 }
  160 
  161 static inline void
  162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  163 {
  164 	__acquire(lock);
  165 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  166 }
  167 
  168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  169 {
  170 	return arch_spin_trylock(&(lock)->raw_lock);
  171 }
  172 
  173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  174 {
  175 	arch_spin_unlock(&lock->raw_lock);
  176 	__release(lock);
  177 }
  178 #endif
  179 
  180 /*
  181  * Define the various spin_lock methods.  Note we define these
  182  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  183  * various methods are defined as nops in the case they are not
  184  * required.
  185  */
  186 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  187 
  188 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  189 
  190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  191 # define raw_spin_lock_nested(lock, subclass) \
  192 	_raw_spin_lock_nested(lock, subclass)
  193 
  194 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  195 	 do {								\
  196 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  197 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  198 	 } while (0)
  199 #else
  200 # define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
  201 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  202 #endif
  203 
  204 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  205 
  206 #define raw_spin_lock_irqsave(lock, flags)			\
  207 	do {						\
  208 		typecheck(unsigned long, flags);	\
  209 		flags = _raw_spin_lock_irqsave(lock);	\
  210 	} while (0)
  211 
  212 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  213 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  214 	do {								\
  215 		typecheck(unsigned long, flags);			\
  216 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  217 	} while (0)
  218 #else
  219 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  220 	do {								\
  221 		typecheck(unsigned long, flags);			\
  222 		flags = _raw_spin_lock_irqsave(lock);			\
  223 	} while (0)
  224 #endif
  225 
  226 #else
  227 
  228 #define raw_spin_lock_irqsave(lock, flags)		\
  229 	do {						\
  230 		typecheck(unsigned long, flags);	\
  231 		_raw_spin_lock_irqsave(lock, flags);	\
  232 	} while (0)
  233 
  234 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  235 	raw_spin_lock_irqsave(lock, flags)
  236 
  237 #endif
  238 
  239 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  240 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  241 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  242 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  243 
  244 #define raw_spin_unlock_irqrestore(lock, flags)		\
  245 	do {							\
  246 		typecheck(unsigned long, flags);		\
  247 		_raw_spin_unlock_irqrestore(lock, flags);	\
  248 	} while (0)
  249 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  250 
  251 #define raw_spin_trylock_bh(lock) \
  252 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  253 
  254 #define raw_spin_trylock_irq(lock) \
  255 ({ \
  256 	local_irq_disable(); \
  257 	raw_spin_trylock(lock) ? \
  258 	1 : ({ local_irq_enable(); 0;  }); \
  259 })
  260 
  261 #define raw_spin_trylock_irqsave(lock, flags) \
  262 ({ \
  263 	local_irq_save(flags); \
  264 	raw_spin_trylock(lock) ? \
  265 	1 : ({ local_irq_restore(flags); 0; }); \
  266 })
  267 
  268 /**
  269  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  270  * @lock: the spinlock in question.
  271  */
  272 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  273 
  274 /* Include rwlock functions */
  275 #include <linux/rwlock.h>
  276 
  277 /*
  278  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  279  */
  280 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  281 # include <linux/spinlock_api_smp.h>
  282 #else
  283 # include <linux/spinlock_api_up.h>
  284 #endif
  285 
  286 /*
  287  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  288  */
  289 
  290 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  291 {
  292 	return &lock->rlock;
  293 }
  294 
  295 #define spin_lock_init(_lock)				\
  296 do {							\
  297 	spinlock_check(_lock);				\
  298 	raw_spin_lock_init(&(_lock)->rlock);		\
  299 } while (0)
  300 
  301 static inline void spin_lock(spinlock_t *lock)
  302 {
  303 	raw_spin_lock(&lock->rlock);
  304 }
  305 
  306 static inline void spin_lock_bh(spinlock_t *lock)
  307 {
  308 	raw_spin_lock_bh(&lock->rlock);
  309 }
  310 
  311 static inline int spin_trylock(spinlock_t *lock)
  312 {
  313 	return raw_spin_trylock(&lock->rlock);
  314 }
  315 
  316 #define spin_lock_nested(lock, subclass)			\
  317 do {								\
  318 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  319 } while (0)
  320 
  321 #define spin_lock_nest_lock(lock, nest_lock)				\
  322 do {									\
  323 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  324 } while (0)
  325 
  326 static inline void spin_lock_irq(spinlock_t *lock)
  327 {
  328 	raw_spin_lock_irq(&lock->rlock);
  329 }
  330 
  331 #define spin_lock_irqsave(lock, flags)				\
  332 do {								\
  333 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  334 } while (0)
  335 
  336 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  337 do {									\
  338 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  339 } while (0)
  340 
  341 static inline void spin_unlock(spinlock_t *lock)
  342 {
  343 	raw_spin_unlock(&lock->rlock);
  344 }
  345 
  346 static inline void spin_unlock_bh(spinlock_t *lock)
  347 {
  348 	raw_spin_unlock_bh(&lock->rlock);
  349 }
  350 
  351 static inline void spin_unlock_irq(spinlock_t *lock)
  352 {
  353 	raw_spin_unlock_irq(&lock->rlock);
  354 }
  355 
  356 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  357 {
  358 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  359 }
  360 
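/*
 * Illustrative sketch of the mapped API above: a short critical section
 * with local interrupts disabled.  ex_cnt_lock/ex_count are hypothetical.
 */
static DEFINE_SPINLOCK(ex_cnt_lock);
static unsigned long ex_count;

static void ex_bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ex_cnt_lock, flags);
	ex_count++;
	spin_unlock_irqrestore(&ex_cnt_lock, flags);
}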
  361 static inline int spin_trylock_bh(spinlock_t *lock)
  362 {
  363 	return raw_spin_trylock_bh(&lock->rlock);
  364 }
  365 
  366 static inline int spin_trylock_irq(spinlock_t *lock)
  367 {
  368 	return raw_spin_trylock_irq(&lock->rlock);
  369 }
  370 
  371 #define spin_trylock_irqsave(lock, flags)			\
  372 ({								\
  373 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  374 })
  375 
  376 static inline void spin_unlock_wait(spinlock_t *lock)
  377 {
  378 	raw_spin_unlock_wait(&lock->rlock);
  379 }
  380 
  381 static inline int spin_is_locked(spinlock_t *lock)
  382 {
  383 	return raw_spin_is_locked(&lock->rlock);
  384 }
  385 
  386 static inline int spin_is_contended(spinlock_t *lock)
  387 {
  388 	return raw_spin_is_contended(&lock->rlock);
  389 }
  390 
  391 static inline int spin_can_lock(spinlock_t *lock)
  392 {
  393 	return raw_spin_can_lock(&lock->rlock);
  394 }
  395 
  396 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  397 
  398 /*
  399  * Pull the atomic_t declaration:
  400  * (asm-mips/atomic.h needs above definitions)
  401  */
  402 #include <linux/atomic.h>
  403 /**
  404  * atomic_dec_and_lock - lock on reaching reference count zero
  405  * @atomic: the atomic counter
  406  * @lock: the spinlock in question
  407  *
  408  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  409  * @lock.  Returns false for all other cases.
  410  */
  411 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  412 #define atomic_dec_and_lock(atomic, lock) \
  413 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
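/*
 * Illustrative sketch of the refcount-release pattern this helper serves;
 * ex_obj, ex_obj_lock and the use of list_del()/kfree() are hypothetical
 * (assumes <linux/list.h> and <linux/slab.h>).
 */
struct ex_obj {
	atomic_t refs;
	struct list_head node;
};
static DEFINE_SPINLOCK(ex_obj_lock);

static void ex_obj_put(struct ex_obj *obj)
{
	if (atomic_dec_and_lock(&obj->refs, &ex_obj_lock)) {
		list_del(&obj->node);
		spin_unlock(&ex_obj_lock);
		kfree(obj);
	}
}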
  414 
   415 #endif /* __LINUX_SPINLOCK_H */
Here is an explanation of a rule violation found while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether your driver actually contains an error.
The Error trace column shows the path along which the given rule is violated. You can expand/collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and expand/collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column shows the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it shows that file's contents.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time |
| linux-3.16-rc1.tar.xz | drivers/net/ethernet/3com/typhoon.ko | 39_7a | CPAchecker | Bug | Unreported | 2014-12-12 13:14:40 |