Bug
        
Error # 176
Error trace
         {    19     typedef signed char __s8;    20     typedef unsigned char __u8;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    29     typedef long long __s64;    30     typedef unsigned long long __u64;    15     typedef signed char s8;    16     typedef unsigned char u8;    18     typedef short s16;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;    28     typedef __u16 __le16;    29     typedef __u16 __be16;    30     typedef __u32 __le32;    31     typedef __u32 __be32;    36     typedef __u32 __wsum;   291     struct kernel_symbol {   unsigned long value;   const char *name; } ;    34     struct module ;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;   102     typedef __s32 int32_t;   106     typedef __u8 uint8_t;   108     typedef __u32 uint32_t;   111     typedef __u64 uint64_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   152     typedef u64 dma_addr_t;   157     typedef unsigned int gfp_t;   158     typedef unsigned int fmode_t;   161     typedef u64 phys_addr_t;   166     typedef phys_addr_t resource_size_t;   176     struct __anonstruct_atomic_t_6 {   int counter; } ;   176     typedef struct __anonstruct_atomic_t_6 atomic_t;   181     struct __anonstruct_atomic64_t_7 {   long counter; } ;   181     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   182     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   187     struct hlist_node ;   187     struct hlist_head {   struct hlist_node *first; } ;   191     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   202     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;   115     typedef void (*ctor_fn_t)();    83     struct ctl_table ;   283     struct _ddebug {   const char *modname;   const char *function;   const char *filename;   const char *format;   unsigned int lineno;   unsigned char flags; } ;    58     struct device ;    64     struct net_device ;   474     struct file_operations ;   486     struct completion ;   487     struct pt_regs ;   546     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;   131     struct timespec ;   132     struct compat_timespec ;   133     struct pollfd ;   134     struct __anonstruct_futex_27 {   u32 *uaddr;   u32 
val;   u32 flags;   u32 bitset;   u64 time;   u32 *uaddr2; } ;   134     struct __anonstruct_nanosleep_28 {   clockid_t clockid;   struct timespec *rmtp;   struct compat_timespec *compat_rmtp;   u64 expires; } ;   134     struct __anonstruct_poll_29 {   struct pollfd *ufds;   int nfds;   int has_timeout;   unsigned long tv_sec;   unsigned long tv_nsec; } ;   134     union __anonunion____missing_field_name_26 {   struct __anonstruct_futex_27 futex;   struct __anonstruct_nanosleep_28 nanosleep;   struct __anonstruct_poll_29 poll; } ;   134     struct restart_block {   long int (*fn)(struct restart_block *);   union __anonunion____missing_field_name_26 __annonCompField4; } ;    50     struct task_struct ;    39     struct page ;    26     struct mm_struct ;   288     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    66     struct __anonstruct____missing_field_name_32 {   unsigned int a;   unsigned int b; } ;    66     struct __anonstruct____missing_field_name_33 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    66     union __anonunion____missing_field_name_31 {   struct __anonstruct____missing_field_name_32 __annonCompField5;   struct __anonstruct____missing_field_name_33 __annonCompField6; } ;    66     struct desc_struct {   union __anonunion____missing_field_name_31 __annonCompField7; } ;    13     typedef unsigned long pteval_t;    14     typedef unsigned long pmdval_t;    15     typedef unsigned long pudval_t;    16     typedef unsigned long pgdval_t;    17     typedef unsigned long pgprotval_t;    19     struct __anonstruct_pte_t_34 {   pteval_t pte; } ;    19     typedef struct __anonstruct_pte_t_34 pte_t;    21     struct pgprot {   pgprotval_t pgprot; } ;   256     typedef struct pgprot pgprot_t;   258     struct __anonstruct_pgd_t_35 {   pgdval_t pgd; } ;   258     typedef struct __anonstruct_pgd_t_35 pgd_t;   276     struct __anonstruct_pud_t_36 {   pudval_t pud; } ;   276     typedef struct __anonstruct_pud_t_36 pud_t;   297     struct __anonstruct_pmd_t_37 {   pmdval_t pmd; } ;   297     typedef struct __anonstruct_pmd_t_37 pmd_t;   423     typedef struct page *pgtable_t;   434     struct file ;   445     struct seq_file ;   481     struct thread_struct ;   483     struct cpumask ;    20     struct qspinlock {   atomic_t val; } ;    33     typedef struct qspinlock arch_spinlock_t;    34     struct qrwlock {   atomic_t cnts;   arch_spinlock_t wait_lock; } ;    14     typedef struct qrwlock arch_rwlock_t;   247     struct math_emu_info {   long ___orig_eip;   struct pt_regs *regs; } ;    83     struct static_key {   atomic_t enabled; } ;    26     union __anonunion___u_42 {   int __val;   char __c[1U]; } ;    38     union __anonunion___u_44 {   int __val;   char __c[1U]; } ;    23     typedef atomic64_t atomic_long_t;   359     struct cpumask {   unsigned long bits[128U]; } ;    15     typedef struct cpumask cpumask_t;   657     typedef struct cpumask *cpumask_var_t;    
22     struct tracepoint_func {   void *func;   void *data;   int prio; } ;    28     struct tracepoint {   const char *name;   struct static_key key;   int (*regfunc)();   void (*unregfunc)();   struct tracepoint_func *funcs; } ;   233     struct fregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;    26     struct __anonstruct____missing_field_name_61 {   u64 rip;   u64 rdp; } ;    26     struct __anonstruct____missing_field_name_62 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;    26     union __anonunion____missing_field_name_60 {   struct __anonstruct____missing_field_name_61 __annonCompField13;   struct __anonstruct____missing_field_name_62 __annonCompField14; } ;    26     union __anonunion____missing_field_name_63 {   u32 padding1[12U];   u32 sw_reserved[12U]; } ;    26     struct fxregs_state {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion____missing_field_name_60 __annonCompField15;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion____missing_field_name_63 __annonCompField16; } ;    66     struct swregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   227     struct xstate_header {   u64 xfeatures;   u64 xcomp_bv;   u64 reserved[6U]; } ;   233     struct xregs_state {   struct fxregs_state i387;   struct xstate_header header;   u8 extended_state_area[0U]; } ;   254     union fpregs_state {   struct fregs_state fsave;   struct fxregs_state fxsave;   struct swregs_state soft;   struct xregs_state xsave;   u8 __padding[4096U]; } ;   271     struct fpu {   unsigned int last_cpu;   unsigned char fpstate_active;   unsigned char fpregs_active;   union fpregs_state state; } ;   181     struct seq_operations ;   415     struct perf_event ;   420     struct __anonstruct_mm_segment_t_75 {   unsigned long seg; } ;   420     typedef struct __anonstruct_mm_segment_t_75 mm_segment_t;   421     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   u32 status;   unsigned long fsbase;   unsigned long gsbase;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   mm_segment_t addr_limit;   unsigned char sig_on_uaccess_err;   unsigned char uaccess_err;   struct fpu fpu; } ;    48     struct thread_info {   unsigned long flags; } ;    33     struct lockdep_map ;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    28     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct hlist_node hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned int version;   unsigned long ops;   const char *name;   int name_version;   
unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int cpu;   unsigned long ip; } ;   207     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references;   unsigned int pin_count; } ;   593     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct raw_spinlock raw_spinlock_t;    33     struct __anonstruct____missing_field_name_77 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion____missing_field_name_76 {   struct raw_spinlock rlock;   struct __anonstruct____missing_field_name_77 __annonCompField19; } ;    33     struct spinlock {   union __anonunion____missing_field_name_76 __annonCompField20; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_78 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_78 rwlock_t;   408     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    52     typedef struct seqcount seqcount_t;   407     struct __anonstruct_seqlock_t_93 {   struct seqcount seqcount;   spinlock_t lock; } ;   407     typedef struct __anonstruct_seqlock_t_93 seqlock_t;   601     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;     7     typedef __s64 time64_t;    83     struct user_namespace ;    22     struct __anonstruct_kuid_t_94 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_94 kuid_t;    27     struct __anonstruct_kgid_t_95 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_95 kgid_t;   139     struct kstat {   u32 result_mask;   umode_t mode;   unsigned int nlink;   uint32_t blksize;   u64 attributes;   u64 ino;   dev_t dev;   dev_t rdev;   kuid_t uid;   kgid_t gid;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   struct timespec btime;   u64 blocks; } ;    48     struct vm_area_struct ;    39     struct __wait_queue_head {   spinlock_t lock;   struct list_head task_list; } ;    44     typedef struct __wait_queue_head wait_queue_head_t;    97     struct __anonstruct_nodemask_t_96 {   unsigned long bits[16U]; } ;    97     typedef struct __anonstruct_nodemask_t_96 nodemask_t;   247     typedef unsigned int isolate_mode_t;    13     struct optimistic_spin_queue {   atomic_t tail; } ;    39     struct ww_acquire_ctx ;    40     struct mutex {   atomic_long_t owner;   spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct list_head wait_list;   void *magic;   struct lockdep_map dep_map; } ;    72     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   struct ww_acquire_ctx *ww_ctx;   void *magic; } ;   229     struct rw_semaphore ;   230     struct rw_semaphore {   atomic_long_t count;   struct list_head wait_list;   raw_spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct task_struct *owner;   struct lockdep_map dep_map; } ;    28     typedef s64 ktime_t;  1109     struct 
timer_list {   struct hlist_node entry;   unsigned long expires;   void (*function)(unsigned long);   unsigned long data;   u32 flags;   struct lockdep_map lockdep_map; } ;   211     struct hrtimer ;   212     enum hrtimer_restart ;   235     struct workqueue_struct ;   236     struct work_struct ;    54     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   107     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq;   int cpu; } ;   268     struct notifier_block ;    53     struct notifier_block {   int (*notifier_call)(struct notifier_block *, unsigned long, void *);   struct notifier_block *next;   int priority; } ;    70     struct raw_notifier_head {   struct notifier_block *head; } ;   217     struct resource ;    68     struct resource {   resource_size_t start;   resource_size_t end;   const char *name;   unsigned long flags;   unsigned long desc;   struct resource *parent;   struct resource *sibling;   struct resource *child; } ;   236     struct pci_dev ;   144     struct pci_bus ;    38     struct ldt_struct ;    38     struct vdso_image ;    38     struct __anonstruct_mm_context_t_161 {   struct ldt_struct *ldt;   unsigned short ia32_compat;   struct mutex lock;   void *vdso;   const struct vdso_image *vdso_image;   atomic_t perf_rdpmc_allowed;   u16 pkey_allocation_map;   s16 execute_only_pkey;   void *bd_addr; } ;    38     typedef struct __anonstruct_mm_context_t_161 mm_context_t;    34     struct bio_vec ;  1266     struct llist_node ;    69     struct llist_node {   struct llist_node *next; } ;   551     struct rb_node {   unsigned long __rb_parent_color;   struct rb_node *rb_right;   struct rb_node *rb_left; } ;    41     struct rb_root {   struct rb_node *rb_node; } ;   835     struct nsproxy ;   836     struct ctl_table_root ;   837     struct ctl_table_header ;   838     struct ctl_dir ;    39     typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);    61     struct ctl_table_poll {   atomic_t event;   wait_queue_head_t wait; } ;   100     struct ctl_table {   const char *procname;   void *data;   int maxlen;   umode_t mode;   struct ctl_table *child;   proc_handler *proc_handler;   struct ctl_table_poll *poll;   void *extra1;   void *extra2; } ;   121     struct ctl_node {   struct rb_node node;   struct ctl_table_header *header; } ;   126     struct __anonstruct____missing_field_name_208 {   struct ctl_table *ctl_table;   int used;   int count;   int nreg; } ;   126     union __anonunion____missing_field_name_207 {   struct __anonstruct____missing_field_name_208 __annonCompField31;   struct callback_head rcu; } ;   126     struct ctl_table_set ;   126     struct ctl_table_header {   union __anonunion____missing_field_name_207 __annonCompField32;   struct completion *unregistering;   struct ctl_table *ctl_table_arg;   struct ctl_table_root *root;   struct ctl_table_set *set;   struct ctl_dir *parent;   struct ctl_node *node;   struct list_head inodes; } ;   148     struct ctl_dir {   struct ctl_table_header header;   struct rb_root root; } ;   154     struct ctl_table_set {   int (*is_seen)(struct ctl_table_set *);   struct ctl_dir dir; } ;   159     struct ctl_table_root {   struct ctl_table_set default_set;   struct ctl_table_set * (*lookup)(struct ctl_table_root *);   void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *);   int (*permissions)(struct 
ctl_table_header *, struct ctl_table *); } ;    37     struct cred ;    19     struct vmacache {   u32 seqnum;   struct vm_area_struct *vmas[4U]; } ;    41     struct task_rss_stat {   int events;   int count[4U]; } ;    49     struct mm_rss_stat {   atomic_long_t count[4U]; } ;    54     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;    61     struct tlbflush_unmap_batch {   struct cpumask cpumask;   bool flush_required;   bool writable; } ;    85     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   108     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct____missing_field_name_215 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct __anonstruct____missing_field_name_216 {   struct callback_head dup_xol_work;   unsigned long dup_xol_addr; } ;    73     union __anonunion____missing_field_name_214 {   struct __anonstruct____missing_field_name_215 __annonCompField35;   struct __anonstruct____missing_field_name_216 __annonCompField36; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion____missing_field_name_214 __annonCompField37;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    95     struct return_instance {   struct uprobe *uprobe;   unsigned long func;   unsigned long stack;   unsigned long orig_ret_vaddr;   bool chained;   struct return_instance *next; } ;   111     struct xol_area ;   112     struct uprobes_state {   struct xol_area *xol_area; } ;   151     struct address_space ;   152     struct mem_cgroup ;   153     union __anonunion____missing_field_name_217 {   struct address_space *mapping;   void *s_mem;   atomic_t compound_mapcount; } ;   153     union __anonunion____missing_field_name_218 {   unsigned long index;   void *freelist; } ;   153     struct __anonstruct____missing_field_name_222 {   unsigned short inuse;   unsigned short objects;   unsigned char frozen; } ;   153     union __anonunion____missing_field_name_221 {   atomic_t _mapcount;   unsigned int active;   struct __anonstruct____missing_field_name_222 __annonCompField40;   int units; } ;   153     struct __anonstruct____missing_field_name_220 {   union __anonunion____missing_field_name_221 __annonCompField41;   atomic_t _refcount; } ;   153     union __anonunion____missing_field_name_219 {   unsigned long counters;   struct __anonstruct____missing_field_name_220 __annonCompField42; } ;   153     struct dev_pagemap ;   153     struct __anonstruct____missing_field_name_224 {   struct page *next;   int pages;   int pobjects; } ;   153     struct __anonstruct____missing_field_name_225 {   unsigned long compound_head;   unsigned int compound_dtor;   unsigned int compound_order; } ;   153     struct __anonstruct____missing_field_name_226 {   unsigned long __pad;   pgtable_t pmd_huge_pte; } ;   153     union __anonunion____missing_field_name_223 {   struct list_head lru;   struct dev_pagemap *pgmap;   struct __anonstruct____missing_field_name_224 __annonCompField44;   struct callback_head callback_head;   struct __anonstruct____missing_field_name_225 __annonCompField45;   struct 
__anonstruct____missing_field_name_226 __annonCompField46; } ;   153     struct kmem_cache ;   153     union __anonunion____missing_field_name_227 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache; } ;   153     struct page {   unsigned long flags;   union __anonunion____missing_field_name_217 __annonCompField38;   union __anonunion____missing_field_name_218 __annonCompField39;   union __anonunion____missing_field_name_219 __annonCompField43;   union __anonunion____missing_field_name_223 __annonCompField47;   union __anonunion____missing_field_name_227 __annonCompField48;   struct mem_cgroup *mem_cgroup; } ;   266     struct userfaultfd_ctx ;   266     struct vm_userfaultfd_ctx {   struct userfaultfd_ctx *ctx; } ;   273     struct __anonstruct_shared_228 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   273     struct anon_vma ;   273     struct vm_operations_struct ;   273     struct mempolicy ;   273     struct vm_area_struct {   unsigned long vm_start;   unsigned long vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   struct __anonstruct_shared_228 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy;   struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;   346     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   351     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   357     struct kioctx_table ;   358     struct linux_binfmt ;   358     struct mmu_notifier_mm ;   358     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end;   pgd_t *pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   atomic_long_t nr_pmds;   int map_count;   spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long data_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct user_namespace *user_ns;   struct file *exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state;   atomic_long_t hugetlb_usage;   
struct work_struct async_put_work; } ;   544     struct vm_fault ;   598     struct vdso_image {   void *data;   unsigned long size;   unsigned long alt;   unsigned long alt_len;   long sym_vvar_start;   long sym_vvar_page;   long sym_hpet_page;   long sym_pvclock_page;   long sym_VDSO32_NOTE_MASK;   long sym___kernel_sigreturn;   long sym___kernel_rt_sigreturn;   long sym___kernel_vsyscall;   long sym_int80_landing_pad; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    18     typedef __u64 Elf64_Off;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;   219     struct elf64_hdr {   unsigned char e_ident[16U];   Elf64_Half e_type;   Elf64_Half e_machine;   Elf64_Word e_version;   Elf64_Addr e_entry;   Elf64_Off e_phoff;   Elf64_Off e_shoff;   Elf64_Word e_flags;   Elf64_Half e_ehsize;   Elf64_Half e_phentsize;   Elf64_Half e_phnum;   Elf64_Half e_shentsize;   Elf64_Half e_shnum;   Elf64_Half e_shstrndx; } ;   235     typedef struct elf64_hdr Elf64_Ehdr;   314     struct elf64_shdr {   Elf64_Word sh_name;   Elf64_Word sh_type;   Elf64_Xword sh_flags;   Elf64_Addr sh_addr;   Elf64_Off sh_offset;   Elf64_Xword sh_size;   Elf64_Word sh_link;   Elf64_Word sh_info;   Elf64_Xword sh_addralign;   Elf64_Xword sh_entsize; } ;   326     typedef struct elf64_shdr Elf64_Shdr;    65     struct radix_tree_root ;    65     union __anonunion____missing_field_name_233 {   struct list_head private_list;   struct callback_head callback_head; } ;    65     struct radix_tree_node {   unsigned char shift;   unsigned char offset;   unsigned char count;   unsigned char exceptional;   struct radix_tree_node *parent;   struct radix_tree_root *root;   union __anonunion____missing_field_name_233 __annonCompField49;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   107     struct radix_tree_root {   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;   566     struct idr {   struct radix_tree_root idr_rt;   unsigned int idr_next; } ;   176     struct ida {   struct radix_tree_root ida_rt; } ;   216     struct dentry ;   217     struct iattr ;   218     struct super_block ;   219     struct file_system_type ;   220     struct kernfs_open_node ;   221     struct kernfs_iattrs ;   245     struct kernfs_root ;   245     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    86     struct kernfs_node ;    86     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    90     struct kernfs_ops ;    90     struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size;   struct kernfs_node *notify_next; } ;    97     union __anonunion____missing_field_name_242 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    97     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion____missing_field_name_242 __annonCompField50;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   139     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int 
(*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *);   int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;   158     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   174     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   struct seq_file *seq_file;   void *priv;   struct mutex mutex;   struct mutex prealloc_mutex;   int event;   struct list_head list;   char *prealloc_buf;   size_t atomic_write_len;   bool mmapped;   bool released;   const struct vm_operations_struct *vm_ops; } ;   194     struct kernfs_ops {   int (*open)(struct kernfs_open_file *);   void (*release)(struct kernfs_open_file *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   bool prealloc;   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   521     struct sock ;   522     struct kobject ;   523     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   529     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   umode_t  (*is_bin_visible)(struct kobject *, struct bin_attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    92     struct bin_attribute {   struct attribute attr;   size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t  (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   165     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;   530     struct refcount_struct {   atomic_t refs; } ;    11     typedef struct refcount_struct refcount_t;    41     struct kref {   refcount_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   
115     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   123     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   131     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   148     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   223     struct kernel_param ;   228     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    62     struct kparam_string ;    62     struct kparam_array ;    62     union __anonunion____missing_field_name_245 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    62     struct kernel_param {   const char *name;   struct module *mod;   const struct kernel_param_ops *ops;   const u16 perm;   s8 level;   u8 flags;   union __anonunion____missing_field_name_245 __annonCompField51; } ;    83     struct kparam_string {   unsigned int maxlen;   char *string; } ;    89     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   470     struct latch_tree_node {   struct rb_node node[2U]; } ;   211     struct mod_arch_specific { } ;    38     struct exception_table_entry ;    39     struct module_param_attrs ;    39     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    49     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;   276     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   283     struct mod_tree_node {   struct module *mod;   struct latch_tree_node node; } ;   288     struct module_layout {   void *base;   unsigned int size;   unsigned int text_size;   unsigned int ro_size;   unsigned int ro_after_init_size;   struct mod_tree_node mtn; } ;   304     struct mod_kallsyms {   Elf64_Sym *symtab;   unsigned int num_symtab;   char *strtab; } ;   318     struct klp_modinfo {   Elf64_Ehdr hdr;   Elf64_Shdr *sechdrs;   char *secstrings;   unsigned int symndx; } ;   326     struct module_sect_attrs ;   326     struct module_notes_attrs ;   326     struct trace_event_call ;   326     struct trace_enum_map ;   326     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const s32 *crcs;   unsigned int num_syms;   struct mutex param_lock;   struct kernel_param *kp;   
unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const s32 *gpl_crcs;   const struct kernel_symbol *unused_syms;   const s32 *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const s32 *unused_gpl_crcs;   bool sig_ok;   bool async_probe_requested;   const struct kernel_symbol *gpl_future_syms;   const s32 *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   struct module_layout core_layout;   struct module_layout init_layout;   struct mod_arch_specific arch;   unsigned long taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   struct mod_kallsyms *kallsyms;   struct mod_kallsyms core_kallsyms;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct trace_event_call **trace_events;   unsigned int num_trace_events;   struct trace_enum_map **trace_enums;   unsigned int num_trace_enums;   bool klp;   bool klp_alive;   struct klp_modinfo *klp_info;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   atomic_t refcnt;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;    33     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;    64     struct irq_domain ;   440     struct proc_dir_entry ;   133     struct exception_table_entry {   int insn;   int fixup;   int handler; } ;    61     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   u8 state;   u8 is_rel; } ;   113     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t  (*get_time)();   ktime_t offset; } ;   146     struct hrtimer_cpu_base {   raw_spinlock_t lock;   seqcount_t seq;   struct hrtimer *running;   unsigned int cpu;   unsigned int active_bases;   unsigned int clock_was_set_seq;   bool migration_enabled;   bool nohz_active;   unsigned char in_hrtirq;   unsigned char hres_active;   unsigned char hang_detected;   ktime_t expires_next;   struct hrtimer *next_timer;   unsigned int nr_events;   unsigned int nr_retries;   unsigned int nr_hangs;   unsigned int max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;   506     struct tasklet_struct {   struct tasklet_struct *next;   unsigned long state;   atomic_t count;   void (*func)(unsigned long);   unsigned long data; } ;    13     typedef unsigned long kernel_ulong_t;    14     struct pci_device_id {   __u32 vendor;   __u32 device;   __u32 subvendor;   __u32 subdevice;   __u32 class;   __u32 class_mask;   kernel_ulong_t driver_data; } ;   187     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   
230     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   676     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    93     struct hlist_bl_node ;    93     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_311 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_310 {   struct __anonstruct____missing_field_name_311 __annonCompField60; } ;   114     struct lockref {   union __anonunion____missing_field_name_310 __annonCompField61; } ;    77     struct path ;    78     struct vfsmount ;    79     struct __anonstruct____missing_field_name_313 {   u32 hash;   u32 len; } ;    79     union __anonunion____missing_field_name_312 {   struct __anonstruct____missing_field_name_313 __annonCompField62;   u64 hash_len; } ;    79     struct qstr {   union __anonunion____missing_field_name_312 __annonCompField63;   const unsigned char *name; } ;    66     struct dentry_operations ;    66     union __anonunion____missing_field_name_314 {   struct list_head d_lru;   wait_queue_head_t *d_wait; } ;    66     union __anonunion_d_u_315 {   struct hlist_node d_alias;   struct hlist_bl_node d_in_lookup_hash;   struct callback_head d_rcu; } ;    66     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   union __anonunion____missing_field_name_314 __annonCompField64;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_315 d_u; } ;   122     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   int (*d_init)(struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(const struct path *, bool );   struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;   593     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    19     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   int nid;   struct mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    80     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;   
189     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   196     struct pid_namespace ;   196     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    44     struct rcuwait {   struct task_struct *task; } ;    32     enum rcu_sync_type {   RCU_SYNC = 0,   RCU_SCHED_SYNC = 1,   RCU_BH_SYNC = 2 } ;    38     struct rcu_sync {   int gp_state;   int gp_count;   wait_queue_head_t gp_wait;   int cb_state;   struct callback_head cb_head;   enum rcu_sync_type gp_type; } ;    66     struct percpu_rw_semaphore {   struct rcu_sync rss;   unsigned int *read_count;   struct rw_semaphore rw_sem;   struct rcuwait writer;   int readers_block; } ;   144     struct delayed_call {   void (*fn)(void *);   void *arg; } ;   283     struct backing_dev_info ;   284     struct bdi_writeback ;   286     struct export_operations ;   288     struct iovec ;   289     struct kiocb ;   290     struct pipe_inode_info ;   291     struct poll_table_struct ;   292     struct kstatfs ;   293     struct swap_info_struct ;   294     struct iov_iter ;   295     struct fscrypt_info ;   296     struct fscrypt_operations ;    76     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   210     struct percpu_counter {   raw_spinlock_t lock;   s64 count;   struct list_head list;   s32 *counters; } ;   213     struct dquot ;   214     struct kqid ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_317 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_317 kprojid_t;   181     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_318 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_318 __annonCompField65;   enum quota_type type; } ;   194     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time64_t dqb_btime;   time64_t dqb_itime; } ;   216     struct quota_format_type ;   217     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   282     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid 
dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   309     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   321     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   338     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   int d_ino_warns;   int d_spc_warns;   u64 d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   361     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   407     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   418     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   431     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, const struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   447     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   511     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   540     struct writeback_control ;   541     struct kiocb {   struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int ki_flags; } ;   317     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *); 
int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   bool  (*isolate_page)(struct page *, isolate_mode_t );   void (*putback_page)(struct page *);   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;

[Trace preamble, continued: the remaining kernel declarations pulled in by the analyzed driver, listed as "header line: declaration", member lists condensed to their names.]

 376  struct address_space (host inode, page_tree radix tree, i_mmap, nrpages, a_ops, gfp_mask, private list/data)
 398  struct request_queue, struct hd_struct, struct gendisk (forward declarations)
 399  struct block_device (bd_dev, bd_inode, bd_super, holder fields, bd_contains, bd_block_size, bd_part, bd_disk, bd_queue, bd_bdi, fsfreeze state)
 515  struct posix_acl (forward)
 542  struct inode (i_mode, i_uid/i_gid, i_flags, ACL pointers, i_op, i_sb, i_mapping, i_ino, i_nlink union, i_rdev, i_size, atime/mtime/ctime, i_blkbits, i_blocks, i_state, i_rwsem, writeback and LRU links, i_count, i_fop, i_flctx, embedded i_data address_space, pipe/bdev/cdev/link union, i_generation, fsnotify and fscrypt state, i_private)
 803  struct fown_struct; 811 struct file_ra_state
 834  struct file (f_path, f_inode, f_op, f_lock, f_count, f_flags, f_mode, f_pos, f_owner, f_cred, f_ra, f_version, private_data, epoll links, f_mapping)
 919  fl_owner_t; 921 struct file_lock_operations; 927 struct lock_manager_operations (lm_compare_owner, lm_owner_key, lm_get/put_owner, lm_notify, lm_grant, lm_break, lm_change, lm_setup)
 955  struct nfs_lock_info; 15 struct nfs4_lock_info
  19  struct file_lock (fl_next/fl_list/fl_link/fl_block, fl_owner, fl_flags, fl_type, fl_pid, fl_wait, fl_file, fl_start/fl_end, lease break times, fl_ops, fl_lmops, nfs/nfs4/afs union)
1007  struct file_lock_context (flc_lock, flock/posix/lease lists); 1227 struct fasync_struct; 1262 struct sb_writers
1292  struct super_block (s_list, s_dev, s_blocksize, s_maxbytes, s_type, s_op, quota ops, s_flags, s_magic, s_root, s_umount, s_active, s_xattr, s_bdev, s_bdi, s_mtd, s_quota_types, s_writers, s_id[32], s_uuid[16], s_fs_info, s_time_gran, s_d_op, shrinker, per-sb inode list locks and lists)
1579  struct fiemap_extent_info; 1617 struct dir_context (actor, pos)
1624  struct file_operations (owner, llseek, read, write, read_iter/write_iter, iterate(_shared), poll, unlocked_ioctl/compat_ioctl, mmap, open, flush, release, fsync, fasync, lock, sendpage, get_unmapped_area, check_flags, flock, splice_write/splice_read, setlease, fallocate, show_fdinfo, copy_file_range, clone_file_range, dedupe_file_range)
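An illustrative aside, not part of the verifier output: struct file_operations above is the dispatch table a driver hands to the VFS. A minimal sketch of filling one in, with a hypothetical demo_read handler; callbacks left NULL are simply reported as unsupported:

    #include <linux/fs.h>
    #include <linux/module.h>

    /* Hypothetical handler: hands back a fixed message, honouring *ppos. */
    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            static const char msg[] = "hello\n";

            /* simple_read_from_buffer() handles offsets and short reads. */
            return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
    }

    static const struct file_operations demo_fops = {
            .owner  = THIS_MODULE,
            .read   = demo_read,
            .llseek = no_llseek,
    };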
1692  struct inode_operations (lookup, get_link, permission, get_acl, readlink, create, link, unlink, symlink, mkdir, rmdir, mknod, rename, setattr, getattr, listxattr, fiemap, update_time, atomic_open, tmpfile, set_acl)
1771  struct super_operations (alloc_inode/destroy_inode, dirty_inode, write_inode, drop_inode/evict_inode, put_super, sync_fs, freeze_super/freeze_fs, thaw_super/unfreeze_fs, statfs, remount_fs, umount_begin, show_options/devname/path/stats, quota_read/quota_write, get_dquots, bdev_try_to_free_page, nr_cached_objects, free_cached_objects)
2014  struct file_system_type (name, fs_flags, mount, kill_sb, owner, next, fs_supers, lock-class keys)
3219  struct assoc_array; 31 key_serial_t; 34 key_perm_t; 42 struct keyring_index_key; 91 union key_payload
 128  struct key (usage, serial, sem, user, security, expiry/revoked_at, last_used_at, uid/gid, perm, quotalen/datalen, flags, index key, payload-or-keyring union, restrict_link)
 380  struct audit_context (forward); 26 struct sysv_sem, struct sysv_shm
  12  enum kcov_mode (DISABLED, TRACE); 84 struct plist_node; 44 struct seccomp (mode, filter); 11 struct latency_record (backtrace[12], count, time, max)
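Another aside for orientation: struct file_system_type above is what a filesystem registers with the kernel. A minimal sketch, assuming a hypothetical "demofs" whose fill_super is only a stub:

    #include <linux/fs.h>
    #include <linux/module.h>

    /* Stub: a real filesystem would allocate a root inode and set sb->s_op
     * here; returning an error makes the mount fail cleanly. */
    static int demo_fill_super(struct super_block *sb, void *data, int silent)
    {
            return -ENOSYS;
    }

    static struct dentry *demo_mount(struct file_system_type *fs_type, int flags,
                                     const char *dev_name, void *data)
    {
            return mount_nodev(fs_type, flags, data, demo_fill_super);
    }

    static struct file_system_type demo_fs_type = {
            .owner   = THIS_MODULE,
            .name    = "demofs",
            .mount   = demo_mount,
            .kill_sb = kill_anon_super,   /* tears down what mount_nodev built */
    };

    static int __init demo_fs_init(void)
    {
            return register_filesystem(&demo_fs_type);
    }

    static void __exit demo_fs_exit(void)
    {
            unregister_filesystem(&demo_fs_type);
    }

    module_init(demo_fs_init);
    module_exit(demo_fs_exit);
    MODULE_LICENSE("GPL");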
  24  sigset_t; 38 union sigval / sigval_t
  11  struct siginfo / siginfo_t (si_signo, si_errno, si_code, with _kill, _timer, _rt, _sigchld, _sigfault (incl. _addr_bnd/_pkey), _sigpoll and _sigsys payload variants)
  21  struct sigpending; 65 struct task_io_accounting (rchar, wchar, syscr, syscw, read/write_bytes, cancelled_write_bytes)
  45  forward declarations: bio_list, blk_plug, cfs_rq, fs_struct, futex_pi_state, io_context, nameidata, perf_event_context, reclaim_state, robust_list_head, sighand_struct, task_delay_info, task_group
 187  struct prev_cputime; 203 struct task_cputime; 220 struct sched_info; 244 struct load_weight; 261 struct sched_avg
 322  struct sched_statistics (wait/sleep/block maxima and sums, slice_max, failed-migration counters, wakeup counters)
 357  struct sched_entity (load, run_node, on_rq, exec_start, sum_exec_runtime, vruntime, statistics, parent, cfs_rq, avg)
 393  struct sched_rt_entity; 411 struct sched_dl_entity (dl_runtime, dl_deadline, dl_period, dl_bw, throttle/boost/yield flags, dl_timer); 478 struct wake_q_node
 482  struct task_struct (thread_info, state, stack, usage, flags, ptrace, scheduling fields: prio/static_prio/normal_prio, sched_class, se/rt/dl entities, policy, cpus_allowed; mm/active_mm, vmacache, exit state and codes, jobctl, personality, restart_block, pid/tgid, parent/children/sibling links, pid_link pids[3], thread_group, vfork_done, utime/stime, start times, fault counters, cputime_expires, cred pointers, comm[16], fs/files/nsproxy, signal/sighand, blocked signal sets, sas_ss state, audit and seccomp state, pi_lock and rt-mutex state, lockdep bookkeeping (held_locks[48]), journal_info, bio_list, plug, reclaim_state, backing_dev_info, io_context, ioac, mems_allowed, cgroups, robust/futex lists, perf contexts, mempolicy and NUMA fault bookkeeping, splice_pipe, task_frag, delays, dirty-throttle counters, latency_record[32], timer_slack_ns, kcov/kasan state, thread_struct)
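Aside: driver code rarely touches struct task_struct directly, but the current macro points at the running task's instance of it. A small sketch reading a few of the fields listed above:

    #include <linux/sched.h>
    #include <linux/printk.h>

    /* Log which task invoked us; current is the running task_struct. */
    static void demo_log_caller(void)
    {
            pr_info("demo: called from %s (pid %d, prio %d)\n",
                    current->comm, task_pid_nr(current), current->prio);
    }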
1562  struct user_struct (per-uid counters, mq_bytes, locked_shm, uid/session keyrings, uidhash_node, locked_vm)
  60  struct group_info; 86 struct cred (uid/gid, suid/sgid, euid/egid, fsuid/fsgid, securebits, capability sets, jit_keyring, session/process/thread keyrings, request_key_auth, security, user, user_ns, group_info)
 369  struct seq_file (buf, size, from, count, index, lock, op, private); 30 struct seq_operations (start, stop, next, show)
 200  struct dev_pin_info; 58 struct pm_message / pm_message_t
  65  struct dev_pm_ops (prepare/complete; suspend/resume, freeze/thaw, poweroff/restore, each also in _late/_early and _noirq variants; runtime_suspend, runtime_resume, runtime_idle)
 315  enum rpm_status (ACTIVE, RESUMING, SUSPENDED, SUSPENDING); 322 enum rpm_request; 333 struct pm_subsys_data
 551  struct dev_pm_info (power_state, wakeup flags and source, suspend timer, usage_count, disable_depth, autosuspend fields, request/runtime_status, runtime accounting, set_latency_tolerance, qos)
 613  struct dev_pm_domain (ops, detach, activate, sync, dismiss); 76 struct dev_archdata
  62  struct bus_type (name, dev_name, dev_root, attribute groups, match, uevent, probe, remove, shutdown, online/offline, suspend/resume, num_vf, pm, iommu_ops, subsys_private)
 206  enum probe_type (DEFAULT_STRATEGY, PREFER_ASYNCHRONOUS, FORCE_SYNCHRONOUS)
 212  struct device_driver (name, bus, owner, mod_name, suppress_bind_attrs, probe_type, of/acpi match tables, probe, remove, shutdown, suspend/resume, groups, pm, driver_private)
 362  struct class (name, owner, attribute groups, dev_uevent, devnode, release hooks, suspend/resume, ns_type, pm); 457 struct class_attribute; 527 struct device_type; 555 struct device_attribute
 727  struct device_dma_parameters; 790 enum dl_dev_state; 797 struct dev_links_info
 817  struct device (parent, kobj, init_name, type, mutex, bus, driver, platform_data/driver_data, links, power, pm_domain, msi_domain, pins, msi_list, numa_node, dma_ops, dma masks, dma_parms, dma_pools, dma_mem, cma_area, archdata, of_node, fwnode, devt, id, devres lock/list, knode_class, class, groups, release, iommu_group, iommu_fwspec, offline flags)
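Aside: struct device_driver above is usually embedded in a bus-specific wrapper rather than used directly. A minimal sketch using the platform bus (struct platform_driver embeds a struct device_driver as its .driver member; the "demo-device" name is hypothetical):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            dev_info(&pdev->dev, "bound\n");  /* &pdev->dev is a struct device */
            return 0;
    }

    static int demo_remove(struct platform_device *pdev)
    {
            return 0;
    }

    /* The driver core matches this against devices named "demo-device". */
    static struct platform_driver demo_pdrv = {
            .probe  = demo_probe,
            .remove = demo_remove,
            .driver = {
                    .name = "demo-device",
            },
    };
    module_platform_driver(demo_pdrv);
    MODULE_LICENSE("GPL");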
 976  struct wakeup_source (name, entry, lock, wakeirq, timer, total/max/last times, event and wakeup counters, active flags)
  70  struct pci_slot; 108 pci_power_t; 135 pci_channel_state_t; 136 enum pci_channel_state (io_normal, io_frozen, io_perm_failure); 161 pci_dev_flags_t; 188 pci_bus_flags_t
 250  struct pci_dev (bus links, devfn, vendor/device and subsystem ids, class, revision, capability offsets (aer/pcie/msi/msix/pm), driver, dma_mask, current_state, PME and D-state capability bits, d3 delays, link_state, error_state, embedded struct device, cfg_size, irq, resource[17], per-function flag bits (msi/msix enabled, SR-IOV physfn/virtfn, hotplug bridge, ...), dev_flags, enable_cnt, saved config space, ROM attributes, PTM state, msi_irq_groups, vpd, sriov/physfn union, ATS fields, rom, driver_override)
 482  struct pci_bus (node, parent, children, devices, self, slots, resources, busn_res, ops, msi, number, primary, bus speeds, name[48], bridge_ctl, bus_flags, bridge, embedded struct device, legacy I/O attrs)
 606  struct pci_ops (add_bus, remove_bus, map_bus, read, write); 636 struct pci_dynids; 650 pci_ers_result_t
 660  struct pci_error_handlers (error_detected, mmio_enabled, slot_reset, reset_notify, resume)
 690  struct pci_driver (node, name, id_table, probe, remove, suspend(_late), resume(_early), shutdown, sriov_configure, err_handler, embedded struct device_driver, dynids)
  68  struct percpu_ref (count, percpu_count_ptr, release, confirm_switch); 277 struct vm_fault (vma, flags, gfp_mask, address, pmd/pud/pte, cow_page, page, ptl); 340 enum page_entry_size
 346  struct vm_operations_struct (open, close, mremap, fault, huge_fault, map_pages, page_mkwrite, pfn_mkwrite, access, name, set_policy/get_policy, find_special_page)
2513  struct scatterlist (page_link, offset, length, dma_address, dma_length); 21 struct sg_table; 96 enum dma_data_direction (BIDIRECTIONAL, TO_DEVICE, FROM_DEVICE, NONE)
 158  struct dma_map_ops (alloc/free, mmap, get_sgtable, map_page/unmap_page, map_sg/unmap_sg, map_resource/unmap_resource, sync_single_for_cpu/device, sync_sg_for_cpu/device, mapping_error, dma_supported, set_dma_mask, is_phys)
  56  struct iovec; 21 struct kvec; 29 struct iov_iter (type, iov_offset, count, iov/kvec/bvec/pipe union, nr_segs)
1437  struct dql (dynamic queue limits: num_queued, adj_limit, limit, completion counters, slack tracking)
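Aside: the struct dma_map_ops callbacks above sit behind the generic DMA API; drivers call wrappers such as dma_map_single() rather than the ops table itself. A sketch of the usual map-and-check pattern (demo_map_rx_buffer is a hypothetical helper):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Stream-map a buffer for device-to-memory DMA; *handle is what the
     * hardware gets programmed with, not the CPU address. */
    static int demo_map_rx_buffer(struct device *dev, void *buf, size_t len,
                                  dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM;
            /* ... later: dma_unmap_single(dev, *handle, len, DMA_FROM_DEVICE); */
            return 0;
    }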
  11  __kernel_sa_family_t / sa_family_t; 24 struct sockaddr (sa_family, sa_data[14])
  43  sync_serial_settings; 50 te1_settings; 55 raw_hdlc_proto; 65 fr_proto; 69 fr_proto_pvc; 74 fr_proto_pvc_info; 79 cisco_proto (WAN/HDLC ioctl settings)
 117  struct ifmap; 201 struct if_settings; 220 struct ifreq (ifrn_name union plus address/flags/mtu/map/slave/data/settings union)
  18  compat_time_t; 39 compat_long_t; 45 compat_uptr_t; 46 struct compat_timespec; 278 struct compat_robust_list; 282 struct compat_robust_list_head
  15  netdev_features_t; 99 struct in6_addr; 46 struct ethhdr (h_dest[6], h_source[6], h_proto)
 246  struct pipe_buffer (page, offset, len, ops, flags); 27 struct pipe_inode_info (mutex, wait, buffer ring, reader/writer counts, fasync lists, user)
  63  struct pipe_buf_operations (can_merge, confirm, release, steal, get)
 255  struct nf_bridge_info; 279 struct sk_buff_head; 501 sk_buff_data_t; 502 struct skb_mstamp
 565  struct sk_buff (next/prev-or-rbnode union, sk, dev, cb[48], _skb_refdst, destructor, sp, _nfct, nf_bridge, len/data_len, mac_len, hdr_len, queue_mapping, clone and checksum flag bits, pkt_type, tc bits, tc_index, csum union, priority, skb_iif, hash, vlan_proto/vlan_tci, napi_id, secmark, mark, inner and outer protocol/header offsets, tail, end, head, data, truesize, users)
 852  struct dst_entry (forward)
  39  struct ethtool_cmd; 130 struct ethtool_drvinfo; 194 struct ethtool_wolinfo; 238 struct ethtool_tunable; 256 struct ethtool_regs; 285 struct ethtool_eeprom; 311 struct ethtool_eee; 340 struct ethtool_modinfo
 357  struct ethtool_coalesce (rx/tx usecs and max frames, with _irq/_low/_high variants, adaptive rx/tx flags, pkt_rate_low/high, stats_block_coalesce_usecs, rate_sample_interval)
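Aside: struct sk_buff above is the kernel's packet container. A minimal sketch of the usual allocate/reserve/fill pattern (demo_build_skb is a hypothetical helper):

    #include <linux/skbuff.h>
    #include <linux/if_ether.h>
    #include <linux/string.h>

    /* Allocate an skb, keep headroom for an Ethernet header, then append
     * len bytes of payload. */
    static struct sk_buff *demo_build_skb(const void *payload, unsigned int len)
    {
            struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_KERNEL);

            if (!skb)
                    return NULL;
            skb_reserve(skb, ETH_HLEN);              /* headroom for the header */
            memcpy(skb_put(skb, len), payload, len); /* append the payload */
            return skb;
    }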
 456  struct ethtool_ringparam (rx/rx_mini/rx_jumbo/tx max and current pending)
 493  struct ethtool_channels; 521 struct ethtool_pauseparam (autoneg, rx_pause, tx_pause); 627 struct ethtool_test; 659 struct ethtool_stats
 701  flow classification: ethtool_tcpip4_spec (701), ethtool_ah_espip4_spec (734), ethtool_usrip4_spec (750), ethtool_tcpip6_spec (770), ethtool_ah_espip6_spec (788), ethtool_usrip6_spec (804), union ethtool_flow_union (820), struct ethtool_flow_ext (837), struct ethtool_rx_flow_spec (856), struct ethtool_rxnfc (906)
1077  struct ethtool_flash; 1085 struct ethtool_dump; 1161 struct ethtool_ts_info
1539  struct ethtool_link_settings (cmd, speed, duplex, port, autoneg, mdio/mdix fields, link_mode_masks[]); 39 enum ethtool_phys_id_state; 97 struct ethtool_link_ksettings (base settings plus supported/advertising/lp_advertising bitmaps)
 158  struct ethtool_ops (get/set_settings, get_drvinfo, get_regs(_len), get/set_wol, get/set_msglevel, nway_reset, get_link, EEPROM accessors, get/set_coalesce, ring and pause params, self_test, get_strings, set_phys_id, get_ethtool_stats, begin/complete, priv flags, get_sset_count, get/set_rxnfc, flash_device, reset, RSS key/indirection accessors, get/set_channels, dump accessors, get_ts_info, module info and EEPROM, get/set_eee, tunables, per-queue coalesce, get/set_link_ksettings)
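Aside: a driver exposes the struct ethtool_ops callbacks above by filling in only what it supports; the ethtool core checks for NULL before dispatching. A minimal sketch with a hypothetical get_drvinfo (the table is attached as ndev->ethtool_ops at netdev setup time):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    static void demo_get_drvinfo(struct net_device *ndev,
                                 struct ethtool_drvinfo *info)
    {
            strlcpy(info->driver, "demo", sizeof(info->driver));
            strlcpy(info->version, "1.0", sizeof(info->version));
    }

    static const struct ethtool_ops demo_ethtool_ops = {
            .get_drvinfo = demo_get_drvinfo,
            .get_link    = ethtool_op_get_link,  /* generic carrier check */
    };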
(*get_eee)(struct net_device *, struct ethtool_eee *);   int (*set_eee)(struct net_device *, struct ethtool_eee *);   int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *);   int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *);   int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *);   int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;   375     struct prot_inuse ;   376     struct netns_core {   struct ctl_table_header *sysctl_hdr;   int sysctl_somaxconn;   struct prot_inuse *inuse; } ;    38     struct u64_stats_sync { } ;   164     struct ipstats_mib {   u64 mibs[36U];   struct u64_stats_sync syncp; } ;    61     struct icmp_mib {   unsigned long mibs[28U]; } ;    67     struct icmpmsg_mib {   atomic_long_t mibs[512U]; } ;    72     struct icmpv6_mib {   unsigned long mibs[6U]; } ;    83     struct icmpv6msg_mib {   atomic_long_t mibs[512U]; } ;    93     struct tcp_mib {   unsigned long mibs[16U]; } ;   100     struct udp_mib {   unsigned long mibs[9U]; } ;   106     struct linux_mib {   unsigned long mibs[119U]; } ;   112     struct linux_xfrm_mib {   unsigned long mibs[29U]; } ;   118     struct netns_mib {   struct tcp_mib *tcp_statistics;   struct ipstats_mib *ip_statistics;   struct linux_mib *net_statistics;   struct udp_mib *udp_statistics;   struct udp_mib *udplite_statistics;   struct icmp_mib *icmp_statistics;   struct icmpmsg_mib *icmpmsg_statistics;   struct proc_dir_entry *proc_net_devsnmp6;   struct udp_mib *udp_stats_in6;   struct udp_mib *udplite_stats_in6;   struct ipstats_mib *ipv6_statistics;   struct icmpv6_mib *icmpv6_statistics;   struct icmpv6msg_mib *icmpv6msg_statistics;   struct linux_xfrm_mib *xfrm_statistics; } ;    26     struct netns_unix {   int sysctl_max_dgram_qlen;   struct ctl_table_header *ctl; } ;    12     struct netns_packet {   struct mutex sklist_lock;   struct hlist_head sklist; } ;    14     struct netns_frags {   struct percpu_counter mem;   int timeout;   int high_thresh;   int low_thresh;   int max_dist; } ;   181     struct ipv4_devconf ;   182     struct fib_rules_ops ;   183     struct fib_table ;   184     struct local_ports {   seqlock_t lock;   int range[2U];   bool warned; } ;    24     struct ping_group_range {   seqlock_t lock;   kgid_t range[2U]; } ;    29     struct inet_hashinfo ;    30     struct inet_timewait_death_row {   atomic_t tw_count;   struct inet_hashinfo *hashinfo;   int sysctl_tw_recycle;   int sysctl_max_tw_buckets; } ;    39     struct inet_peer_base ;    39     struct xt_table ;    39     struct netns_ipv4 {   struct ctl_table_header *forw_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *ipv4_hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *xfrm4_hdr;   struct ipv4_devconf *devconf_all;   struct ipv4_devconf *devconf_dflt;   struct fib_rules_ops *rules_ops;   bool fib_has_custom_rules;   struct fib_table *fib_main;   struct fib_table *fib_default;   int fib_num_tclassid_users;   struct hlist_head *fib_table_hash;   bool fib_offload_disabled;   struct sock *fibnl;   struct sock **icmp_sk;   struct sock *mc_autojoin_sk;   struct inet_peer_base *peers;   struct sock **tcp_sk;   struct netns_frags frags;   struct xt_table *iptable_filter;   struct xt_table *iptable_mangle;   struct 
xt_table *iptable_raw;   struct xt_table *arptable_filter;   struct xt_table *iptable_security;   struct xt_table *nat_table;   int sysctl_icmp_echo_ignore_all;   int sysctl_icmp_echo_ignore_broadcasts;   int sysctl_icmp_ignore_bogus_error_responses;   int sysctl_icmp_ratelimit;   int sysctl_icmp_ratemask;   int sysctl_icmp_errors_use_inbound_ifaddr;   struct local_ports ip_local_ports;   int sysctl_tcp_ecn;   int sysctl_tcp_ecn_fallback;   int sysctl_ip_default_ttl;   int sysctl_ip_no_pmtu_disc;   int sysctl_ip_fwd_use_pmtu;   int sysctl_ip_nonlocal_bind;   int sysctl_ip_dynaddr;   int sysctl_ip_early_demux;   int sysctl_fwmark_reflect;   int sysctl_tcp_fwmark_accept;   int sysctl_tcp_l3mdev_accept;   int sysctl_tcp_mtu_probing;   int sysctl_tcp_base_mss;   int sysctl_tcp_probe_threshold;   u32 sysctl_tcp_probe_interval;   int sysctl_tcp_keepalive_time;   int sysctl_tcp_keepalive_probes;   int sysctl_tcp_keepalive_intvl;   int sysctl_tcp_syn_retries;   int sysctl_tcp_synack_retries;   int sysctl_tcp_syncookies;   int sysctl_tcp_reordering;   int sysctl_tcp_retries1;   int sysctl_tcp_retries2;   int sysctl_tcp_orphan_retries;   int sysctl_tcp_fin_timeout;   unsigned int sysctl_tcp_notsent_lowat;   int sysctl_tcp_tw_reuse;   struct inet_timewait_death_row tcp_death_row;   int sysctl_max_syn_backlog;   int sysctl_udp_l3mdev_accept;   int sysctl_igmp_max_memberships;   int sysctl_igmp_max_msf;   int sysctl_igmp_llm_reports;   int sysctl_igmp_qrv;   struct ping_group_range ping_group_range;   atomic_t dev_addr_genid;   unsigned long *sysctl_local_reserved_ports;   int sysctl_ip_prot_sock;   struct list_head mr_tables;   struct fib_rules_ops *mr_rules_ops;   int sysctl_fib_multipath_use_neigh;   unsigned int fib_seq;   atomic_t rt_genid; } ;   162     struct neighbour ;   162     struct dst_ops {   unsigned short family;   unsigned int gc_thresh;   int (*gc)(struct dst_ops *);   struct dst_entry * (*check)(struct dst_entry *, __u32 );   unsigned int (*default_advmss)(const struct dst_entry *);   unsigned int (*mtu)(const struct dst_entry *);   u32 * (*cow_metrics)(struct dst_entry *, unsigned long);   void (*destroy)(struct dst_entry *);   void (*ifdown)(struct dst_entry *, struct net_device *, int);   struct dst_entry * (*negative_advice)(struct dst_entry *);   void (*link_failure)(struct sk_buff *);   void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 );   void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);   int (*local_out)(struct net *, struct sock *, struct sk_buff *);   struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);   void (*confirm_neigh)(const struct dst_entry *, const void *);   struct kmem_cache *kmem_cachep;   struct percpu_counter pcpuc_entries; } ;    68     struct netns_sysctl_ipv6 {   struct ctl_table_header *hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *icmp_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *xfrm6_hdr;   int bindv6only;   int flush_delay;   int ip6_rt_max_size;   int ip6_rt_gc_min_interval;   int ip6_rt_gc_timeout;   int ip6_rt_gc_interval;   int ip6_rt_gc_elasticity;   int ip6_rt_mtu_expires;   int ip6_rt_min_advmss;   int flowlabel_consistency;   int auto_flowlabels;   int icmpv6_time;   int anycast_src_echo_reply;   int ip_nonlocal_bind;   int fwmark_reflect;   int idgen_retries;   int idgen_delay;   int flowlabel_state_ranges; } ;    40     struct ipv6_devconf ;    40     struct rt6_info ;    40     struct rt6_statistics ;   
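/* [Editor's sketch] The netns_* blocks declared above (netns_ipv4 here,
 * netns_ipv6 next) are aggregated into struct net further down; each
 * subsystem initializes its own slice through pernet callbacks. A hedged
 * sketch, with my_* names hypothetical:
 *
 *   static int __net_init my_net_init(struct net *net)
 *   {
 *           net->ipv4.sysctl_ip_default_ttl = 64;  // seed a per-ns sysctl
 *           return 0;
 *   }
 *
 *   static void __net_exit my_net_exit(struct net *net)
 *   {
 *           // release anything my_net_init allocated for this namespace
 *   }
 *
 *   static struct pernet_operations my_net_ops = {
 *           .init = my_net_init,
 *           .exit = my_net_exit,
 *   };
 *
 * register_pernet_subsys(&my_net_ops) then runs .init for every network
 * namespace, existing and future.
 */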
 40     struct fib6_table ;    40     struct seg6_pernet_data ;    40     struct netns_ipv6 {   struct netns_sysctl_ipv6 sysctl;   struct ipv6_devconf *devconf_all;   struct ipv6_devconf *devconf_dflt;   struct inet_peer_base *peers;   struct netns_frags frags;   struct xt_table *ip6table_filter;   struct xt_table *ip6table_mangle;   struct xt_table *ip6table_raw;   struct xt_table *ip6table_security;   struct xt_table *ip6table_nat;   struct rt6_info *ip6_null_entry;   struct rt6_statistics *rt6_stats;   struct timer_list ip6_fib_timer;   struct hlist_head *fib_table_hash;   struct fib6_table *fib6_main_tbl;   struct list_head fib6_walkers;   struct dst_ops ip6_dst_ops;   rwlock_t fib6_walker_lock;   spinlock_t fib6_gc_lock;   unsigned int ip6_rt_gc_expire;   unsigned long ip6_rt_last_gc;   struct rt6_info *ip6_prohibit_entry;   struct rt6_info *ip6_blk_hole_entry;   struct fib6_table *fib6_local_tbl;   struct fib_rules_ops *fib6_rules_ops;   struct sock **icmp_sk;   struct sock *ndisc_sk;   struct sock *tcp_sk;   struct sock *igmp_sk;   struct sock *mc_autojoin_sk;   struct list_head mr6_tables;   struct fib_rules_ops *mr6_rules_ops;   atomic_t dev_addr_genid;   atomic_t fib6_sernum;   struct seg6_pernet_data *seg6_data; } ;    90     struct netns_nf_frag {   struct netns_sysctl_ipv6 sysctl;   struct netns_frags frags; } ;    96     struct netns_sysctl_lowpan {   struct ctl_table_header *frags_hdr; } ;    14     struct netns_ieee802154_lowpan {   struct netns_sysctl_lowpan sysctl;   struct netns_frags frags; } ;    20     struct sctp_mib ;    21     struct netns_sctp {   struct sctp_mib *sctp_statistics;   struct proc_dir_entry *proc_net_sctp;   struct ctl_table_header *sysctl_header;   struct sock *ctl_sock;   struct list_head local_addr_list;   struct list_head addr_waitq;   struct timer_list addr_wq_timer;   struct list_head auto_asconf_splist;   spinlock_t addr_wq_lock;   spinlock_t local_addr_lock;   unsigned int rto_initial;   unsigned int rto_min;   unsigned int rto_max;   int rto_alpha;   int rto_beta;   int max_burst;   int cookie_preserve_enable;   char *sctp_hmac_alg;   unsigned int valid_cookie_life;   unsigned int sack_timeout;   unsigned int hb_interval;   int max_retrans_association;   int max_retrans_path;   int max_retrans_init;   int pf_retrans;   int pf_enable;   int sndbuf_policy;   int rcvbuf_policy;   int default_auto_asconf;   int addip_enable;   int addip_noauth;   int prsctp_enable;   int reconf_enable;   int auth_enable;   int scope_policy;   int rwnd_upd_shift;   unsigned long max_autoclose; } ;   144     struct netns_dccp {   struct sock *v4_ctl_sk;   struct sock *v6_ctl_sk; } ;    78     struct nf_logger ;    79     struct nf_queue_handler ;    80     struct nf_hook_entry ;    80     struct netns_nf {   struct proc_dir_entry *proc_netfilter;   const struct nf_queue_handler *queue_handler;   const struct nf_logger *nf_loggers[13U];   struct ctl_table_header *nf_log_dir_header;   struct nf_hook_entry *hooks[13U][8U];   bool defrag_ipv4;   bool defrag_ipv6; } ;    26     struct ebt_table ;    27     struct netns_xt {   struct list_head tables[13U];   bool notrack_deprecated_warning;   bool clusterip_deprecated_warning;   struct ebt_table *broute_table;   struct ebt_table *frame_filter;   struct ebt_table *frame_nat; } ;    19     struct hlist_nulls_node ;    19     struct hlist_nulls_head {   struct hlist_nulls_node *first; } ;    23     struct hlist_nulls_node {   struct hlist_nulls_node *next;   struct hlist_nulls_node **pprev; } ;   115     struct 
ip_conntrack_stat {   unsigned int found;   unsigned int invalid;   unsigned int ignore;   unsigned int insert;   unsigned int insert_failed;   unsigned int drop;   unsigned int early_drop;   unsigned int error;   unsigned int expect_new;   unsigned int expect_create;   unsigned int expect_delete;   unsigned int search_restart; } ;    13     struct nf_proto_net {   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table;   unsigned int users; } ;    27     struct nf_generic_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    32     struct nf_tcp_net {   struct nf_proto_net pn;   unsigned int timeouts[14U];   unsigned int tcp_loose;   unsigned int tcp_be_liberal;   unsigned int tcp_max_retrans; } ;    46     struct nf_udp_net {   struct nf_proto_net pn;   unsigned int timeouts[2U]; } ;    51     struct nf_icmp_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    56     struct nf_dccp_net {   struct nf_proto_net pn;   int dccp_loose;   unsigned int dccp_timeout[10U]; } ;    63     struct nf_sctp_net {   struct nf_proto_net pn;   unsigned int timeouts[10U]; } ;    70     struct nf_ip_net {   struct nf_generic_net generic;   struct nf_tcp_net tcp;   struct nf_udp_net udp;   struct nf_icmp_net icmp;   struct nf_icmp_net icmpv6;   struct nf_dccp_net dccp;   struct nf_sctp_net sctp; } ;    84     struct ct_pcpu {   spinlock_t lock;   struct hlist_nulls_head unconfirmed;   struct hlist_nulls_head dying; } ;    91     struct nf_ct_event_notifier ;    91     struct nf_exp_event_notifier ;    91     struct netns_ct {   atomic_t count;   unsigned int expect_count;   struct delayed_work ecache_dwork;   bool ecache_dwork_pending;   struct ctl_table_header *sysctl_header;   struct ctl_table_header *acct_sysctl_header;   struct ctl_table_header *tstamp_sysctl_header;   struct ctl_table_header *event_sysctl_header;   struct ctl_table_header *helper_sysctl_header;   unsigned int sysctl_log_invalid;   int sysctl_events;   int sysctl_acct;   int sysctl_auto_assign_helper;   bool auto_assign_helper_warned;   int sysctl_tstamp;   int sysctl_checksum;   struct ct_pcpu *pcpu_lists;   struct ip_conntrack_stat *stat;   struct nf_ct_event_notifier *nf_conntrack_event_cb;   struct nf_exp_event_notifier *nf_expect_event_cb;   struct nf_ip_net nf_ct_proto;   unsigned int labels_used; } ;   122     struct nft_af_info ;   123     struct netns_nftables {   struct list_head af_info;   struct list_head commit_list;   struct nft_af_info *ipv4;   struct nft_af_info *ipv6;   struct nft_af_info *inet;   struct nft_af_info *arp;   struct nft_af_info *bridge;   struct nft_af_info *netdev;   unsigned int base_seq;   u8 gencursor; } ;   509     struct flow_cache_percpu {   struct hlist_head *hash_table;   int hash_count;   u32 hash_rnd;   int hash_rnd_recalc;   struct tasklet_struct flush_tasklet; } ;    16     struct flow_cache {   u32 hash_shift;   struct flow_cache_percpu *percpu;   struct hlist_node node;   int low_watermark;   int high_watermark;   struct timer_list rnd_timer; } ;    25     struct xfrm_policy_hash {   struct hlist_head *table;   unsigned int hmask;   u8 dbits4;   u8 sbits4;   u8 dbits6;   u8 sbits6; } ;    21     struct xfrm_policy_hthresh {   struct work_struct work;   seqlock_t lock;   u8 lbits4;   u8 rbits4;   u8 lbits6;   u8 rbits6; } ;    30     struct netns_xfrm {   struct list_head state_all;   struct hlist_head *state_bydst;   struct hlist_head *state_bysrc;   struct hlist_head *state_byspi;   unsigned int state_hmask;   unsigned int state_num;   struct work_struct 
state_hash_work;   struct list_head policy_all;   struct hlist_head *policy_byidx;   unsigned int policy_idx_hmask;   struct hlist_head policy_inexact[3U];   struct xfrm_policy_hash policy_bydst[3U];   unsigned int policy_count[6U];   struct work_struct policy_hash_work;   struct xfrm_policy_hthresh policy_hthresh;   struct sock *nlsk;   struct sock *nlsk_stash;   u32 sysctl_aevent_etime;   u32 sysctl_aevent_rseqth;   int sysctl_larval_drop;   u32 sysctl_acq_expires;   struct ctl_table_header *sysctl_hdr;   struct dst_ops xfrm4_dst_ops;   struct dst_ops xfrm6_dst_ops;   spinlock_t xfrm_state_lock;   spinlock_t xfrm_policy_lock;   struct mutex xfrm_cfg_mutex;   struct flow_cache flow_cache_global;   atomic_t flow_cache_genid;   struct list_head flow_cache_gc_list;   atomic_t flow_cache_gc_count;   spinlock_t flow_cache_gc_lock;   struct work_struct flow_cache_gc_work;   struct work_struct flow_cache_flush_work;   struct mutex flow_flush_sem; } ;    87     struct mpls_route ;    88     struct netns_mpls {   size_t platform_labels;   struct mpls_route **platform_label;   struct ctl_table_header *ctl; } ;    16     struct proc_ns_operations ;    17     struct ns_common {   atomic_long_t stashed;   const struct proc_ns_operations *ops;   unsigned int inum; } ;    11     struct net_generic ;    12     struct netns_ipvs ;    13     struct ucounts ;    13     struct net {   atomic_t passive;   atomic_t count;   spinlock_t rules_mod_lock;   atomic64_t cookie_gen;   struct list_head list;   struct list_head cleanup_list;   struct list_head exit_list;   struct user_namespace *user_ns;   struct ucounts *ucounts;   spinlock_t nsid_lock;   struct idr netns_ids;   struct ns_common ns;   struct proc_dir_entry *proc_net;   struct proc_dir_entry *proc_net_stat;   struct ctl_table_set sysctls;   struct sock *rtnl;   struct sock *genl_sock;   struct list_head dev_base_head;   struct hlist_head *dev_name_head;   struct hlist_head *dev_index_head;   unsigned int dev_base_seq;   int ifindex;   unsigned int dev_unreg_count;   struct list_head rules_ops;   struct net_device *loopback_dev;   struct netns_core core;   struct netns_mib mib;   struct netns_packet packet;   struct netns_unix unx;   struct netns_ipv4 ipv4;   struct netns_ipv6 ipv6;   struct netns_ieee802154_lowpan ieee802154_lowpan;   struct netns_sctp sctp;   struct netns_dccp dccp;   struct netns_nf nf;   struct netns_xt xt;   struct netns_ct ct;   struct netns_nftables nft;   struct netns_nf_frag nf_frag;   struct sock *nfnl;   struct sock *nfnl_stash;   struct list_head nfnl_acct_list;   struct list_head nfct_timeout_list;   struct sk_buff_head wext_nlevents;   struct net_generic *gen;   struct netns_xfrm xfrm;   struct netns_ipvs *ipvs;   struct netns_mpls mpls;   struct sock *diag_nlsk;   atomic_t fnhe_genid; } ;   248     struct __anonstruct_possible_net_t_454 {   struct net *net; } ;   248     typedef struct __anonstruct_possible_net_t_454 possible_net_t;   383     enum fwnode_type {   FWNODE_INVALID = 0,   FWNODE_OF = 1,   FWNODE_ACPI = 2,   FWNODE_ACPI_DATA = 3,   FWNODE_ACPI_STATIC = 4,   FWNODE_PDATA = 5,   FWNODE_IRQCHIP = 6 } ;   393     struct fwnode_handle {   enum fwnode_type type;   struct fwnode_handle *secondary; } ;    32     typedef u32 phandle;    34     struct property {   char *name;   int length;   void *value;   struct property *next;   unsigned long _flags;   unsigned int unique_id;   struct bin_attribute attr; } ;    44     struct device_node {   const char *name;   const char *type;   phandle phandle;   const char 
*full_name;   struct fwnode_handle fwnode;   struct property *properties;   struct property *deadprops;   struct device_node *parent;   struct device_node *child;   struct device_node *sibling;   struct kobject kobj;   unsigned long _flags;   void *data; } ;  1292     struct phy_device ;  1293     struct fixed_phy_status ;  1294     enum dsa_tag_protocol {   DSA_TAG_PROTO_NONE = 0,   DSA_TAG_PROTO_DSA = 1,   DSA_TAG_PROTO_TRAILER = 2,   DSA_TAG_PROTO_EDSA = 3,   DSA_TAG_PROTO_BRCM = 4,   DSA_TAG_PROTO_QCA = 5,   DSA_TAG_LAST = 6 } ;  1304     struct dsa_chip_data {   struct device *host_dev;   int sw_addr;   struct device *netdev[12U];   int eeprom_len;   struct device_node *of_node;   char *port_names[12U];   struct device_node *port_dn[12U];   s8 rtable[4U]; } ;    80     struct dsa_platform_data {   struct device *netdev;   struct net_device *of_netdev;   int nr_chips;   struct dsa_chip_data *chip; } ;    96     struct packet_type ;    97     struct dsa_switch ;    97     struct dsa_device_ops ;    97     struct dsa_switch_tree {   struct list_head list;   struct raw_notifier_head nh;   u32 tree;   struct kref refcount;   bool applied;   struct dsa_platform_data *pd;   struct net_device *master_netdev;   int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   struct ethtool_ops master_ethtool_ops;   const struct ethtool_ops *master_orig_ethtool_ops;   struct dsa_switch *cpu_switch;   s8 cpu_port;   struct dsa_switch *ds[4U];   const struct dsa_device_ops *tag_ops; } ;   157     struct dsa_mall_mirror_tc_entry {   u8 to_local_port;   bool ingress; } ;   174     struct dsa_port {   struct dsa_switch *ds;   unsigned int index;   const char *name;   struct net_device *netdev;   struct device_node *dn;   unsigned int ageing_time;   u8 stp_state;   struct net_device *bridge_dev; } ;   186     struct dsa_switch_ops ;   186     struct mii_bus ;   186     struct dsa_switch {   struct device *dev;   struct dsa_switch_tree *dst;   int index;   struct notifier_block nb;   void *priv;   struct dsa_chip_data *cd;   const struct dsa_switch_ops *ops;   s8 rtable[4U];   struct net_device *master_netdev;   u32 dsa_port_mask;   u32 cpu_port_mask;   u32 enabled_port_mask;   u32 phys_mii_mask;   struct mii_bus *slave_mii_bus;   size_t num_ports;   struct dsa_port ports[]; } ;   271     struct switchdev_trans ;   272     struct switchdev_obj ;   273     struct switchdev_obj_port_fdb ;   274     struct switchdev_obj_port_mdb ;   275     struct switchdev_obj_port_vlan ;   287     struct dsa_switch_ops {   const char * (*probe)(struct device *, struct device *, int, void **);   enum dsa_tag_protocol  (*get_tag_protocol)(struct dsa_switch *);   int (*setup)(struct dsa_switch *);   int (*set_addr)(struct dsa_switch *, u8 *);   u32  (*get_phy_flags)(struct dsa_switch *, int);   int (*phy_read)(struct dsa_switch *, int, int);   int (*phy_write)(struct dsa_switch *, int, int, u16 );   void (*adjust_link)(struct dsa_switch *, int, struct phy_device *);   void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *);   void (*get_strings)(struct dsa_switch *, int, uint8_t *);   void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *);   int (*get_sset_count)(struct dsa_switch *);   void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*suspend)(struct dsa_switch *);   int (*resume)(struct dsa_switch *);   int (*port_enable)(struct dsa_switch *, int, struct 
phy_device *);   void (*port_disable)(struct dsa_switch *, int, struct phy_device *);   int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *);   int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *);   int (*get_eeprom_len)(struct dsa_switch *);   int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*get_regs_len)(struct dsa_switch *, int);   void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *);   int (*set_ageing_time)(struct dsa_switch *, unsigned int);   int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *);   void (*port_bridge_leave)(struct dsa_switch *, int, struct net_device *);   void (*port_stp_state_set)(struct dsa_switch *, int, u8 );   void (*port_fast_age)(struct dsa_switch *, int);   int (*port_vlan_filtering)(struct dsa_switch *, int, bool );   int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *);   int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *));   int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *);   int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *));   int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *);   void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *);   int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *);   int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *));   int (*get_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *, u32 *);   int (*set_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *);   int (*port_mirror_add)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *, bool );   void (*port_mirror_del)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *); } ;   468     struct ieee_ets {   __u8 willing;   __u8 ets_cap;   __u8 cbs;   __u8 tc_tx_bw[8U];   __u8 tc_rx_bw[8U];   __u8 tc_tsa[8U];   __u8 prio_tc[8U];   __u8 tc_reco_bw[8U];   __u8 tc_reco_tsa[8U];   __u8 reco_prio_tc[8U]; } ;    69     struct ieee_maxrate {   __u64 tc_maxrate[8U]; } ;    87     struct ieee_qcn {   __u8 rpg_enable[8U];   __u32 rppp_max_rps[8U];   __u32 rpg_time_reset[8U];   __u32 rpg_byte_reset[8U];   __u32 rpg_threshold[8U];   __u32 rpg_max_rate[8U];   __u32 rpg_ai_rate[8U];   __u32 rpg_hai_rate[8U];   __u32 rpg_gd[8U];   __u32 rpg_min_dec_fac[8U];   __u32 rpg_min_rate[8U];   __u32 cndd_state_machine[8U]; } ;   132     struct ieee_qcn_stats {   __u64 rppp_rp_centiseconds[8U];   __u32 rppp_created_rps[8U]; } ;   144     struct ieee_pfc {   __u8 pfc_cap;   __u8 pfc_en;   __u8 mbc;   __u16 delay;   __u64 requests[8U];   __u64 indications[8U]; } ;   164     struct cee_pg {   __u8 willing;   __u8 error;   __u8 pg_en;   __u8 
tcs_supported;   __u8 pg_bw[8U];   __u8 prio_pg[8U]; } ;   187     struct cee_pfc {   __u8 willing;   __u8 error;   __u8 pfc_en;   __u8 tcs_supported; } ;   202     struct dcb_app {   __u8 selector;   __u8 priority;   __u16 protocol; } ;   236     struct dcb_peer_app_info {   __u8 willing;   __u8 error; } ;    40     struct dcbnl_rtnl_ops {   int (*ieee_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_setets)(struct net_device *, struct ieee_ets *);   int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *);   int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *);   int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *);   int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_getapp)(struct net_device *, struct dcb_app *);   int (*ieee_setapp)(struct net_device *, struct dcb_app *);   int (*ieee_delapp)(struct net_device *, struct dcb_app *);   int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *);   u8  (*getstate)(struct net_device *);   u8  (*setstate)(struct net_device *, u8 );   void (*getpermhwaddr)(struct net_device *, u8 *);   void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgtx)(struct net_device *, int, u8 );   void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgrx)(struct net_device *, int, u8 );   void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);   void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);   void (*setpfccfg)(struct net_device *, int, u8 );   void (*getpfccfg)(struct net_device *, int, u8 *);   u8  (*setall)(struct net_device *);   u8  (*getcap)(struct net_device *, int, u8 *);   int (*getnumtcs)(struct net_device *, int, u8 *);   int (*setnumtcs)(struct net_device *, int, u8 );   u8  (*getpfcstate)(struct net_device *);   void (*setpfcstate)(struct net_device *, u8 );   void (*getbcncfg)(struct net_device *, int, u32 *);   void (*setbcncfg)(struct net_device *, int, u32 );   void (*getbcnrp)(struct net_device *, int, u8 *);   void (*setbcnrp)(struct net_device *, int, u8 );   int (*setapp)(struct net_device *, u8 , u16 , u8 );   int (*getapp)(struct net_device *, u8 , u16 );   u8  (*getfeatcfg)(struct net_device *, int, u8 *);   u8  (*setfeatcfg)(struct net_device *, int, u8 );   u8  (*getdcbx)(struct net_device *);   u8  (*setdcbx)(struct net_device *, u8 );   int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *);   int (*peer_getapptable)(struct net_device *, struct dcb_app *);   int (*cee_peer_getpg)(struct net_device *, struct cee_pg *);   int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;    58     struct mnt_namespace ;    59     struct uts_namespace ;    60     struct ipc_namespace ;    61     struct cgroup_namespace ;    62     struct nsproxy {   atomic_t count;   struct uts_namespace *uts_ns;   struct ipc_namespace *ipc_ns;   struct mnt_namespace *mnt_ns;   struct pid_namespace *pid_ns_for_children;   struct net *net_ns;   struct cgroup_namespace *cgroup_ns; } ;    86     struct uid_gid_extent {   u32 first;   u32 lower_first;   u32 count; } ;    22     struct 
uid_gid_map {   u32 nr_extents;   struct uid_gid_extent extent[5U]; } ;    36     struct user_namespace {   struct uid_gid_map uid_map;   struct uid_gid_map gid_map;   struct uid_gid_map projid_map;   atomic_t count;   struct user_namespace *parent;   int level;   kuid_t owner;   kgid_t group;   struct ns_common ns;   unsigned long flags;   struct key *persistent_keyring_register;   struct rw_semaphore persistent_keyring_register_sem;   struct work_struct work;   struct ctl_table_set set;   struct ctl_table_header *sysctls;   struct ucounts *ucounts;   int ucount_max[9U]; } ;    70     struct ucounts {   struct hlist_node node;   struct user_namespace *ns;   kuid_t uid;   atomic_t count;   atomic_t ucount[9U]; } ;   635     struct cgroup ;    14     struct bpf_prog ;    14     struct cgroup_bpf {   struct bpf_prog *prog[3U];   struct bpf_prog *effective[3U];   bool disallow_override[3U]; } ;    43     struct cgroup_root ;    44     struct cgroup_subsys ;    45     struct cgroup_taskset ;    90     struct cgroup_file {   struct kernfs_node *kn; } ;    91     struct cgroup_subsys_state {   struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   struct list_head sibling;   struct list_head children;   int id;   unsigned int flags;   u64 serial_nr;   atomic_t online_cnt;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   142     struct css_set {   struct cgroup_subsys_state *subsys[14U];   atomic_t refcount;   struct cgroup *dfl_cgrp;   struct list_head tasks;   struct list_head mg_tasks;   struct list_head task_iters;   struct list_head e_cset_node[14U];   struct hlist_node hlist;   struct list_head cgrp_links;   struct list_head mg_preload_node;   struct list_head mg_node;   struct cgroup *mg_src_cgrp;   struct cgroup *mg_dst_cgrp;   struct css_set *mg_dst_cset;   bool dead;   struct callback_head callback_head; } ;   222     struct cgroup {   struct cgroup_subsys_state self;   unsigned long flags;   int id;   int level;   int populated_cnt;   struct kernfs_node *kn;   struct cgroup_file procs_file;   struct cgroup_file events_file;   u16 subtree_control;   u16 subtree_ss_mask;   u16 old_subtree_control;   u16 old_subtree_ss_mask;   struct cgroup_subsys_state *subsys[14U];   struct cgroup_root *root;   struct list_head cset_links;   struct list_head e_csets[14U];   struct list_head pidlists;   struct mutex pidlist_mutex;   wait_queue_head_t offline_waitq;   struct work_struct release_agent_work;   struct cgroup_bpf bpf;   int ancestor_ids[]; } ;   310     struct cgroup_root {   struct kernfs_root *kf_root;   unsigned int subsys_mask;   int hierarchy_id;   struct cgroup cgrp;   int cgrp_ancestor_id_storage;   atomic_t nr_cgrps;   struct list_head root_list;   unsigned int flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   349     struct cftype {   char name[64U];   unsigned long private;   size_t max_write_len;   unsigned int flags;   unsigned int file_offset;   struct cgroup_subsys *ss;   struct list_head node;   struct kernfs_ops *kf_ops;   int (*open)(struct kernfs_open_file *);   void (*release)(struct kernfs_open_file *);   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int 
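/* [Editor's sketch] cftype (whose read/write hooks continue below) is how
 * a cgroup controller describes its control files; a scalar file only
 * needs read_u64/write_u64. Hypothetical my_* names:
 *
 *   static u64 my_read(struct cgroup_subsys_state *css, struct cftype *cft)
 *   {
 *           return 42;  // would derive the value from css's controller state
 *   }
 *
 *   static struct cftype my_files[] = {
 *           { .name = "my.value", .read_u64 = my_read },
 *           { }     // cftype arrays are terminated by an empty entry
 *   };
 *
 * The array is hooked up via the controller's dfl_cftypes/legacy_cftypes
 * pointers in struct cgroup_subsys, declared just below.
 */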
(*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   struct lock_class_key lockdep_key; } ;   437     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_released)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   void (*css_reset)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_taskset *);   void (*attach)(struct cgroup_taskset *);   void (*post_attach)();   int (*can_fork)(struct task_struct *);   void (*cancel_fork)(struct task_struct *);   void (*fork)(struct task_struct *);   void (*exit)(struct task_struct *);   void (*free)(struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   bool early_init;   bool implicit_on_dfl;   bool broken_hierarchy;   bool warned_broken_hierarchy;   int id;   const char *name;   const char *legacy_name;   struct cgroup_root *root;   struct idr css_idr;   struct list_head cfts;   struct cftype *dfl_cftypes;   struct cftype *legacy_cftypes;   unsigned int depends_on; } ;   631     struct cgroup_namespace {   atomic_t count;   struct ns_common ns;   struct user_namespace *user_ns;   struct ucounts *ucounts;   struct css_set *root_cset; } ;   686     struct netprio_map {   struct callback_head rcu;   u32 priomap_len;   u32 priomap[]; } ;    42     struct nlmsghdr {   __u32 nlmsg_len;   __u16 nlmsg_type;   __u16 nlmsg_flags;   __u32 nlmsg_seq;   __u32 nlmsg_pid; } ;   144     struct nlattr {   __u16 nla_len;   __u16 nla_type; } ;   105     struct netlink_callback {   struct sk_buff *skb;   const struct nlmsghdr *nlh;   int (*start)(struct netlink_callback *);   int (*dump)(struct sk_buff *, struct netlink_callback *);   int (*done)(struct netlink_callback *);   void *data;   struct module *module;   u16 family;   u16 min_dump_alloc;   unsigned int prev_seq;   unsigned int seq;   long args[6U]; } ;   183     struct ndmsg {   __u8 ndm_family;   __u8 ndm_pad1;   __u16 ndm_pad2;   __s32 ndm_ifindex;   __u16 ndm_state;   __u8 ndm_flags;   __u8 ndm_type; } ;    41     struct rtnl_link_stats64 {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 rx_errors;   __u64 tx_errors;   __u64 rx_dropped;   __u64 tx_dropped;   __u64 multicast;   __u64 collisions;   __u64 rx_length_errors;   __u64 rx_over_errors;   __u64 rx_crc_errors;   __u64 rx_frame_errors;   __u64 rx_fifo_errors;   __u64 rx_missed_errors;   __u64 tx_aborted_errors;   __u64 tx_carrier_errors;   __u64 tx_fifo_errors;   __u64 tx_heartbeat_errors;   __u64 tx_window_errors;   __u64 rx_compressed;   __u64 tx_compressed;   __u64 rx_nohandler; } ;   872     struct ifla_vf_stats {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 broadcast;   __u64 multicast; } ;    16     struct ifla_vf_info {   __u32 vf;   __u8 mac[32U];   __u32 vlan;   __u32 qos;   __u32 spoofchk;   __u32 linkstate;   __u32 min_tx_rate;   __u32 max_tx_rate;   __u32 rss_query_en;   __u32 trusted;   __be16 vlan_proto; } ;   118     struct tc_stats {   __u64 bytes;   __u32 packets;   __u32 drops;   __u32 overlimits;   __u32 bps;   __u32 pps;   __u32 qlen;   __u32 backlog; } ;    96     struct tc_sizespec {   unsigned char cell_log;   unsigned char 
size_log;   short cell_align;   int overhead;   unsigned int linklayer;   unsigned int mpu;   unsigned int mtu;   unsigned int tsize; } ;   117     struct netpoll_info ;   118     struct wireless_dev ;   119     struct wpan_dev ;   120     struct mpls_dev ;   121     struct udp_tunnel_info ;    70     enum netdev_tx {   __NETDEV_TX_MIN = -2147483648,   NETDEV_TX_OK = 0,   NETDEV_TX_BUSY = 16 } ;   113     typedef enum netdev_tx netdev_tx_t;   132     struct net_device_stats {   unsigned long rx_packets;   unsigned long tx_packets;   unsigned long rx_bytes;   unsigned long tx_bytes;   unsigned long rx_errors;   unsigned long tx_errors;   unsigned long rx_dropped;   unsigned long tx_dropped;   unsigned long multicast;   unsigned long collisions;   unsigned long rx_length_errors;   unsigned long rx_over_errors;   unsigned long rx_crc_errors;   unsigned long rx_frame_errors;   unsigned long rx_fifo_errors;   unsigned long rx_missed_errors;   unsigned long tx_aborted_errors;   unsigned long tx_carrier_errors;   unsigned long tx_fifo_errors;   unsigned long tx_heartbeat_errors;   unsigned long tx_window_errors;   unsigned long rx_compressed;   unsigned long tx_compressed; } ;   196     struct neigh_parms ;   217     struct netdev_hw_addr_list {   struct list_head list;   int count; } ;   222     struct hh_cache {   u16 hh_len;   u16 __pad;   seqlock_t hh_lock;   unsigned long hh_data[16U]; } ;   251     struct header_ops {   int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int);   int (*parse)(const struct sk_buff *, unsigned char *);   int (*cache)(const struct neighbour *, struct hh_cache *, __be16 );   void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *);   bool  (*validate)(const char *, unsigned int); } ;   360     enum rx_handler_result {   RX_HANDLER_CONSUMED = 0,   RX_HANDLER_ANOTHER = 1,   RX_HANDLER_EXACT = 2,   RX_HANDLER_PASS = 3 } ;   408     typedef enum rx_handler_result rx_handler_result_t;   409     typedef rx_handler_result_t  rx_handler_func_t(struct sk_buff **);   530     struct Qdisc ;   530     struct netdev_queue {   struct net_device *dev;   struct Qdisc *qdisc;   struct Qdisc *qdisc_sleeping;   struct kobject kobj;   int numa_node;   unsigned long tx_maxrate;   unsigned long trans_timeout;   spinlock_t _xmit_lock;   int xmit_lock_owner;   unsigned long trans_start;   unsigned long state;   struct dql dql; } ;   601     struct rps_map {   unsigned int len;   struct callback_head rcu;   u16 cpus[0U]; } ;   613     struct rps_dev_flow {   u16 cpu;   u16 filter;   unsigned int last_qtail; } ;   625     struct rps_dev_flow_table {   unsigned int mask;   struct callback_head rcu;   struct rps_dev_flow flows[0U]; } ;   677     struct netdev_rx_queue {   struct rps_map *rps_map;   struct rps_dev_flow_table *rps_flow_table;   struct kobject kobj;   struct net_device *dev; } ;   700     struct xps_map {   unsigned int len;   unsigned int alloc_len;   struct callback_head rcu;   u16 queues[0U]; } ;   713     struct xps_dev_maps {   struct callback_head rcu;   struct xps_map *cpu_map[0U]; } ;   724     struct netdev_tc_txq {   u16 count;   u16 offset; } ;   735     struct netdev_fcoe_hbainfo {   char manufacturer[64U];   char serial_number[64U];   char hardware_version[64U];   char driver_version[64U];   char optionrom_version[64U];   char firmware_version[64U];   char model[256U];   char model_description[256U]; } ;   751     struct netdev_phys_item_id {   unsigned char id[32U];   unsigned 
char id_len; } ;   779     struct tc_cls_u32_offload ;   780     struct tc_cls_flower_offload ;   780     struct tc_cls_matchall_offload ;   780     struct tc_cls_bpf_offload ;   780     union __anonunion____missing_field_name_485 {   u8 tc;   struct tc_cls_u32_offload *cls_u32;   struct tc_cls_flower_offload *cls_flower;   struct tc_cls_matchall_offload *cls_mall;   struct tc_cls_bpf_offload *cls_bpf; } ;   780     struct tc_to_netdev {   unsigned int type;   union __anonunion____missing_field_name_485 __annonCompField116;   bool egress_dev; } ;   797     enum xdp_netdev_command {   XDP_SETUP_PROG = 0,   XDP_QUERY_PROG = 1 } ;   802     union __anonunion____missing_field_name_486 {   struct bpf_prog *prog;   bool prog_attached; } ;   802     struct netdev_xdp {   enum xdp_netdev_command command;   union __anonunion____missing_field_name_486 __annonCompField117; } ;   825     struct net_device_ops {   int (*ndo_init)(struct net_device *);   void (*ndo_uninit)(struct net_device *);   int (*ndo_open)(struct net_device *);   int (*ndo_stop)(struct net_device *);   netdev_tx_t  (*ndo_start_xmit)(struct sk_buff *, struct net_device *);   netdev_features_t  (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t );   u16  (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16  (*)(struct net_device *, struct sk_buff *));   void (*ndo_change_rx_flags)(struct net_device *, int);   void (*ndo_set_rx_mode)(struct net_device *);   int (*ndo_set_mac_address)(struct net_device *, void *);   int (*ndo_validate_addr)(struct net_device *);   int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);   int (*ndo_set_config)(struct net_device *, struct ifmap *);   int (*ndo_change_mtu)(struct net_device *, int);   int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);   void (*ndo_tx_timeout)(struct net_device *);   void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);   bool  (*ndo_has_offload_stats)(const struct net_device *, int);   int (*ndo_get_offload_stats)(int, const struct net_device *, void *);   struct net_device_stats * (*ndo_get_stats)(struct net_device *);   int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 );   int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 );   void (*ndo_poll_controller)(struct net_device *);   int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *);   void (*ndo_netpoll_cleanup)(struct net_device *);   int (*ndo_set_vf_mac)(struct net_device *, int, u8 *);   int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 );   int (*ndo_set_vf_rate)(struct net_device *, int, int, int);   int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool );   int (*ndo_set_vf_trust)(struct net_device *, int, bool );   int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);   int (*ndo_set_vf_link_state)(struct net_device *, int, int);   int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *);   int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);   int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);   int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int);   int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool );   int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *);   int (*ndo_fcoe_enable)(struct net_device *);   int (*ndo_fcoe_disable)(struct net_device *);   int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int 
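/* [Editor's sketch] net_device_ops, still being listed here, is the main
 * driver hook table: the stack calls ndo_open/ndo_stop on ifup/ifdown and
 * ndo_start_xmit once per outgoing frame. A minimal hypothetical example
 * (my_open/my_stop/my_xmit are illustrative):
 *
 *   static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *   {
 *           // hand skb to the hardware TX ring here
 *           return NETDEV_TX_OK;        // or NETDEV_TX_BUSY to requeue
 *   }
 *
 *   static const struct net_device_ops my_netdev_ops = {
 *           .ndo_open       = my_open,
 *           .ndo_stop       = my_stop,
 *           .ndo_start_xmit = my_xmit,
 *   };
 */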
(*ndo_fcoe_ddp_done)(struct net_device *, u16 );   int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *);   int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int);   int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 );   int (*ndo_add_slave)(struct net_device *, struct net_device *);   int (*ndo_del_slave)(struct net_device *, struct net_device *);   netdev_features_t  (*ndo_fix_features)(struct net_device *, netdev_features_t );   int (*ndo_set_features)(struct net_device *, netdev_features_t );   int (*ndo_neigh_construct)(struct net_device *, struct neighbour *);   void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *);   int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 );   int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 );   int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *);   int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int);   int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_change_carrier)(struct net_device *, bool );   int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *);   int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t );   void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *);   void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *);   void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);   void (*ndo_dfwd_del_station)(struct net_device *, void *);   netdev_tx_t  (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *);   int (*ndo_get_lock_subclass)(struct net_device *);   int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 );   int (*ndo_get_iflink)(const struct net_device *);   int (*ndo_change_proto_down)(struct net_device *, bool );   int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *);   void (*ndo_set_rx_headroom)(struct net_device *, int);   int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;  1360     struct __anonstruct_adj_list_487 {   struct list_head upper;   struct list_head lower; } ;  1360     struct iw_handler_def ;  1360     struct iw_public_data ;  1360     struct switchdev_ops ;  1360     struct l3mdev_ops ;  1360     struct ndisc_ops ;  1360     struct vlan_info ;  1360     struct tipc_bearer ;  1360     struct in_device ;  1360     struct dn_dev ;  1360     struct inet6_dev ;  1360     struct tcf_proto ;  1360     struct cpu_rmap ;  1360     struct pcpu_lstats ;  1360     struct pcpu_sw_netstats ;  1360     struct pcpu_dstats ;  1360     struct pcpu_vstats ;  1360     union __anonunion____missing_field_name_488 {   void *ml_priv;   struct pcpu_lstats *lstats;   struct pcpu_sw_netstats *tstats;   struct pcpu_dstats *dstats;   struct pcpu_vstats *vstats; } ;  1360     struct garp_port ;  1360     struct mrp_port ;  1360     struct rtnl_link_ops ;  1360     struct net_device {   char name[16U];   struct hlist_node name_hlist;   char *ifalias;   unsigned long mem_end;   unsigned long mem_start;   unsigned long base_addr;   int irq;   atomic_t carrier_changes;   unsigned long state;   struct list_head dev_list;   struct list_head napi_list;  
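/* [Editor's sketch] struct net_device (whose members continue below) is
 * allocated with the driver's private area appended to it; netdev_priv(),
 * declared near the end of this trace section, returns that area. The
 * idiom as this IrDA driver would use it, given vlsi_irda_dev_t defined
 * later in the trace (pdev stands for the probe routine's pci_dev):
 *
 *   struct net_device *ndev = alloc_irdadev(sizeof(vlsi_irda_dev_t));
 *   if (ndev) {
 *           vlsi_irda_dev_t *idev = netdev_priv(ndev);
 *           idev->pdev = pdev;
 *   }
 */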
 struct list_head unreg_list;   struct list_head close_list;   struct list_head ptype_all;   struct list_head ptype_specific;   struct __anonstruct_adj_list_487 adj_list;   netdev_features_t features;   netdev_features_t hw_features;   netdev_features_t wanted_features;   netdev_features_t vlan_features;   netdev_features_t hw_enc_features;   netdev_features_t mpls_features;   netdev_features_t gso_partial_features;   int ifindex;   int group;   struct net_device_stats stats;   atomic_long_t rx_dropped;   atomic_long_t tx_dropped;   atomic_long_t rx_nohandler;   const struct iw_handler_def *wireless_handlers;   struct iw_public_data *wireless_data;   const struct net_device_ops *netdev_ops;   const struct ethtool_ops *ethtool_ops;   const struct switchdev_ops *switchdev_ops;   const struct l3mdev_ops *l3mdev_ops;   const struct ndisc_ops *ndisc_ops;   const struct header_ops *header_ops;   unsigned int flags;   unsigned int priv_flags;   unsigned short gflags;   unsigned short padded;   unsigned char operstate;   unsigned char link_mode;   unsigned char if_port;   unsigned char dma;   unsigned int mtu;   unsigned int min_mtu;   unsigned int max_mtu;   unsigned short type;   unsigned short hard_header_len;   unsigned short min_header_len;   unsigned short needed_headroom;   unsigned short needed_tailroom;   unsigned char perm_addr[32U];   unsigned char addr_assign_type;   unsigned char addr_len;   unsigned short neigh_priv_len;   unsigned short dev_id;   unsigned short dev_port;   spinlock_t addr_list_lock;   unsigned char name_assign_type;   bool uc_promisc;   struct netdev_hw_addr_list uc;   struct netdev_hw_addr_list mc;   struct netdev_hw_addr_list dev_addrs;   struct kset *queues_kset;   unsigned int promiscuity;   unsigned int allmulti;   struct vlan_info *vlan_info;   struct dsa_switch_tree *dsa_ptr;   struct tipc_bearer *tipc_ptr;   void *atalk_ptr;   struct in_device *ip_ptr;   struct dn_dev *dn_ptr;   struct inet6_dev *ip6_ptr;   void *ax25_ptr;   struct wireless_dev *ieee80211_ptr;   struct wpan_dev *ieee802154_ptr;   struct mpls_dev *mpls_ptr;   unsigned char *dev_addr;   struct netdev_rx_queue *_rx;   unsigned int num_rx_queues;   unsigned int real_num_rx_queues;   unsigned long gro_flush_timeout;   rx_handler_func_t *rx_handler;   void *rx_handler_data;   struct tcf_proto *ingress_cl_list;   struct netdev_queue *ingress_queue;   struct nf_hook_entry *nf_hooks_ingress;   unsigned char broadcast[32U];   struct cpu_rmap *rx_cpu_rmap;   struct hlist_node index_hlist;   struct netdev_queue *_tx;   unsigned int num_tx_queues;   unsigned int real_num_tx_queues;   struct Qdisc *qdisc;   struct hlist_head qdisc_hash[16U];   unsigned long tx_queue_len;   spinlock_t tx_global_lock;   int watchdog_timeo;   struct xps_dev_maps *xps_maps;   struct tcf_proto *egress_cl_list;   struct timer_list watchdog_timer;   int *pcpu_refcnt;   struct list_head todo_list;   struct list_head link_watch_list;   unsigned char reg_state;   bool dismantle;   unsigned short rtnl_link_state;   void (*destructor)(struct net_device *);   struct netpoll_info *npinfo;   possible_net_t nd_net;   union __anonunion____missing_field_name_488 __annonCompField118;   struct garp_port *garp_port;   struct mrp_port *mrp_port;   struct device dev;   const struct attribute_group *sysfs_groups[4U];   const struct attribute_group *sysfs_rx_queue_group;   const struct rtnl_link_ops *rtnl_link_ops;   unsigned int gso_max_size;   u16 gso_max_segs;   const struct dcbnl_rtnl_ops *dcbnl_ops;   u8 num_tc;   struct netdev_tc_txq 
tc_to_txq[16U];   u8 prio_tc_map[16U];   unsigned int fcoe_ddp_xid;   struct netprio_map *priomap;   struct phy_device *phydev;   struct lock_class_key *qdisc_tx_busylock;   struct lock_class_key *qdisc_running_key;   bool proto_down; } ;  2185     struct packet_type {   __be16 type;   struct net_device *dev;   int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   bool  (*id_match)(struct packet_type *, struct sock *);   void *af_packet_priv;   struct list_head list; } ;  2213     struct pcpu_sw_netstats {   u64 rx_packets;   u64 rx_bytes;   u64 tx_packets;   u64 tx_bytes;   struct u64_stats_sync syncp; } ;  3191     enum skb_free_reason {   SKB_REASON_CONSUMED = 0,   SKB_REASON_DROPPED = 1 } ;   162     struct if_irda_qos {   unsigned long baudrate;   unsigned short data_size;   unsigned short window_size;   unsigned short min_turn_time;   unsigned short max_turn_time;   unsigned char add_bofs;   unsigned char link_disc; } ;   188     struct if_irda_line {   __u8 dtr;   __u8 rts; } ;   194     union __anonunion_ifr_ifrn_496 {   char ifrn_name[16U]; } ;   194     union __anonunion_ifr_ifru_497 {   struct if_irda_line ifru_line;   struct if_irda_qos ifru_qos;   unsigned short ifru_flags;   unsigned int ifru_receiving;   unsigned int ifru_mode;   unsigned int ifru_dongle; } ;   194     struct if_irda_req {   union __anonunion_ifr_ifrn_496 ifr_ifrn;   union __anonunion_ifr_ifru_497 ifr_ifru; } ;    34     typedef __u32 magic_t;    16     struct cdev {   struct kobject kobj;   struct module *owner;   const struct file_operations *ops;   struct list_head list;   dev_t dev;   unsigned int count; } ;   521     struct tcmsg {   unsigned char tcm_family;   unsigned char tcm__pad1;   unsigned short tcm__pad2;   int tcm_ifindex;   __u32 tcm_handle;   __u32 tcm_parent;   __u32 tcm_info; } ;    27     struct gnet_stats_basic_packed {   __u64 bytes;   __u32 packets; } ;    51     struct gnet_stats_queue {   __u32 qlen;   __u32 backlog;   __u32 drops;   __u32 requeues;   __u32 overlimits; } ;    77     struct gnet_stats_basic_cpu {   struct gnet_stats_basic_packed bstats;   struct u64_stats_sync syncp; } ;    13     struct net_rate_estimator ;    14     struct gnet_dump {   spinlock_t *lock;   struct sk_buff *skb;   struct nlattr *tail;   int compat_tc_stats;   int compat_xstats;   int padattr;   void *xstats;   int xstats_len;   struct tc_stats tc_stats; } ;    88     struct nla_policy {   u16 type;   u16 len; } ;    25     struct rtnl_link_ops {   struct list_head list;   const char *kind;   size_t priv_size;   void (*setup)(struct net_device *);   int maxtype;   const struct nla_policy *policy;   int (*validate)(struct nlattr **, struct nlattr **);   int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **);   int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **);   void (*dellink)(struct net_device *, struct list_head *);   size_t  (*get_size)(const struct net_device *);   int (*fill_info)(struct sk_buff *, const struct net_device *);   size_t  (*get_xstats_size)(const struct net_device *);   int (*fill_xstats)(struct sk_buff *, const struct net_device *);   unsigned int (*get_num_tx_queues)();   unsigned int (*get_num_rx_queues)();   int slave_maxtype;   const struct nla_policy *slave_policy;   int (*slave_validate)(struct nlattr **, struct nlattr **);   int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **);   size_t  (*get_slave_size)(const struct net_device *, 
const struct net_device *);   int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *);   struct net * (*get_link_net)(const struct net_device *);   size_t  (*get_linkxstats_size)(const struct net_device *, int);   int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ;   162     struct Qdisc_ops ;   163     struct qdisc_walker ;   164     struct tcf_walker ;    30     struct qdisc_size_table {   struct callback_head rcu;   struct list_head list;   struct tc_sizespec szopts;   int refcnt;   u16 data[]; } ;    38     struct qdisc_skb_head {   struct sk_buff *head;   struct sk_buff *tail;   __u32 qlen;   spinlock_t lock; } ;    46     struct Qdisc {   int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);   struct sk_buff * (*dequeue)(struct Qdisc *);   unsigned int flags;   u32 limit;   const struct Qdisc_ops *ops;   struct qdisc_size_table *stab;   struct hlist_node hash;   u32 handle;   u32 parent;   void *u32_node;   struct netdev_queue *dev_queue;   struct net_rate_estimator *rate_est;   struct gnet_stats_basic_cpu *cpu_bstats;   struct gnet_stats_queue *cpu_qstats;   struct sk_buff *gso_skb;   struct qdisc_skb_head q;   struct gnet_stats_basic_packed bstats;   seqcount_t running;   struct gnet_stats_queue qstats;   unsigned long state;   struct Qdisc *next_sched;   struct sk_buff *skb_bad_txq;   struct callback_head callback_head;   int padded;   atomic_t refcnt;   spinlock_t busylock; } ;   134     struct Qdisc_class_ops {   struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);   int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **);   struct Qdisc * (*leaf)(struct Qdisc *, unsigned long);   void (*qlen_notify)(struct Qdisc *, unsigned long);   unsigned long int (*get)(struct Qdisc *, u32 );   void (*put)(struct Qdisc *, unsigned long);   int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *);   int (*delete)(struct Qdisc *, unsigned long);   void (*walk)(struct Qdisc *, struct qdisc_walker *);   struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);   bool  (*tcf_cl_offload)(u32 );   unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 );   void (*unbind_tcf)(struct Qdisc *, unsigned long);   int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *);   int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ;   166     struct Qdisc_ops {   struct Qdisc_ops *next;   const struct Qdisc_class_ops *cl_ops;   char id[16U];   int priv_size;   int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);   struct sk_buff * (*dequeue)(struct Qdisc *);   struct sk_buff * (*peek)(struct Qdisc *);   int (*init)(struct Qdisc *, struct nlattr *);   void (*reset)(struct Qdisc *);   void (*destroy)(struct Qdisc *);   int (*change)(struct Qdisc *, struct nlattr *);   void (*attach)(struct Qdisc *);   int (*dump)(struct Qdisc *, struct sk_buff *);   int (*dump_stats)(struct Qdisc *, struct gnet_dump *);   struct module *owner; } ;   191     struct tcf_result {   unsigned long class;   u32 classid; } ;   197     struct tcf_proto_ops {   struct list_head head;   char kind[16U];   int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);   int (*init)(struct tcf_proto *);   bool  (*destroy)(struct tcf_proto *, bool );   unsigned long int (*get)(struct tcf_proto *, u32 );   int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, 
unsigned long *, bool );   int (*delete)(struct tcf_proto *, unsigned long);   void (*walk)(struct tcf_proto *, struct tcf_walker *);   int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *);   struct module *owner; } ;   222     struct tcf_proto {   struct tcf_proto *next;   void *root;   int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);   __be16 protocol;   u32 prio;   u32 classid;   struct Qdisc *q;   void *data;   const struct tcf_proto_ops *ops;   struct callback_head rcu; } ;   862     struct qdisc_walker {   int stop;   int skip;   int count;   int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); } ;    64     struct __anonstruct_qos_value_t_522 {   __u32 value;   __u16 bits; } ;    64     typedef struct __anonstruct_qos_value_t_522 qos_value_t;    65     struct qos_info {   magic_t magic;   qos_value_t baud_rate;   qos_value_t max_turn_time;   qos_value_t data_size;   qos_value_t window_size;   qos_value_t additional_bofs;   qos_value_t min_turn_time;   qos_value_t link_disc_time;   qos_value_t power; } ;    93     struct irlap_cb ;   133     struct irda_skb_cb {   unsigned int default_qdisc_pad;   magic_t magic;   __u32 next_speed;   __u16 mtt;   __u16 xbofs;   __u16 next_xbofs;   void *context;   void (*destructor)(struct sk_buff *);   __u16 xbofs_delay;   __u8 line; } ;   438     struct __anonstruct_rd_s_527 {   u8 addr_res[3U];   volatile u8 status; } ;   438     union __anonunion_rd_u_526 {   __le32 addr;   struct __anonstruct_rd_s_527 rd_s; } ;   438     struct ring_descr_hw {   volatile __le16 rd_count;   __le16 reserved;   union __anonunion_rd_u_526 rd_u; } ;   548     struct ring_descr {   struct ring_descr_hw *hw;   struct sk_buff *skb;   void *buf; } ;   654     struct vlsi_ring {   struct pci_dev *pdev;   int dir;   unsigned int len;   unsigned int size;   unsigned int mask;   atomic_t head;   atomic_t tail;   struct ring_descr *rd; } ;   707     struct vlsi_irda_dev {   struct pci_dev *pdev;   struct irlap_cb *irlap;   struct qos_info qos;   unsigned int mode;   int baud;   int new_baud;   dma_addr_t busaddr;   void *virtaddr;   struct vlsi_ring *tx_ring;   struct vlsi_ring *rx_ring;   ktime_t last_rx;   spinlock_t lock;   struct mutex mtx;   u8 resume_ok;   struct proc_dir_entry *proc_entry; } ;   734     typedef struct vlsi_irda_dev vlsi_irda_dev_t;     1     void * __builtin_memcpy(void *, const void *, unsigned long);     1     long int __builtin_expect(long, long);   252     void __read_once_size(const volatile void *p, void *res, int size);   277     void __write_once_size(volatile void *p, void *res, int size);    34     extern struct module __this_module;    72     void set_bit(long nr, volatile unsigned long *addr);   110     void clear_bit(long nr, volatile unsigned long *addr);   321     bool  constant_test_bit(long nr, const volatile unsigned long *addr);   172     int printk(const char *, ...);   282     void dump_stack();    55     void __dynamic_pr_debug(struct _ddebug *, const char *, ...);   415     int sprintf(char *, const char *, ...);     8     void ldv_dma_map_page();    87     void __bad_percpu_size();    71     void warn_slowpath_null(const char *, const int);     9     extern unsigned long vmemmap_base;    23     unsigned long int __phys_addr(unsigned long);    24     int atomic_read(const atomic_t *v);    36     void atomic_set(atomic_t *v, int i);    89     void atomic_inc(atomic_t *v);    32     void * __memcpy(void *, const void *, size_t );    57     void * 
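/* [Editor's sketch] The DMA helpers declared next figure prominently in
 * this error trace: ldv_dma_map_page()/ldv_dma_map_single_attrs_5() are
 * the LDV-instrumented entry points modeling dma_map_single_attrs(). A
 * streaming mapping must be checked before use and unmapped afterwards;
 * a sketch in the pci_* wrappers this driver uses (rd and len are
 * hypothetical, modeled on the ring_descr fields above):
 *
 *   dma_addr_t busaddr = pci_map_single(pdev, rd->buf, len,
 *                                       PCI_DMA_TODEVICE);
 *   if (pci_dma_mapping_error(pdev, busaddr))
 *           return -ENOMEM;   // never hand an unchecked handle to the chip
 *   // ... hardware owns the buffer; sync around any CPU access ...
 *   pci_unmap_single(pdev, busaddr, len, PCI_DMA_TODEVICE);
 *
 * Omitting the mapping-error check, or the final unmap, is exactly the
 * class of bug this verifier rule flags.
 */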
__memset(void *, int, size_t );    27     s64  div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);     8     extern int __preempt_count;    20     int preempt_count();    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    32     unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);    43     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   286     raw_spinlock_t * spinlock_check(spinlock_t *lock);   352     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);   133     void __mutex_init(struct mutex *, const char *, struct lock_class_key *);   155     void mutex_lock_nested(struct mutex *, unsigned int);   195     void mutex_unlock(struct mutex *);   162     s64  ktime_divns(const ktime_t kt, s64 div);   173     s64  ktime_to_us(const ktime_t kt);   183     s64  ktime_us_delta(const ktime_t later, const ktime_t earlier);   188     ktime_t  ktime_get();   340     void outb(unsigned char value, int port);   340     unsigned char inb(int port);   341     void outw(unsigned short value, int port);   341     unsigned short int inw(int port);    87     const char * kobject_name(const struct kobject *kobj);   139     int request_threaded_irq(unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   144     int request_irq(unsigned int irq, irqreturn_t  (*handler)(int, void *), unsigned long flags, const char *name, void *dev);   158     void free_irq(unsigned int, void *);   208     bool  capable(int);   109     ssize_t  seq_read(struct file *, char *, size_t , loff_t *);   110     loff_t  seq_lseek(struct file *, loff_t , int);   117     void seq_printf(struct seq_file *, const char *, ...);   135     int single_open(struct file *, int (*)(struct seq_file *, void *), void *);   137     int single_release(struct inode *, struct file *);   979     const char * dev_name(const struct device *dev);  1026     void * dev_get_drvdata(const struct device *dev);  1031     void dev_set_drvdata(struct device *dev, void *data);   154     void kfree(const void *);   330     void * __kmalloc(size_t , gfp_t );   478     void * kmalloc(size_t size, gfp_t flags);   919     int pci_bus_read_config_byte(struct pci_bus *, unsigned int, int, u8 *);   925     int pci_bus_write_config_byte(struct pci_bus *, unsigned int, int, u8 );   943     int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);   956     int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);  1011     int pci_enable_device(struct pci_dev *);  1028     void pci_disable_device(struct pci_dev *);  1031     void pci_set_master(struct pci_dev *);  1084     int pci_save_state(struct pci_dev *);  1085     void pci_restore_state(struct pci_dev *);  1098     int pci_set_power_state(struct pci_dev *, pci_power_t );  1099     pci_power_t  pci_choose_state(struct pci_dev *, pm_message_t );  1157     int pci_request_regions(struct pci_dev *, const char *);  1159     void pci_release_regions(struct pci_dev *);  1212     int __pci_register_driver(struct pci_driver *, struct module *, const char *);  1221     void pci_unregister_driver(struct pci_driver *);  1650     void * pci_get_drvdata(struct pci_dev *pdev);  1655     void pci_set_drvdata(struct pci_dev *pdev, void *data);  1663     const char * pci_name(const struct pci_dev *pdev);    37     void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );    44     void debug_dma_unmap_page(struct 
device *, dma_addr_t , size_t , int, bool );    53     void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);    66     void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int);    70     void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int);   131     void kmemcheck_mark_initialized(void *address, unsigned int n);   144     int valid_dma_direction(int dma_direction);    28     extern const struct dma_map_ops *dma_ops;    30     const struct dma_map_ops * get_arch_dma_ops(struct bus_type *bus);    35     bool  arch_dma_alloc_attrs(struct device **, gfp_t *);    39     int dma_supported(struct device *, u64 );   175     const struct dma_map_ops * get_dma_ops(struct device *dev);   200     dma_addr_t  ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);   200     dma_addr_t  dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);   223     void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);   335     void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);   347     void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);   476     void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);   517     void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);   523     void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);   575     int dma_set_mask(struct device *dev, u64 mask);   680     void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);    23     void * pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);    31     void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);    38     dma_addr_t  pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);    44     void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);    79     void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);    86     void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);   113     int pci_set_dma_mask(struct pci_dev *dev, u64 mask);     8     void __udelay(unsigned long);    10     void __const_udelay(unsigned long);   233     int net_ratelimit();  1927     unsigned char * skb_put(struct sk_buff *, unsigned int);  2030     void skb_reserve(struct sk_buff *skb, int len);  2198     void skb_reset_mac_header(struct sk_buff *skb);  2441     struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );  2457     struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);  2471     struct sk_buff * dev_alloc_skb(unsigned int length);  3173     void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len);  1927     struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);  2022     void * netdev_priv(const struct net_device *dev);  2425     void free_netdev(struct net_device *);  2801     void netif_tx_start_queue(struct netdev_queue 
*dev_queue);  2812     void netif_start_queue(struct net_device *dev);  2827     void netif_tx_wake_queue(struct netdev_queue *);  2836     void netif_wake_queue(struct net_device *dev);  2851     void netif_tx_stop_queue(struct netdev_queue *dev_queue);  2863     void netif_stop_queue(struct net_device *dev);  2870     bool  netif_tx_queue_stopped(const struct netdev_queue *dev_queue);  2881     bool  netif_queue_stopped(const struct net_device *dev);  3055     bool  netif_running(const struct net_device *dev);  3198     void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason );  3229     void dev_kfree_skb_any(struct sk_buff *skb);  3239     int netif_rx(struct sk_buff *);  3240     int netif_rx_ni(struct sk_buff *);  3353     bool  netif_carrier_ok(const struct net_device *dev);  3426     bool  netif_device_present(struct net_device *dev);  3431     void netif_device_detach(struct net_device *);  3433     void netif_device_attach(struct net_device *);  3690     int register_netdev(struct net_device *);  3691     void unregister_netdev(struct net_device *);    19     struct proc_dir_entry * proc_mkdir(const char *, struct proc_dir_entry *);    26     struct proc_dir_entry * proc_create_data(const char *, umode_t , struct proc_dir_entry *, const struct file_operations *, void *);    38     void proc_set_size(struct proc_dir_entry *, loff_t );    40     void * PDE_DATA(const struct inode *);    43     void remove_proc_entry(const char *, struct proc_dir_entry *);    83     void irda_init_max_qos_capabilies(struct qos_info *);    88     void irda_qos_bits_to_value(struct qos_info *);   214     struct irlap_cb * irlap_open(struct net_device *, struct qos_info *, const char *);   216     void irlap_close(struct irlap_cb *);   219     void irda_device_set_media_busy(struct net_device *, int);   229     struct net_device * alloc_irdadev(int);   239     __u16  irda_get_mtt(const struct sk_buff *skb);   252     __u32  irda_get_next_speed(const struct sk_buff *skb);    54     int async_wrap_skb(struct sk_buff *, __u8 *, int);     8     u16  crc_ccitt(u16 , const u8 *, size_t );   425     unsigned int calc_width_bits(unsigned int baudrate, unsigned int widthselect, unsigned int clockselect);   594     int rd_is_active(struct ring_descr *rd);   599     void rd_activate(struct ring_descr *rd);   604     void rd_set_status(struct ring_descr *rd, u8 s);   609     void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s);   632     void rd_set_count(struct ring_descr *rd, u16 c);   637     u8  rd_get_status(struct ring_descr *rd);   642     dma_addr_t  rd_get_addr(struct ring_descr *rd);   650     u16  rd_get_count(struct ring_descr *rd);   680     struct ring_descr * ring_last(struct vlsi_ring *r);   688     struct ring_descr * ring_put(struct vlsi_ring *r);   694     struct ring_descr * ring_first(struct vlsi_ring *r);   702     struct ring_descr * ring_get(struct vlsi_ring *r);    61     char drivername[8U] = { 'v', 'l', 's', 'i', '_', 'i', 'r', '\x0' };    63     const struct pci_device_id vlsi_irda_table[2U] = { { 4100U, 261U, 4294967295U, 4294967295U, 851968U, 16776960U, 0UL } };    75     const struct pci_device_id __mod_pci__vlsi_irda_table_device_table[2U] = {  };    86     int clksrc = 0;    98     int ringsize[2U] = { 8, 8 };   111     int sirpulse = 1;   122     int qos_mtt_bits = 7;   128     void vlsi_reg_debug(unsigned int iobase, const char *s);   138     void vlsi_ring_debug(struct vlsi_ring *r);   160     struct proc_dir_entry *vlsi_proc_root = (struct proc_dir_entry 
*)0;   164     void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev);   180     void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev);   297     void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r);   334     int vlsi_seq_show(struct seq_file *seq, void *v);   371     int vlsi_seq_open(struct inode *inode, struct file *file);   376     const struct file_operations vlsi_proc_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, 0, &vlsi_seq_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };   392     struct vlsi_ring * vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, unsigned int size, unsigned int len, int dir);   449     int vlsi_free_ring(struct vlsi_ring *r);   469     int vlsi_create_hwif(vlsi_irda_dev_t *idev);   507     int vlsi_destroy_hwif(vlsi_irda_dev_t *idev);   524     int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd);   597     void vlsi_fill_rx(struct vlsi_ring *r);   623     void vlsi_rx_interrupt(struct net_device *ndev);   672     void vlsi_unarm_rx(vlsi_irda_dev_t *idev);   721     int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd);   750     int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned int iobase);   844     netdev_tx_t  vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev);  1040     void vlsi_tx_interrupt(struct net_device *ndev);  1100     void vlsi_unarm_tx(vlsi_irda_dev_t *idev);  1142     int vlsi_start_clock(struct pci_dev *pdev);  1205     void vlsi_stop_clock(struct pci_dev *pdev);  1230     void vlsi_clear_regs(unsigned int iobase);  1239     int vlsi_init_chip(struct pci_dev *pdev);  1292     int vlsi_start_hw(vlsi_irda_dev_t *idev);  1324     int vlsi_stop_hw(vlsi_irda_dev_t *idev);  1354     void vlsi_tx_timeout(struct net_device *ndev);  1379     int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);  1428     irqreturn_t  vlsi_interrupt(int irq, void *dev_instance);  1470     int vlsi_open(struct net_device *ndev);  1530     int vlsi_close(struct net_device *ndev);  1553     const struct net_device_ops vlsi_netdev_ops = { 0, 0, &vlsi_open, &vlsi_close, &vlsi_hard_start_xmit, 0, 0, 0, 0, 0, 0, &vlsi_ioctl, 0, 0, 0, &vlsi_tx_timeout, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };  1561     int vlsi_irda_init(struct net_device *ndev);  1608     int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id);  1679     void vlsi_irda_remove(struct pci_dev *pdev);  1714     int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state);  1755     int vlsi_irda_resume(struct pci_dev *pdev);  1808     struct pci_driver vlsi_irda_driver = { { 0, 0 }, (const char *)(&drivername), (const struct pci_device_id *)(&vlsi_irda_table), &vlsi_irda_probe, &vlsi_irda_remove, &vlsi_irda_suspend, 0, 0, &vlsi_irda_resume, 0, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } };  1821     int vlsi_mod_init();  1865     void vlsi_mod_exit();  1891     void ldv_check_final_state();  1894     void ldv_check_return_value(int);  1897     void ldv_check_return_value_probe(int);  1900     void ldv_initialize();  1903     void ldv_handler_precall();  1906     int nondet_int();  1909     int LDV_IN_INTERRUPT = 0;  1912     void ldv_main0_sequence_infinite_withcheck_stateful();    10     void ldv_error();     7     bool  
ldv_is_err(const void *ptr);    14     void * ldv_err_ptr(long error);    21     long int ldv_ptr_err(const void *ptr);    28     bool  ldv_is_err_or_null(const void *ptr);     5     int LDV_DMA_MAP_CALLS = 0;    16     void ldv_dma_mapping_error();           return ;         }        {      1914     struct inode *var_group1;  1915     struct file *var_group2;  1916     int res_vlsi_seq_open_6;  1917     struct net_device *var_group3;  1918     int res_vlsi_open_29;  1919     int res_vlsi_close_30;  1920     struct sk_buff *var_group4;  1921     struct ifreq *var_group5;  1922     int var_vlsi_ioctl_27_p2;  1923     struct pci_dev *var_group6;  1924     const struct pci_device_id *var_vlsi_irda_probe_32_p1;  1925     int res_vlsi_irda_probe_32;  1926     struct pm_message var_vlsi_irda_suspend_34_p1;  1927     int var_vlsi_interrupt_28_p0;  1928     void *var_vlsi_interrupt_28_p1;  1929     int ldv_s_vlsi_proc_fops_file_operations;  1930     int ldv_s_vlsi_netdev_ops_net_device_ops;  1931     int ldv_s_vlsi_irda_driver_pci_driver;  1932     int tmp;  1933     int tmp___0;  1934     int tmp___1;  2204     ldv_s_vlsi_proc_fops_file_operations = 0;  2206     ldv_s_vlsi_netdev_ops_net_device_ops = 0;  2209     ldv_s_vlsi_irda_driver_pci_driver = 0;  2171     LDV_IN_INTERRUPT = 1;  2180     ldv_initialize() { /* Function call is skipped due to function is undefined */}  2201     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {  1823       int i;  1824       int ret;  1825       int tmp;  1826       int tmp___0;  1831       i = 0;  1831       goto ldv_53813;  1833       goto ldv_53812;  1832       ldv_53812:;  1832       switch (ringsize[i]);  1835       fall through1836 fall through 1837 fall through  1838       goto ldv_53810;  1847       ldv_53810:;  1831       i = i + 1;  1832       ldv_53813:;  1833       goto ldv_53812;  1832       ldv_53812:;  1832       switch (ringsize[i]);             default 1840       tmp___0 = net_ratelimit() { /* Function call is skipped due to function is undefined */}  1840       char *__CPAchecker_TMP_0;  1841       __CPAchecker_TMP_0 = (char *)"rx";  1840       printk("\f%s: invalid %s ringsize %d, using default=8\n", (char *)(&drivername), __CPAchecker_TMP_0, ringsize[i]) { /* Function call is skipped due to function is undefined */}  1844       ringsize[i] = 8;  1845       goto ldv_53810;  1847       ldv_53810:;  1831       i = i + 1;  1832       ldv_53813:;  1849       sirpulse = sirpulse != 0;  1855       vlsi_proc_root = proc_mkdir("driver/vlsi_ir", (struct proc_dir_entry *)0) { /* Function call is skipped due to function is undefined */}  1857       ret = __pci_register_driver(&vlsi_irda_driver, &__this_module, "vlsi_ir") { /* Function call is skipped due to function is undefined */}           } 2215     goto ldv_53878;  2215     tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}  2220     goto ldv_53877;  2216     ldv_53877:;  2221     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  2221     switch (tmp___0);  2278     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {         } 1472       vlsi_irda_dev_t *idev;  1473       void *tmp;  1474       int err;  1475       char hwname[32U];  1476       int tmp___0;  1477       int tmp___1;  1478       int tmp___2;  1479       int tmp___3;  1480       int tmp___4;             {  2024         return ((void *)dev) + 3136U;;             } 1472       
idev = (vlsi_irda_dev_t *)tmp;  1473       err = -11;  1476       tmp___1 = pci_request_regions(idev->pdev, (const char *)(&drivername)) { /* Function call is skipped due to function is undefined */}  1480       ndev->base_addr = (unsigned long)(((idev->pdev->resource)[0]).start);  1481       int __CPAchecker_TMP_0 = (int)(idev->pdev->irq);  1481       ndev->irq = __CPAchecker_TMP_0;  1487       int __CPAchecker_TMP_1 = (int)(ndev->base_addr);             { 340 Ignored inline assembler code   341         return ;;             } 1489       unsigned int __CPAchecker_TMP_2 = (unsigned int)(ndev->irq);  1489       -request_irq(__CPAchecker_TMP_2, &vlsi_interrupt, 128UL, (const char *)(&drivername), (void *)ndev)             {   147         int tmp;   147         tmp = request_threaded_irq(irq, handler, (irqreturn_t  (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */}   147         return tmp;;             }            {           }  471         char *ringarea;   472         struct ring_descr_hw *hwmap;   473         void *tmp;   474         struct vlsi_ring *tmp___0;   474         idev->virtaddr = (void *)0;   475         idev->busaddr = 0ULL;               {    26           void *tmp;    26           struct device *__CPAchecker_TMP_0;    26           assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));    26           __CPAchecker_TMP_0 = &(hwdev->dev);                 {   683             void *ret;   684             void *tmp;                   {   520               void *tmp;                     {   479                 const struct dma_map_ops *ops;   480                 const struct dma_map_ops *tmp;   481                 void *cpu_addr;   482                 long tmp___0;   483                 _Bool tmp___1;   484                 int tmp___2;                       {   177                   const struct dma_map_ops *tmp;   177                   assume(!(((unsigned long)dev) != ((unsigned long)((struct device *)0))));   179                   struct bus_type *__CPAchecker_TMP_1;   179                   assume(!(((unsigned long)dev) != ((unsigned long)((struct device *)0))));   179                   __CPAchecker_TMP_1 = (struct bus_type *)0;                         {    32                     return dma_ops;;                         }  179                   return tmp;;                       }  480                 ops = tmp;   483                 tmp___0 = __builtin_expect(((unsigned long)ops) == ((unsigned long)((const struct dma_map_ops *)0)), 0L) { /* Function call is skipped due to function is undefined */}   483                 assume(!(tmp___0 != 0L));   488                 tmp___1 = arch_dma_alloc_attrs(&dev, &flag) { /* Function call is skipped due to function is undefined */}   488                 assume(!(tmp___1 == 0));   488                 tmp___2 = 0;   488                 assume(tmp___2 == 0);   490                 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);   490                 assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void * (*)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long))0))));   493                 cpu_addr = (*(ops->alloc))(dev, size, dma_handle, flag, attrs);   494                 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr) { /* Function call is skipped due to function is undefined */}   495                 return cpu_addr;;                     }  520               return tmp;;                   }  683             ret = tmp;   685             
return ret;;                 }   26           return tmp;;               }  477         ringarea = (char *)tmp;   482         hwmap = (struct ring_descr_hw *)ringarea;               {             }  395           struct vlsi_ring *r;   396           struct ring_descr *rd;   397           unsigned int i;   398           unsigned int j;   399           unsigned long long busaddr;   400           void *tmp;   401           int tmp___0;                 {   480             void *tmp___2;   495             tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}   495             return tmp___2;;                 }  403           r = (struct vlsi_ring *)tmp;   406           __memset((void *)r, 0, 40UL) { /* Function call is skipped due to function is undefined */}   408           r->pdev = pdev;   409           r->dir = dir;   410           r->len = len;   411           r->rd = ((struct ring_descr *)r) + 1U;   412           r->mask = size - 1U;   413           r->size = size;                 {    38             union __anonunion___u_44 __u;    38             __u.__val = i;                   {   279               switch (size);   280               assume(!(size == 1));   281               assume(!(size == 2));   282               assume(size == 4);   282               *((volatile __u32 *)p) = *((__u32 *)res);   282               goto ldv_905;   290               return ;;                   }   40             return ;;                 }                {    38             union __anonunion___u_44 __u;    38             __u.__val = i;                   {   279               switch (size);   280               assume(!(size == 1));   281               assume(!(size == 2));   282               assume(size == 4);   282               *((volatile __u32 *)p) = *((__u32 *)res);   282               goto ldv_905;   290               return ;;                   }   40             return ;;                 }  417           i = 0U;   417           goto ldv_53502;   419           goto ldv_53501;   418           ldv_53501:;   418           rd = (r->rd) + ((unsigned long)i);   419           __memset((void *)rd, 0, 24UL) { /* Function call is skipped due to function is undefined */}   420           rd->hw = hwmap + ((unsigned long)i);                 {   480             void *tmp___2;   495             tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}   495             return tmp___2;;                 }  422           unsigned long __CPAchecker_TMP_0 = (unsigned long)(rd->buf);                 {    41             unsigned long long tmp;    40             struct device *__CPAchecker_TMP_0;    40             assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));    40             __CPAchecker_TMP_0 = &(hwdev->dev);                   {    38               unsigned long long tmp;                     {                   }  204                 const struct dma_map_ops *ops;   205                 const struct dma_map_ops *tmp;   206                 unsigned long long addr;   207                 int tmp___0;   208                 long tmp___1;   209                 unsigned long tmp___2;   210                 unsigned long tmp___3;                       {   177                   const struct dma_map_ops *tmp;   177                   assume(!(((unsigned long)dev) != ((unsigned long)((struct device *)0))));   179                   struct bus_type *__CPAchecker_TMP_1;   179                   assume(!(((unsigned long)dev) != 
((unsigned long)((struct device *)0))));   179                   __CPAchecker_TMP_1 = (struct bus_type *)0;                         {    32                     return dma_ops;;                         }  179                   return tmp;;                       }  205                 ops = tmp;                       {   133                   return ;;                       }                      {   146                   int __CPAchecker_TMP_0;   146                   assume(!(dma_direction == 0));   146                   assume(!(dma_direction == 1));   146                   assume(dma_direction == 2);                         __CPAchecker_TMP_0 = 1;   146                   return __CPAchecker_TMP_0;;                       }  209                 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */}   209                 assume(!(tmp___1 != 0L));   210                 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}   210                 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs);   213                 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}   213                 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */}   216                 return addr;;                     }   40             return tmp;;                 }                {   611             int tmp;   617             assume(!((a >> 24) != 0ULL));   624             a = a & 16777215ULL;   627             rd->hw->rd_u.addr = (unsigned int)a;   628             Ignored inline assembler code                  {   606               rd->hw->rd_u.rd_s.status = s;   607               return ;;                   }  630             return ;;                 }  444           rd->skb = (struct sk_buff *)0;   417           i = i + 1U;   418           ldv_53502:;   419           goto ldv_53501;   418           ldv_53501:;   418           rd = (r->rd) + ((unsigned long)i);   419           __memset((void *)rd, 0, 24UL) { /* Function call is skipped due to function is undefined */}   420           rd->hw = hwmap + ((unsigned long)i);                 {   480             void *tmp___2;   495             tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}   495             return tmp___2;;                 }  422           unsigned long __CPAchecker_TMP_0 = (unsigned long)(rd->buf);                 {               }   41             unsigned long long tmp;    40             struct device *__CPAchecker_TMP_0;    40             assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0)));    40             __CPAchecker_TMP_0 = &(hwdev->dev);                 } |              Source code         
     1 #ifndef _ASM_X86_ATOMIC_H
    2 #define _ASM_X86_ATOMIC_H
    3 
    4 #include <linux/compiler.h>
    5 #include <linux/types.h>
    6 #include <asm/alternative.h>
    7 #include <asm/cmpxchg.h>
    8 #include <asm/rmwcc.h>
    9 #include <asm/barrier.h>
   10 
   11 /*
   12  * Atomic operations that C can't guarantee us.  Useful for
   13  * resource counting etc..
   14  */
   15 
   16 #define ATOMIC_INIT(i)	{ (i) }
   17 
   18 /**
   19  * atomic_read - read atomic variable
   20  * @v: pointer of type atomic_t
   21  *
   22  * Atomically reads the value of @v.
   23  */
   24 static __always_inline int atomic_read(const atomic_t *v)
   25 {
   26 	return READ_ONCE((v)->counter);
   27 }
   28 
   29 /**
   30  * atomic_set - set atomic variable
   31  * @v: pointer of type atomic_t
   32  * @i: required value
   33  *
   34  * Atomically sets the value of @v to @i.
   35  */
   36 static __always_inline void atomic_set(atomic_t *v, int i)
   37 {
   38 	WRITE_ONCE(v->counter, i);
   39 }
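
/*
 * Editor's example (not part of the original header): minimal use of the
 * two accessors above, assuming a hypothetical module-local counter:
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	atomic_set(&hits, 0);				// plain WRITE_ONCE() store
 *	pr_info("hits=%d\n", atomic_read(&hits));	// plain READ_ONCE() load
 */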
   40 
   41 /**
   42  * atomic_add - add integer to atomic variable
   43  * @i: integer value to add
   44  * @v: pointer of type atomic_t
   45  *
   46  * Atomically adds @i to @v.
   47  */
   48 static __always_inline void atomic_add(int i, atomic_t *v)
   49 {
   50 	asm volatile(LOCK_PREFIX "addl %1,%0"
   51 		     : "+m" (v->counter)
   52 		     : "ir" (i));
   53 }
   54 
   55 /**
   56  * atomic_sub - subtract integer from atomic variable
   57  * @i: integer value to subtract
   58  * @v: pointer of type atomic_t
   59  *
   60  * Atomically subtracts @i from @v.
   61  */
   62 static __always_inline void atomic_sub(int i, atomic_t *v)
   63 {
   64 	asm volatile(LOCK_PREFIX "subl %1,%0"
   65 		     : "+m" (v->counter)
   66 		     : "ir" (i));
   67 }
   68 
   69 /**
   70  * atomic_sub_and_test - subtract value from variable and test result
   71  * @i: integer value to subtract
   72  * @v: pointer of type atomic_t
   73  *
   74  * Atomically subtracts @i from @v and returns
   75  * true if the result is zero, or false for all
   76  * other cases.
   77  */
   78 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
   79 {
   80 	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
   81 }
   82 
   83 /**
   84  * atomic_inc - increment atomic variable
   85  * @v: pointer of type atomic_t
   86  *
   87  * Atomically increments @v by 1.
   88  */
   89 static __always_inline void atomic_inc(atomic_t *v)
   90 {
   91 	asm volatile(LOCK_PREFIX "incl %0"
   92 		     : "+m" (v->counter));
   93 }
   94 
   95 /**
   96  * atomic_dec - decrement atomic variable
   97  * @v: pointer of type atomic_t
   98  *
   99  * Atomically decrements @v by 1.
  100  */
  101 static __always_inline void atomic_dec(atomic_t *v)
  102 {
  103 	asm volatile(LOCK_PREFIX "decl %0"
  104 		     : "+m" (v->counter));
  105 }
  106 
  107 /**
  108  * atomic_dec_and_test - decrement and test
  109  * @v: pointer of type atomic_t
  110  *
  111  * Atomically decrements @v by 1 and
  112  * returns true if the result is 0, or false for all other
  113  * cases.
  114  */
  115 static __always_inline bool atomic_dec_and_test(atomic_t *v)
  116 {
  117 	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
  118 }
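
/*
 * Editor's example (not part of the original header): the classic consumer
 * of atomic_dec_and_test() is a reference-count "put" path that frees the
 * object only when the last reference drops. A minimal sketch, assuming a
 * hypothetical struct and <linux/slab.h> for kfree():
 *
 *	struct my_obj {
 *		atomic_t refcnt;
 *		// ... payload ...
 *	};
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			kfree(obj);	// we held the final reference
 *	}
 */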
  119 
  120 /**
  121  * atomic_inc_and_test - increment and test
  122  * @v: pointer of type atomic_t
  123  *
  124  * Atomically increments @v by 1
  125  * and returns true if the result is zero, or false for all
  126  * other cases.
  127  */
  128 static __always_inline bool atomic_inc_and_test(atomic_t *v)
  129 {
  130 	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
  131 }
  132 
  133 /**
  134  * atomic_add_negative - add and test if negative
  135  * @i: integer value to add
  136  * @v: pointer of type atomic_t
  137  *
  138  * Atomically adds @i to @v and returns true
  139  * if the result is negative, or false when
  140  * result is greater than or equal to zero.
  141  */
  142 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
  143 {
  144 	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
  145 }
  146 
  147 /**
  148  * atomic_add_return - add integer and return
  149  * @i: integer value to add
  150  * @v: pointer of type atomic_t
  151  *
  152  * Atomically adds @i to @v and returns @i + @v
  153  */
  154 static __always_inline int atomic_add_return(int i, atomic_t *v)
  155 {
  156 	return i + xadd(&v->counter, i);
  157 }
  158 
  159 /**
  160  * atomic_sub_return - subtract integer and return
  161  * @v: pointer of type atomic_t
  162  * @i: integer value to subtract
  163  *
  164  * Atomically subtracts @i from @v and returns @v - @i
  165  */
  166 static __always_inline int atomic_sub_return(int i, atomic_t *v)
  167 {
  168 	return atomic_add_return(-i, v);
  169 }
  170 
  171 #define atomic_inc_return(v)  (atomic_add_return(1, v))
  172 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
  173 
  174 static __always_inline int atomic_fetch_add(int i, atomic_t *v)
  175 {
  176 	return xadd(&v->counter, i);
  177 }
  178 
  179 static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
  180 {
  181 	return xadd(&v->counter, -i);
  182 }
  183 
  184 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
  185 {
  186 	return cmpxchg(&v->counter, old, new);
  187 }
  188 
  189 static inline int atomic_xchg(atomic_t *v, int new)
  190 {
  191 	return xchg(&v->counter, new);
  192 }
  193 
  194 #define ATOMIC_OP(op)							\
  195 static inline void atomic_##op(int i, atomic_t *v)			\
  196 {									\
  197 	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
  198 			: "+m" (v->counter)				\
  199 			: "ir" (i)					\
  200 			: "memory");					\
  201 }
  202 
  203 #define ATOMIC_FETCH_OP(op, c_op)					\
  204 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
  205 {									\
  206 	int old, val = atomic_read(v);					\
  207 	for (;;) {							\
  208 		old = atomic_cmpxchg(v, val, val c_op i);		\
  209 		if (old == val)						\
  210 			break;						\
  211 		val = old;						\
  212 	}								\
  213 	return old;							\
  214 }
  215 
  216 #define ATOMIC_OPS(op, c_op)						\
  217 	ATOMIC_OP(op)							\
  218 	ATOMIC_FETCH_OP(op, c_op)
  219 
  220 ATOMIC_OPS(and, &)
  221 ATOMIC_OPS(or , |)
  222 ATOMIC_OPS(xor, ^)
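
/*
 * Editor's note (not part of the original header): for "and", the two
 * macros above expand to a lock-prefixed "andl" for the void variant and a
 * cmpxchg retry loop for the fetch variant, roughly:
 *
 *	static inline void atomic_and(int i, atomic_t *v)
 *	{
 *		asm volatile(LOCK_PREFIX "andl %1,%0"
 *				: "+m" (v->counter) : "ir" (i) : "memory");
 *	}
 *
 *	static inline int atomic_fetch_and(int i, atomic_t *v)
 *	{
 *		int old, val = atomic_read(v);
 *		for (;;) {
 *			old = atomic_cmpxchg(v, val, val & i);
 *			if (old == val)
 *				break;
 *			val = old;
 *		}
 *		return old;
 *	}
 */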
  223 
  224 #undef ATOMIC_OPS
  225 #undef ATOMIC_FETCH_OP
  226 #undef ATOMIC_OP
  227 
  228 /**
  229  * __atomic_add_unless - add unless the number is already a given value
  230  * @v: pointer of type atomic_t
  231  * @a: the amount to add to v...
  232  * @u: ...unless v is equal to u.
  233  *
  234  * Atomically adds @a to @v, so long as @v was not already @u.
  235  * Returns the old value of @v.
  236  */
  237 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
  238 {
  239 	int c, old;
  240 	c = atomic_read(v);
  241 	for (;;) {
  242 		if (unlikely(c == (u)))
  243 			break;
  244 		old = atomic_cmpxchg((v), c, c + (a));
  245 		if (likely(old == c))
  246 			break;
  247 		c = old;
  248 	}
  249 	return c;
  250 }
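
/*
 * Editor's example (not part of the original header): __atomic_add_unless()
 * is the building block for atomic_inc_not_zero()-style "take a reference
 * only if the object is still live" logic. Since the old value is returned,
 * a zero result means no reference was taken. A minimal sketch:
 *
 *	static bool my_obj_get_unless_dead(atomic_t *refcnt)
 *	{
 *		return __atomic_add_unless(refcnt, 1, 0) != 0;
 *	}
 */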
  251 
  252 /**
  253  * atomic_inc_short - increment of a short integer
   254  * @v: pointer of type short int
  255  *
  256  * Atomically adds 1 to @v
   257  * Returns the new value of @v
  258  */
  259 static __always_inline short int atomic_inc_short(short int *v)
  260 {
  261 	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
  262 	return *v;
  263 }
  264 
  265 #ifdef CONFIG_X86_32
  266 # include <asm/atomic64_32.h>
  267 #else
  268 # include <asm/atomic64_64.h>
  269 #endif
  270 
   271 #endif /* _ASM_X86_ATOMIC_H */

     1 #ifndef _ASM_X86_DMA_MAPPING_H
    2 #define _ASM_X86_DMA_MAPPING_H
    3 
    4 /*
    5  * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
    6  * Documentation/DMA-API.txt for documentation.
    7  */
    8 
    9 #include <linux/kmemcheck.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/dma-debug.h>
   12 #include <asm/io.h>
   13 #include <asm/swiotlb.h>
   14 #include <linux/dma-contiguous.h>
   15 
   16 #ifdef CONFIG_ISA
   17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
   18 #else
   19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
   20 #endif
   21 
   22 #define DMA_ERROR_CODE	0
   23 
   24 extern int iommu_merge;
   25 extern struct device x86_dma_fallback_dev;
   26 extern int panic_on_overflow;
   27 
   28 extern const struct dma_map_ops *dma_ops;
   29 
   30 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
   31 {
   32 	return dma_ops;
   33 }
   34 
   35 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
   36 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
   37 
   38 #define HAVE_ARCH_DMA_SUPPORTED 1
   39 extern int dma_supported(struct device *hwdev, u64 mask);
   40 
   41 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
   42 					dma_addr_t *dma_addr, gfp_t flag,
   43 					unsigned long attrs);
   44 
   45 extern void dma_generic_free_coherent(struct device *dev, size_t size,
   46 				      void *vaddr, dma_addr_t dma_addr,
   47 				      unsigned long attrs);
   48 
   49 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
   50 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
   51 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
   52 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
   53 #else
   54 
   55 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
   56 {
   57 	if (!dev->dma_mask)
   58 		return 0;
   59 
   60 	return addr + size - 1 <= *dev->dma_mask;
   61 }
   62 
   63 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
   64 {
   65 	return paddr;
   66 }
   67 
   68 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
   69 {
   70 	return daddr;
   71 }
   72 #endif /* CONFIG_X86_DMA_REMAP */
   73 
   74 static inline void
   75 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
   76 	enum dma_data_direction dir)
   77 {
   78 	flush_write_buffers();
   79 }
   80 
   81 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
   82 						    gfp_t gfp)
   83 {
   84 	unsigned long dma_mask = 0;
   85 
   86 	dma_mask = dev->coherent_dma_mask;
   87 	if (!dma_mask)
   88 		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
   89 
   90 	return dma_mask;
   91 }
   92 
   93 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
   94 {
   95 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
   96 
   97 	if (dma_mask <= DMA_BIT_MASK(24))
   98 		gfp |= GFP_DMA;
   99 #ifdef CONFIG_X86_64
  100 	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
  101 		gfp |= GFP_DMA32;
  102 #endif
  103        return gfp;
  104 }
  105 
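/*
 * Editor's example (not part of the original header): how the gfp selection
 * above plays out for a device with a 24-bit coherent mask (set earlier,
 * e.g. via dma_set_coherent_mask()):
 *
 *	dev->coherent_dma_mask = DMA_BIT_MASK(24);
 *	gfp = dma_alloc_coherent_gfp_flags(dev, GFP_KERNEL);
 *	// dma_mask <= DMA_BIT_MASK(24), so gfp now includes GFP_DMA
 *	buf = dma_alloc_coherent(dev, size, &handle, gfp);
 */
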
   106 #endif

     1 #ifndef _ASM_X86_IO_H
    2 #define _ASM_X86_IO_H
    3 
    4 /*
    5  * This file contains the definitions for the x86 IO instructions
    6  * inb/inw/inl/outb/outw/outl and the "string versions" of the same
    7  * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
    8  * versions of the single-IO instructions (inb_p/inw_p/..).
    9  *
   10  * This file is not meant to be obfuscating: it's just complicated
   11  * to (a) handle it all in a way that makes gcc able to optimize it
   12  * as well as possible and (b) trying to avoid writing the same thing
   13  * over and over again with slight variations and possibly making a
   14  * mistake somewhere.
   15  */
   16 
   17 /*
   18  * Thanks to James van Artsdalen for a better timing-fix than
   19  * the two short jumps: using outb's to a nonexistent port seems
   20  * to guarantee better timings even on fast machines.
   21  *
   22  * On the other hand, I'd like to be sure of a non-existent port:
   23  * I feel a bit unsafe about using 0x80 (should be safe, though)
   24  *
   25  *		Linus
   26  */
   27 
   28  /*
   29   *  Bit simplified and optimized by Jan Hubicka
   30   *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
   31   *
   32   *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
   33   *  isa_read[wl] and isa_write[wl] fixed
   34   *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   35   */
   36 
   37 #define ARCH_HAS_IOREMAP_WC
   38 #define ARCH_HAS_IOREMAP_WT
   39 
   40 #include <linux/string.h>
   41 #include <linux/compiler.h>
   42 #include <asm/page.h>
   43 #include <asm/early_ioremap.h>
   44 #include <asm/pgtable_types.h>
   45 
   46 #define build_mmio_read(name, size, type, reg, barrier) \
   47 static inline type name(const volatile void __iomem *addr) \
   48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
   49 :"m" (*(volatile type __force *)addr) barrier); return ret; }
   50 
   51 #define build_mmio_write(name, size, type, reg, barrier) \
   52 static inline void name(type val, volatile void __iomem *addr) \
   53 { asm volatile("mov" size " %0,%1": :reg (val), \
   54 "m" (*(volatile type __force *)addr) barrier); }
   55 
   56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
   57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
   58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
   59 
   60 build_mmio_read(__readb, "b", unsigned char, "=q", )
   61 build_mmio_read(__readw, "w", unsigned short, "=r", )
   62 build_mmio_read(__readl, "l", unsigned int, "=r", )
   63 
   64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
   65 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
   66 build_mmio_write(writel, "l", unsigned int, "r", :"memory")
   67 
   68 build_mmio_write(__writeb, "b", unsigned char, "q", )
   69 build_mmio_write(__writew, "w", unsigned short, "r", )
   70 build_mmio_write(__writel, "l", unsigned int, "r", )
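
/*
 * Editor's note (not part of the original header): each build_mmio_* line
 * above stamps out one MMIO accessor. For example,
 * build_mmio_read(readl, "l", unsigned int, "=r", :"memory") expands to
 * roughly:
 *
 *	static inline unsigned int readl(const volatile void __iomem *addr)
 *	{
 *		unsigned int ret;
 *		asm volatile("movl %1,%0"
 *			     : "=r" (ret)
 *			     : "m" (*(volatile unsigned int __force *)addr)
 *			     : "memory");
 *		return ret;
 *	}
 */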
   71 
   72 #define readb_relaxed(a) __readb(a)
   73 #define readw_relaxed(a) __readw(a)
   74 #define readl_relaxed(a) __readl(a)
   75 #define __raw_readb __readb
   76 #define __raw_readw __readw
   77 #define __raw_readl __readl
   78 
   79 #define writeb_relaxed(v, a) __writeb(v, a)
   80 #define writew_relaxed(v, a) __writew(v, a)
   81 #define writel_relaxed(v, a) __writel(v, a)
   82 #define __raw_writeb __writeb
   83 #define __raw_writew __writew
   84 #define __raw_writel __writel
   85 
   86 #define mmiowb() barrier()
   87 
   88 #ifdef CONFIG_X86_64
   89 
   90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
   91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
   92 
   93 #define readq_relaxed(a)	readq(a)
   94 #define writeq_relaxed(v, a)	writeq(v, a)
   95 
   96 #define __raw_readq(a)		readq(a)
   97 #define __raw_writeq(val, addr)	writeq(val, addr)
   98 
   99 /* Let people know that we have them */
  100 #define readq			readq
  101 #define writeq			writeq
  102 
  103 #endif
  104 
  105 /**
  106  *	virt_to_phys	-	map virtual addresses to physical
  107  *	@address: address to remap
  108  *
  109  *	The returned physical address is the physical (CPU) mapping for
  110  *	the memory address given. It is only valid to use this function on
  111  *	addresses directly mapped or allocated via kmalloc.
  112  *
  113  *	This function does not give bus mappings for DMA transfers. In
  114  *	almost all conceivable cases a device driver should not be using
  115  *	this function
  116  */
  117 
  118 static inline phys_addr_t virt_to_phys(volatile void *address)
  119 {
  120 	return __pa(address);
  121 }
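
/*
 * Editor's example (not part of the original header): valid only for
 * directly mapped memory, e.g. a kmalloc() buffer:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	phys_addr_t phys = virt_to_phys(buf);	// CPU-physical, not a bus address
 */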
  122 
  123 /**
  124  *	phys_to_virt	-	map physical address to virtual
  125  *	@address: address to remap
  126  *
  127  *	The returned virtual address is a current CPU mapping for
  128  *	the memory address given. It is only valid to use this function on
  129  *	addresses that have a kernel mapping
  130  *
  131  *	This function does not handle bus mappings for DMA transfers. In
  132  *	almost all conceivable cases a device driver should not be using
  133  *	this function
  134  */
  135 
  136 static inline void *phys_to_virt(phys_addr_t address)
  137 {
  138 	return __va(address);
  139 }
  140 
  141 /*
  142  * Change "struct page" to physical address.
  143  */
  144 #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
  145 
  146 /*
  147  * ISA I/O bus memory addresses are 1:1 with the physical address.
  148  * However, we truncate the address to unsigned int to avoid undesirable
   149  * promotions in legacy drivers.
  150  */
  151 static inline unsigned int isa_virt_to_bus(volatile void *address)
  152 {
  153 	return (unsigned int)virt_to_phys(address);
  154 }
  155 #define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
  156 #define isa_bus_to_virt		phys_to_virt
  157 
  158 /*
  159  * However PCI ones are not necessarily 1:1 and therefore these interfaces
  160  * are forbidden in portable PCI drivers.
  161  *
  162  * Allow them on x86 for legacy drivers, though.
  163  */
  164 #define virt_to_bus virt_to_phys
  165 #define bus_to_virt phys_to_virt
  166 
  167 /*
  168  * The default ioremap() behavior is non-cached; if you need something
  169  * else, you probably want one of the following.
  170  */
  171 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
  172 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
  173 #define ioremap_uc ioremap_uc
  174 
  175 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
  176 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
  177 
  178 /**
  179  * ioremap     -   map bus memory into CPU space
  180  * @offset:    bus address of the memory
  181  * @size:      size of the resource to map
  182  *
  183  * ioremap performs a platform specific sequence of operations to
  184  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  185  * writew/writel functions and the other mmio helpers. The returned
  186  * address is not guaranteed to be usable directly as a virtual
  187  * address.
  188  *
  189  * If the area you are trying to map is a PCI BAR you should have a
  190  * look at pci_iomap().
  191  */
  192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
  193 {
  194 	return ioremap_nocache(offset, size);
  195 }
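
/*
 * Editor's example (not part of the original header): the usual
 * map/access/unmap cycle, assuming a hypothetical MMIO BAR at bar_start of
 * bar_len bytes (STATUS_REG_OFFSET and CMD_REG_OFFSET are made up):
 *
 *	void __iomem *regs = ioremap(bar_start, bar_len);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + STATUS_REG_OFFSET);
 *	writel(CMD_RESET, regs + CMD_REG_OFFSET);
 *	iounmap(regs);
 */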
  196 
  197 extern void iounmap(volatile void __iomem *addr);
  198 
  199 extern void set_iounmap_nonlazy(void);
  200 
  201 #ifdef __KERNEL__
  202 
  203 #include <asm-generic/iomap.h>
  204 
  205 /*
  206  * Convert a virtual cached pointer to an uncached pointer
  207  */
  208 #define xlate_dev_kmem_ptr(p)	p
  209 
  210 /**
  211  * memset_io	Set a range of I/O memory to a constant value
  212  * @addr:	The beginning of the I/O-memory range to set
  213  * @val:	The value to set the memory to
  214  * @count:	The number of bytes to set
  215  *
  216  * Set a range of I/O memory to a given value.
  217  */
  218 static inline void
  219 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
  220 {
  221 	memset((void __force *)addr, val, count);
  222 }
  223 
  224 /**
  225  * memcpy_fromio	Copy a block of data from I/O memory
  226  * @dst:		The (RAM) destination for the copy
  227  * @src:		The (I/O memory) source for the data
  228  * @count:		The number of bytes to copy
  229  *
  230  * Copy a block of data from I/O memory.
  231  */
  232 static inline void
  233 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
  234 {
  235 	memcpy(dst, (const void __force *)src, count);
  236 }
  237 
  238 /**
  239  * memcpy_toio		Copy a block of data into I/O memory
  240  * @dst:		The (I/O memory) destination for the copy
  241  * @src:		The (RAM) source for the data
  242  * @count:		The number of bytes to copy
  243  *
  244  * Copy a block of data to I/O memory.
  245  */
  246 static inline void
  247 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
  248 {
  249 	memcpy((void __force *)dst, src, count);
  250 }
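
/*
 * Editor's example (not part of the original header): clearing and then
 * filling a device window with the helpers above, assuming "win" came from
 * ioremap() and WIN_SIZE/fw_image/fw_len are hypothetical:
 *
 *	memset_io(win, 0, WIN_SIZE);		// zero the whole window
 *	memcpy_toio(win, fw_image, fw_len);	// push the bytes to the device
 */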
  251 
  252 /*
  253  * ISA space is 'always mapped' on a typical x86 system, no need to
  254  * explicitly ioremap() it. The fact that the ISA IO space is mapped
  255  * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
  256  * are physical addresses. The following constant pointer can be
  257  * used as the IO-area pointer (it can be iounmapped as well, so the
  258  * analogy with PCI is quite large):
  259  */
  260 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
  261 
  262 /*
  263  *	Cache management
  264  *
  265  *	This needed for two cases
  266  *	1. Out of order aware processors
  267  *	2. Accidentally out of order processors (PPro errata #51)
  268  */
  269 
  270 static inline void flush_write_buffers(void)
  271 {
  272 #if defined(CONFIG_X86_PPRO_FENCE)
  273 	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
  274 #endif
  275 }
  276 
  277 #endif /* __KERNEL__ */
  278 
  279 extern void native_io_delay(void);
  280 
  281 extern int io_delay_type;
  282 extern void io_delay_init(void);
  283 
  284 #if defined(CONFIG_PARAVIRT)
  285 #include <asm/paravirt.h>
  286 #else
  287 
  288 static inline void slow_down_io(void)
  289 {
  290 	native_io_delay();
  291 #ifdef REALLY_SLOW_IO
  292 	native_io_delay();
  293 	native_io_delay();
  294 	native_io_delay();
  295 #endif
  296 }
  297 
  298 #endif
  299 
  300 #define BUILDIO(bwl, bw, type)						\
  301 static inline void out##bwl(unsigned type value, int port)		\
  302 {									\
  303 	asm volatile("out" #bwl " %" #bw "0, %w1"			\
  304 		     : : "a"(value), "Nd"(port));			\
  305 }									\
  306 									\
  307 static inline unsigned type in##bwl(int port)				\
  308 {									\
  309 	unsigned type value;						\
  310 	asm volatile("in" #bwl " %w1, %" #bw "0"			\
  311 		     : "=a"(value) : "Nd"(port));			\
  312 	return value;							\
  313 }									\
  314 									\
  315 static inline void out##bwl##_p(unsigned type value, int port)		\
  316 {									\
  317 	out##bwl(value, port);						\
  318 	slow_down_io();							\
  319 }									\
  320 									\
  321 static inline unsigned type in##bwl##_p(int port)			\
  322 {									\
  323 	unsigned type value = in##bwl(port);				\
  324 	slow_down_io();							\
  325 	return value;							\
  326 }									\
  327 									\
  328 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
  329 {									\
  330 	asm volatile("rep; outs" #bwl					\
  331 		     : "+S"(addr), "+c"(count) : "d"(port));		\
  332 }									\
  333 									\
  334 static inline void ins##bwl(int port, void *addr, unsigned long count)	\
  335 {									\
  336 	asm volatile("rep; ins" #bwl					\
  337 		     : "+D"(addr), "+c"(count) : "d"(port));		\
  338 }
  339 
  340 BUILDIO(b, b, char)
  341 BUILDIO(w, w, short)
  342 BUILDIO(l, , int)
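
/*
 * Editor's note (not part of the original header): the three BUILDIO()
 * lines above generate outb/inb, outw/inw and outl/inl (plus the _p pausing
 * and string variants). E.g. BUILDIO(b, b, char) yields:
 *
 *	static inline void outb(unsigned char value, int port)
 *	{
 *		asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
 *	}
 *
 *	static inline unsigned char inb(int port)
 *	{
 *		unsigned char value;
 *		asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
 *		return value;
 *	}
 *
 * These are exactly the port accessors the vlsi_ir driver below uses for
 * its I/O-port register file.
 */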
  343 
  344 extern void *xlate_dev_mem_ptr(phys_addr_t phys);
  345 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
  346 
  347 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  348 				enum page_cache_mode pcm);
  349 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  350 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
  351 
  352 extern bool is_early_ioremap_ptep(pte_t *ptep);
  353 
  354 #ifdef CONFIG_XEN
  355 #include <xen/xen.h>
  356 struct bio_vec;
  357 
  358 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
  359 				      const struct bio_vec *vec2);
  360 
  361 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
  362 	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
  363 	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
  364 #endif	/* CONFIG_XEN */
  365 
  366 #define IO_SPACE_LIMIT 0xffff
  367 
  368 #ifdef CONFIG_MTRR
  369 extern int __must_check arch_phys_wc_index(int handle);
  370 #define arch_phys_wc_index arch_phys_wc_index
  371 
  372 extern int __must_check arch_phys_wc_add(unsigned long base,
  373 					 unsigned long size);
  374 extern void arch_phys_wc_del(int handle);
  375 #define arch_phys_wc_add arch_phys_wc_add
  376 #endif
  377 
  378 #ifdef CONFIG_X86_PAT
  379 extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
  380 extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
  381 #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
  382 #endif
  383 
   384 #endif /* _ASM_X86_IO_H */

     1 
    2 /*********************************************************************
    3  *
    4  *	vlsi_ir.c:	VLSI82C147 PCI IrDA controller driver for Linux
    5  *
    6  *	Copyright (c) 2001-2003 Martin Diehl
    7  *
    8  *	This program is free software; you can redistribute it and/or 
    9  *	modify it under the terms of the GNU General Public License as 
   10  *	published by the Free Software Foundation; either version 2 of 
   11  *	the License, or (at your option) any later version.
   12  *
   13  *	This program is distributed in the hope that it will be useful,
   14  *	but WITHOUT ANY WARRANTY; without even the implied warranty of
   15  *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   16  *	GNU General Public License for more details.
   17  *
   18  *	You should have received a copy of the GNU General Public License 
   19  *	along with this program; if not, see <http://www.gnu.org/licenses/>.
   20  *
   21  ********************************************************************/
   22 
   23 #include <linux/module.h>
   24  
   25 #define DRIVER_NAME 		"vlsi_ir"
   26 #define DRIVER_VERSION		"v0.5"
   27 #define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
   28 #define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
   29 
   30 MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
   31 MODULE_AUTHOR(DRIVER_AUTHOR);
   32 MODULE_LICENSE("GPL");
   33 
   34 /********************************************************/
   35 
   36 #include <linux/kernel.h>
   37 #include <linux/ktime.h>
   38 #include <linux/init.h>
   39 #include <linux/interrupt.h>
   40 #include <linux/pci.h>
   41 #include <linux/slab.h>
   42 #include <linux/netdevice.h>
   43 #include <linux/skbuff.h>
   44 #include <linux/delay.h>
   45 #include <linux/proc_fs.h>
   46 #include <linux/seq_file.h>
   47 #include <linux/math64.h>
   48 #include <linux/mutex.h>
   49 #include <linux/uaccess.h>
   50 #include <asm/byteorder.h>
   51 
   52 #include <net/irda/irda.h>
   53 #include <net/irda/irda_device.h>
   54 #include <net/irda/wrapper.h>
   55 #include <net/irda/crc.h>
   56 
   57 #include "vlsi_ir.h"
   58 
   59 /********************************************************/
   60 
   61 static /* const */ char drivername[] = DRIVER_NAME;
   62 
   63 static const struct pci_device_id vlsi_irda_table[] = {
   64 	{
   65 		.class =        PCI_CLASS_WIRELESS_IRDA << 8,
   66 		.class_mask =	PCI_CLASS_SUBCLASS_MASK << 8, 
   67 		.vendor =       PCI_VENDOR_ID_VLSI,
   68 		.device =       PCI_DEVICE_ID_VLSI_82C147,
   69 		.subvendor = 	PCI_ANY_ID,
   70 		.subdevice =	PCI_ANY_ID,
   71 	},
   72 	{ /* all zeroes */ }
   73 };
   74 
   75 MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
   76 
   77 /********************************************************/
   78 
   79 /*	clksrc: which clock source to be used
   80  *		0: auto - try PLL, fallback to 40MHz XCLK
   81  *		1: on-chip 48MHz PLL
   82  *		2: external 48MHz XCLK
   83  *		3: external 40MHz XCLK (HP OB-800)
   84  */
   85 
   86 static int clksrc = 0;			/* default is 0(auto) */
   87 module_param(clksrc, int, 0);
   88 MODULE_PARM_DESC(clksrc, "clock input source selection");
   89 
   90 /*	ringsize: size of the tx and rx descriptor rings
   91  *		independent for tx and rx
   92  *		specify as ringsize=tx[,rx]
   93  *		allowed values: 4, 8, 16, 32, 64
   94  *		Due to the IrDA 1.x max. allowed window size=7,
   95  *		there should be no gain when using rings larger than 8
   96  */
   97 
   98 static int ringsize[] = {8,8};		/* default is tx=8 / rx=8 */
   99 module_param_array(ringsize, int, NULL, 0);
  100 MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
  101 
  102 /*	sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
  103  *		0: very short, 1.5us (exception: 6us at 2.4 kbaud)
  104  *		1: nominal 3/16 bittime width
  105  *	note: IrDA compliant peer devices should be happy regardless
  106  *		which one is used. Primary goal is to save some power
  107  *		on the sender's side - at 9.6kbaud for example the short
  108  *		pulse width saves more than 90% of the transmitted IR power.
  109  */
  110 
  111 static int sirpulse = 1;		/* default is 3/16 bittime */
  112 module_param(sirpulse, int, 0);
  113 MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
  114 
  115 /*	qos_mtt_bits: encoded min-turn-time value we require the peer device
  116  *		 to use before transmitting to us. "Type 1" (per-station)
  117  *		 bitfield according to IrLAP definition (section 6.6.8)
  118  *		 It is unknown which transceiver my OB800 uses - the
  119  *		 fairly common HP HDLS-1100 requires 1 msec - so let's use that.
  120  */
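/* decoding sketch, assuming the standard IrLAP min-turn-time table
 * (bit0=10ms, bit1=5ms, bit2=1ms, bit3=0.5ms, bit4=0.1ms, bit5=0.05ms,
 * bit6=0.01ms, bit7=0ms): the default 0x07 advertises {10, 5, 1} msec,
 * i.e. the peer must wait at least 1 msec before transmitting to us.
 */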
  121 
  122 static int qos_mtt_bits = 0x07;		/* default is 1 ms or more */
  123 module_param(qos_mtt_bits, int, 0);
  124 MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
  125 
  126 /********************************************************/
  127 
  128 static void vlsi_reg_debug(unsigned iobase, const char *s)
  129 {
  130 	int	i;
  131 
  132 	printk(KERN_DEBUG "%s: ", s);
  133 	for (i = 0; i < 0x20; i++)
  134 		printk("%02x", (unsigned)inb((iobase+i)));
  135 	printk("\n");
  136 }
  137 
  138 static void vlsi_ring_debug(struct vlsi_ring *r)
  139 {
  140 	struct ring_descr *rd;
  141 	unsigned i;
  142 
  143 	printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
  144 		__func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
  145 	printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
  146 		atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
  147 	for (i = 0; i < r->size; i++) {
  148 		rd = &r->rd[i];
  149 		printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
  150 		printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
  151 		printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
  152 			__func__, (unsigned) rd_get_status(rd),
  153 			(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
  154 	}
  155 }
  156 
  157 /********************************************************/
  158 
  159 /* needed regardless of CONFIG_PROC_FS */
  160 static struct proc_dir_entry *vlsi_proc_root = NULL;
  161 
  162 #ifdef CONFIG_PROC_FS
  163 
  164 static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
  165 {
  166 	unsigned iobase = pci_resource_start(pdev, 0);
  167 	unsigned i;
  168 
  169 	seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
  170 		   pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
  171 	seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
  172 	seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
  173 		   pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
  174 	seq_printf(seq, "hw registers: ");
  175 	for (i = 0; i < 0x20; i++)
  176 		seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
  177 	seq_printf(seq, "\n");
  178 }
  179 		
  180 static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
  181 {
  182 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  183 	u8 byte;
  184 	u16 word;
  185 	s32 sec, usec;
  186 	unsigned iobase = ndev->base_addr;
  187 
  188 	seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
  189 		netif_device_present(ndev) ? "attached" : "detached", 
  190 		netif_running(ndev) ? "running" : "not running",
  191 		netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
  192 		netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
  193 
  194 	if (!netif_running(ndev))
  195 		return;
  196 
  197 	seq_printf(seq, "\nhw-state:\n");
  198 	pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
  199 	seq_printf(seq, "IRMISC:%s%s%s uart%s",
  200 		(byte&IRMISC_IRRAIL) ? " irrail" : "",
  201 		(byte&IRMISC_IRPD) ? " irpd" : "",
  202 		(byte&IRMISC_UARTTST) ? " uarttest" : "",
  203 		(byte&IRMISC_UARTEN) ? "@" : " disabled\n");
  204 	if (byte&IRMISC_UARTEN) {
  205 		seq_printf(seq, "0x%s\n",
  206 			(byte&2) ? ((byte&1) ? "3e8" : "2e8")
  207 				 : ((byte&1) ? "3f8" : "2f8"));
  208 	}
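	/* decoding sketch for the branch above: IRMISC bits 1:0 select the
	 * legacy UART base - 00=0x2f8, 01=0x3f8, 10=0x2e8, 11=0x3e8
	 */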
  209 	pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
  210 	seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
  211 		(byte&CLKCTL_PD_INV) ? "powered" : "down",
  212 		(byte&CLKCTL_LOCK) ? " locked" : "",
  213 		(byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
  214 		(byte&CLKCTL_CLKSTP) ? "stopped" : "running",
  215 		(byte&CLKCTL_WAKE) ? "enabled" : "disabled");
  216 	pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
  217 	seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
  218 
  219 	byte = inb(iobase+VLSI_PIO_IRINTR);
  220 	seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n",
  221 		(byte&IRINTR_ACTEN) ? " ACTEN" : "",
  222 		(byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
  223 		(byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
  224 		(byte&IRINTR_OE_EN) ? " OE_EN" : "",
  225 		(byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
  226 		(byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
  227 		(byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
  228 		(byte&IRINTR_OE_INT) ? " OE_INT" : "");
  229 	word = inw(iobase+VLSI_PIO_RINGPTR);
  230 	seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
  231 	word = inw(iobase+VLSI_PIO_RINGBASE);
  232 	seq_printf(seq, "RINGBASE: busmap=0x%08x\n",
  233 		((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
  234 	word = inw(iobase+VLSI_PIO_RINGSIZE);
  235 	seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
  236 		RINGSIZE_TO_TXSIZE(word));
  237 
  238 	word = inw(iobase+VLSI_PIO_IRCFG);
  239 	seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
  240 		(word&IRCFG_LOOP) ? " LOOP" : "",
  241 		(word&IRCFG_ENTX) ? " ENTX" : "",
  242 		(word&IRCFG_ENRX) ? " ENRX" : "",
  243 		(word&IRCFG_MSTR) ? " MSTR" : "",
  244 		(word&IRCFG_RXANY) ? " RXANY" : "",
  245 		(word&IRCFG_CRC16) ? " CRC16" : "",
  246 		(word&IRCFG_FIR) ? " FIR" : "",
  247 		(word&IRCFG_MIR) ? " MIR" : "",
  248 		(word&IRCFG_SIR) ? " SIR" : "",
  249 		(word&IRCFG_SIRFILT) ? " SIRFILT" : "",
  250 		(word&IRCFG_SIRTEST) ? " SIRTEST" : "",
  251 		(word&IRCFG_TXPOL) ? " TXPOL" : "",
  252 		(word&IRCFG_RXPOL) ? " RXPOL" : "");
  253 	word = inw(iobase+VLSI_PIO_IRENABLE);
  254 	seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n",
  255 		(word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "",
  256 		(word&IRENABLE_CFGER) ? " CFGERR" : "",
  257 		(word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
  258 		(word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
  259 		(word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
  260 		(word&IRENABLE_ENTXST) ? " ENTXST" : "",
  261 		(word&IRENABLE_ENRXST) ? " ENRXST" : "",
  262 		(word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
  263 	word = inw(iobase+VLSI_PIO_PHYCTL);
  264 	seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
  265 		(unsigned)PHYCTL_TO_BAUD(word),
  266 		(unsigned)PHYCTL_TO_PLSWID(word),
  267 		(unsigned)PHYCTL_TO_PREAMB(word));
  268 	word = inw(iobase+VLSI_PIO_NPHYCTL);
  269 	seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
  270 		(unsigned)PHYCTL_TO_BAUD(word),
  271 		(unsigned)PHYCTL_TO_PLSWID(word),
  272 		(unsigned)PHYCTL_TO_PREAMB(word));
  273 	word = inw(iobase+VLSI_PIO_MAXPKT);
  274 	seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word);
  275 	word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
  276 	seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
  277 
  278 	seq_printf(seq, "\nsw-state:\n");
  279 	seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 
  280 		(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
  281 	sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
  282 			  USEC_PER_SEC, &usec);
  283 	seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec);
  284 
  285 	seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
  286 		ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
  287 		ndev->stats.rx_dropped);
  288 	seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
  289 		ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
  290 		ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
  291 	seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
  292 		ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
  293 		ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);
  294 
  295 }
  296 		
  297 static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
  298 {
  299 	struct ring_descr *rd;
  300 	unsigned i, j;
  301 	int h, t;
  302 
  303 	seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
  304 		r->size, r->mask, r->len, r->dir, r->rd[0].hw);
  305 	h = atomic_read(&r->head) & r->mask;
  306 	t = atomic_read(&r->tail) & r->mask;
  307 	seq_printf(seq, "head = %d / tail = %d ", h, t);
  308 	if (h == t)
  309 		seq_printf(seq, "(empty)\n");
  310 	else {
  311 		if (((t+1)&r->mask) == h)
  312 			seq_printf(seq, "(full)\n");
  313 		else
  314 			seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 
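		/* worked example: with size=8 the mask is 7; head=6 and tail=2
		 * give level = (2-6) & 7 = 4 descriptors currently in flight.
		 */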
  315 		rd = &r->rd[h];
  316 		j = (unsigned) rd_get_count(rd);
  317 		seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n",
  318 				h, (unsigned)rd_get_status(rd), j);
  319 		if (j > 0) {
  320 			seq_printf(seq, "   data: %*ph\n",
  321 				   min_t(unsigned, j, 20), rd->buf);
  322 		}
  323 	}
  324 	for (i = 0; i < r->size; i++) {
  325 		rd = &r->rd[i];
  326 		seq_printf(seq, "> ring descr %u: ", i);
  327 		seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
  328 		seq_printf(seq, "  hw: status=%02x count=%u busaddr=0x%08x\n",
  329 			(unsigned) rd_get_status(rd),
  330 			(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
  331 	}
  332 }
  333 
  334 static int vlsi_seq_show(struct seq_file *seq, void *v)
  335 {
  336 	struct net_device *ndev = seq->private;
  337 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  338 	unsigned long flags;
  339 
  340 	seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
  341 	seq_printf(seq, "clksrc: %s\n", 
  342 		(clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
  343 			    : ((clksrc==1)?"48MHz PLL":"autodetect"));
  344 	seq_printf(seq, "ringsize: tx=%d / rx=%d\n",
  345 		ringsize[0], ringsize[1]);
  346 	seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
  347 	seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
  348 
  349 	spin_lock_irqsave(&idev->lock, flags);
  350 	if (idev->pdev != NULL) {
  351 		vlsi_proc_pdev(seq, idev->pdev);
  352 
  353 		if (idev->pdev->current_state == 0)
  354 			vlsi_proc_ndev(seq, ndev);
  355 		else
  356 			seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
  357 				idev->resume_ok);
  358 		if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
  359 			seq_printf(seq, "\n--------- RX ring -----------\n\n");
  360 			vlsi_proc_ring(seq, idev->rx_ring);
  361 			seq_printf(seq, "\n--------- TX ring -----------\n\n");
  362 			vlsi_proc_ring(seq, idev->tx_ring);
  363 		}
  364 	}
  365 	seq_printf(seq, "\n");
  366 	spin_unlock_irqrestore(&idev->lock, flags);
  367 
  368 	return 0;
  369 }
  370 
  371 static int vlsi_seq_open(struct inode *inode, struct file *file)
  372 {
  373 	return single_open(file, vlsi_seq_show, PDE_DATA(inode));
  374 }
  375 
  376 static const struct file_operations vlsi_proc_fops = {
  377 	.owner	 = THIS_MODULE,
  378 	.open    = vlsi_seq_open,
  379 	.read    = seq_read,
  380 	.llseek  = seq_lseek,
  381 	.release = single_release,
  382 };
  383 
  384 #define VLSI_PROC_FOPS		(&vlsi_proc_fops)
  385 
  386 #else	/* CONFIG_PROC_FS */
  387 #define VLSI_PROC_FOPS		NULL
  388 #endif
  389 
  390 /********************************************************/
  391 
  392 static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
  393 						unsigned size, unsigned len, int dir)
  394 {
  395 	struct vlsi_ring *r;
  396 	struct ring_descr *rd;
  397 	unsigned	i, j;
  398 	dma_addr_t	busaddr;
  399 
  400 	if (!size  ||  ((size-1)&size)!=0)	/* must be >0 and power of 2 */
  401 		return NULL;
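	/* worked example for the power-of-2 test: size=8 is 0b1000 and
	 * size-1 is 0b0111, so (size-1)&size == 0; a non-power such as 6
	 * gives 0b0101 & 0b0110 = 0b0100 != 0 and is rejected.
	 */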
  402 
  403 	r = kzalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
  404 	if (!r)
  405 		return NULL;
  407 
  408 	r->pdev = pdev;
  409 	r->dir = dir;
  410 	r->len = len;
  411 	r->rd = (struct ring_descr *)(r+1);
  412 	r->mask = size - 1;
  413 	r->size = size;
  414 	atomic_set(&r->head, 0);
  415 	atomic_set(&r->tail, 0);
  416 
  417 	for (i = 0; i < size; i++) {
  418 		rd = r->rd + i;
  419 		memset(rd, 0, sizeof(*rd));
  420 		rd->hw = hwmap + i;
  421 		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
  422 		if (rd->buf == NULL ||
  423 		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
  424 			if (rd->buf) {
  425 				net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
  426 						    __func__, rd->buf);
  427 				kfree(rd->buf);
  428 				rd->buf = NULL;
  429 			}
  430 			for (j = 0; j < i; j++) {
  431 				rd = r->rd + j;
  432 				busaddr = rd_get_addr(rd);
  433 				rd_set_addr_status(rd, 0, 0);
  434 				if (busaddr)
  435 					pci_unmap_single(pdev, busaddr, len, dir);
  436 				kfree(rd->buf);
  437 				rd->buf = NULL;
  438 			}
  439 			kfree(r);
  440 			return NULL;
  441 		}
  442 		rd_set_addr_status(rd, busaddr, 0);
  443 		/* initially, the dma buffer is owned by the CPU */
  444 		rd->skb = NULL;
  445 	}
  446 	return r;
  447 }
  448 
  449 static int vlsi_free_ring(struct vlsi_ring *r)
  450 {
  451 	struct ring_descr *rd;
  452 	unsigned	i;
  453 	dma_addr_t	busaddr;
  454 
  455 	for (i = 0; i < r->size; i++) {
  456 		rd = r->rd + i;
  457 		if (rd->skb)
  458 			dev_kfree_skb_any(rd->skb);
  459 		busaddr = rd_get_addr(rd);
  460 		rd_set_addr_status(rd, 0, 0);
  461 		if (busaddr)
  462 			pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
  463 		kfree(rd->buf);
  464 	}
  465 	kfree(r);
  466 	return 0;
  467 }
  468 
  469 static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
  470 {
  471 	char 			*ringarea;
  472 	struct ring_descr_hw	*hwmap;
  473 
  474 	idev->virtaddr = NULL;
  475 	idev->busaddr = 0;
  476 
  477 	ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
  478 					 &idev->busaddr);
  479 	if (!ringarea)
  480 		goto out;
  481 
  482 	hwmap = (struct ring_descr_hw *)ringarea;
  483 	idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
  484 					XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
  485 	if (idev->rx_ring == NULL)
  486 		goto out_unmap;
  487 
  488 	hwmap += MAX_RING_DESCR;
  489 	idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
  490 					XFER_BUF_SIZE, PCI_DMA_TODEVICE);
  491 	if (idev->tx_ring == NULL)
  492 		goto out_free_rx;
  493 
  494 	idev->virtaddr = ringarea;
  495 	return 0;
  496 
  497 out_free_rx:
  498 	vlsi_free_ring(idev->rx_ring);
  499 out_unmap:
  500 	idev->rx_ring = idev->tx_ring = NULL;
  501 	pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
  502 	idev->busaddr = 0;
  503 out:
  504 	return -ENOMEM;
  505 }
  506 
  507 static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
  508 {
  509 	vlsi_free_ring(idev->rx_ring);
  510 	vlsi_free_ring(idev->tx_ring);
  511 	idev->rx_ring = idev->tx_ring = NULL;
  512 
  513 	if (idev->busaddr)
  514 		pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
  515 
  516 	idev->virtaddr = NULL;
  517 	idev->busaddr = 0;
  518 
  519 	return 0;
  520 }
  521 
  522 /********************************************************/
  523 
  524 static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
  525 {
  526 	u16		status;
  527 	int		crclen, len = 0;
  528 	struct sk_buff	*skb;
  529 	int		ret = 0;
  530 	struct net_device *ndev = pci_get_drvdata(r->pdev);
  531 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  532 
  533 	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
  534 	/* dma buffer now owned by the CPU */
  535 	status = rd_get_status(rd);
  536 	if (status & RD_RX_ERROR) {
  537 		if (status & RD_RX_OVER)  
  538 			ret |= VLSI_RX_OVER;
  539 		if (status & RD_RX_LENGTH)  
  540 			ret |= VLSI_RX_LENGTH;
  541 		if (status & RD_RX_PHYERR)  
  542 			ret |= VLSI_RX_FRAME;
  543 		if (status & RD_RX_CRCERR)  
  544 			ret |= VLSI_RX_CRC;
  545 		goto done;
  546 	}
  547 
  548 	len = rd_get_count(rd);
  549 	crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
  550 	len -= crclen;		/* remove trailing CRC */
  551 	if (len <= 0) {
  552 		pr_debug("%s: strange frame (len=%d)\n", __func__, len);
  553 		ret |= VLSI_RX_DROP;
  554 		goto done;
  555 	}
  556 
  557 	if (idev->mode == IFF_SIR) {	/* hw checks CRC in MIR, FIR mode */
  558 
  559 		/* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
  560 		 * endian adjustment in place there dirties a cache line
  561 		 * belonging to the map, so we must be sure it gets flushed
  562 		 * back before the buffer is handed to the hardware again.
  563 		 * vlsi_fill_rx() does this anyway - but this is where we rely on it.
  564 		 */
  565 		le16_to_cpus(rd->buf+len);
  566 		if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
  567 			pr_debug("%s: crc error\n", __func__);
  568 			ret |= VLSI_RX_CRC;
  569 			goto done;
  570 		}
  571 	}
  572 
  573 	if (!rd->skb) {
  574 		net_warn_ratelimited("%s: rx packet lost\n", __func__);
  575 		ret |= VLSI_RX_DROP;
  576 		goto done;
  577 	}
  578 
  579 	skb = rd->skb;
  580 	rd->skb = NULL;
  581 	skb->dev = ndev;
  582 	memcpy(skb_put(skb,len), rd->buf, len);
  583 	skb_reset_mac_header(skb);
  584 	if (in_interrupt())
  585 		netif_rx(skb);
  586 	else
  587 		netif_rx_ni(skb);
  588 
  589 done:
  590 	rd_set_status(rd, 0);
  591 	rd_set_count(rd, 0);
  592 	/* buffer still owned by CPU */
  593 
  594 	return (ret) ? -ret : len;
  595 }
  596 
  597 static void vlsi_fill_rx(struct vlsi_ring *r)
  598 {
  599 	struct ring_descr *rd;
  600 
  601 	for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
  602 		if (rd_is_active(rd)) {
  603 			net_warn_ratelimited("%s: driver bug: rx descr race with hw\n",
  604 					     __func__);
  605 			vlsi_ring_debug(r);
  606 			break;
  607 		}
  608 		if (!rd->skb) {
  609 			rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
  610 			if (rd->skb) {
  611 				skb_reserve(rd->skb,1);
  612 				rd->skb->protocol = htons(ETH_P_IRDA);
  613 			}
  614 			else
  615 				break;	/* probably not worth logging? */
  616 		}
  617 		/* give dma buffer back to busmaster */
  618 		pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
  619 		rd_activate(rd);
  620 	}
  621 }
  622 
  623 static void vlsi_rx_interrupt(struct net_device *ndev)
  624 {
  625 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  626 	struct vlsi_ring *r = idev->rx_ring;
  627 	struct ring_descr *rd;
  628 	int ret;
  629 
  630 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
  631 
  632 		if (rd_is_active(rd))
  633 			break;
  634 
  635 		ret = vlsi_process_rx(r, rd);
  636 
  637 		if (ret < 0) {
  638 			ret = -ret;
  639 			ndev->stats.rx_errors++;
  640 			if (ret & VLSI_RX_DROP)  
  641 				ndev->stats.rx_dropped++;
  642 			if (ret & VLSI_RX_OVER)  
  643 				ndev->stats.rx_over_errors++;
  644 			if (ret & VLSI_RX_LENGTH)  
  645 				ndev->stats.rx_length_errors++;
  646 			if (ret & VLSI_RX_FRAME)  
  647 				ndev->stats.rx_frame_errors++;
  648 			if (ret & VLSI_RX_CRC)  
  649 				ndev->stats.rx_crc_errors++;
  650 		}
  651 		else if (ret > 0) {
  652 			ndev->stats.rx_packets++;
  653 			ndev->stats.rx_bytes += ret;
  654 		}
  655 	}
  656 
  657 	idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
  658 
  659 	vlsi_fill_rx(r);
  660 
  661 	if (ring_first(r) == NULL) {
  662 		/* we are in big trouble if this ever happens */
  663 		net_err_ratelimited("%s: rx ring exhausted!\n", __func__);
  664 		vlsi_ring_debug(r);
  665 	}
  666 	else
  667 		outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
  668 }
  669 
  670 /* caller must have stopped the controller from busmastering */
  671 
  672 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
  673 {
  674 	struct net_device *ndev = pci_get_drvdata(idev->pdev);
  675 	struct vlsi_ring *r = idev->rx_ring;
  676 	struct ring_descr *rd;
  677 	int ret;
  678 
  679 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
  680 
  681 		ret = 0;
  682 		if (rd_is_active(rd)) {
  683 			rd_set_status(rd, 0);
  684 			if (rd_get_count(rd)) {
  685 				pr_debug("%s - dropping rx packet\n", __func__);
  686 				ret = -VLSI_RX_DROP;
  687 			}
  688 			rd_set_count(rd, 0);
  689 			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
  690 			if (rd->skb) {
  691 				dev_kfree_skb_any(rd->skb);
  692 				rd->skb = NULL;
  693 			}
  694 		}
  695 		else
  696 			ret = vlsi_process_rx(r, rd);
  697 
  698 		if (ret < 0) {
  699 			ret = -ret;
  700 			ndev->stats.rx_errors++;
  701 			if (ret & VLSI_RX_DROP)  
  702 				ndev->stats.rx_dropped++;
  703 			if (ret & VLSI_RX_OVER)  
  704 				ndev->stats.rx_over_errors++;
  705 			if (ret & VLSI_RX_LENGTH)  
  706 				ndev->stats.rx_length_errors++;
  707 			if (ret & VLSI_RX_FRAME)  
  708 				ndev->stats.rx_frame_errors++;
  709 			if (ret & VLSI_RX_CRC)  
  710 				ndev->stats.rx_crc_errors++;
  711 		}
  712 		else if (ret > 0) {
  713 			ndev->stats.rx_packets++;
  714 			ndev->stats.rx_bytes += ret;
  715 		}
  716 	}
  717 }
  718 
  719 /********************************************************/
  720 
  721 static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
  722 {
  723 	u16		status;
  724 	int		len;
  725 	int		ret;
  726 
  727 	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
  728 	/* dma buffer now owned by the CPU */
  729 	status = rd_get_status(rd);
  730 	if (status & RD_TX_UNDRN)
  731 		ret = VLSI_TX_FIFO;
  732 	else
  733 		ret = 0;
  734 	rd_set_status(rd, 0);
  735 
  736 	if (rd->skb) {
  737 		len = rd->skb->len;
  738 		dev_kfree_skb_any(rd->skb);
  739 		rd->skb = NULL;
  740 	}
  741 	else	/* tx-skb already freed? - should never happen */
  742 		len = rd_get_count(rd);		/* incorrect for SIR! (due to wrapping) */
  743 
  744 	rd_set_count(rd, 0);
  745 	/* dma buffer still owned by the CPU */
  746 
  747 	return (ret) ? -ret : len;
  748 }
  749 
  750 static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
  751 {
  752 	u16 nphyctl;
  753 	u16 config;
  754 	unsigned mode;
  755 	int	ret;
  756 	int	baudrate;
  757 	int	fifocnt;
  758 
  759 	baudrate = idev->new_baud;
  760 	pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
  761 	if (baudrate == 4000000) {
  762 		mode = IFF_FIR;
  763 		config = IRCFG_FIR;
  764 		nphyctl = PHYCTL_FIR;
  765 	}
  766 	else if (baudrate == 1152000) {
  767 		mode = IFF_MIR;
  768 		config = IRCFG_MIR | IRCFG_CRC16;
  769 		nphyctl = PHYCTL_MIR(clksrc==3);
  770 	}
  771 	else {
  772 		mode = IFF_SIR;
  773 		config = IRCFG_SIR | IRCFG_SIRFILT  | IRCFG_RXANY;
  774 		switch(baudrate) {
  775 			default:
  776 				net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n",
  777 						     __func__, baudrate);
  778 				baudrate = 9600;
  779 				/* fall through */
  780 			case 2400:
  781 			case 9600:
  782 			case 19200:
  783 			case 38400:
  784 			case 57600:
  785 			case 115200:
  786 				nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
  787 				break;
  788 		}
  789 	}
  790 	config |= IRCFG_MSTR | IRCFG_ENRX;
  791 
  792 	fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
  793 	if (fifocnt != 0) {
  794 		pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt);
  795 	}
  796 
  797 	outw(0, iobase+VLSI_PIO_IRENABLE);
  798 	outw(config, iobase+VLSI_PIO_IRCFG);
  799 	outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
  800 	wmb();
  801 	outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
  802 	mb();
  803 
  804 	udelay(1);	/* chip applies IRCFG on next rising edge of its 8MHz clock */
  805 
  806 	/* read back settings for validation */
  807 
  808 	config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
  809 
  810 	if (mode == IFF_FIR)
  811 		config ^= IRENABLE_FIR_ON;
  812 	else if (mode == IFF_MIR)
  813 		config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
  814 	else
  815 		config ^= IRENABLE_SIR_ON;
  816 
  817 	if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
  818 		net_warn_ratelimited("%s: failed to set %s mode!\n",
  819 				     __func__,
  820 				     mode == IFF_SIR ? "SIR" :
  821 				     mode == IFF_MIR ? "MIR" : "FIR");
  822 		ret = -1;
  823 	}
  824 	else {
  825 		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
  826 			net_warn_ratelimited("%s: failed to apply baudrate %d\n",
  827 					     __func__, baudrate);
  828 			ret = -1;
  829 		}
  830 		else {
  831 			idev->mode = mode;
  832 			idev->baud = baudrate;
  833 			idev->new_baud = 0;
  834 			ret = 0;
  835 		}
  836 	}
  837 
  838 	if (ret)
  839 		vlsi_reg_debug(iobase,__func__);
  840 
  841 	return ret;
  842 }
  843 
  844 static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
  845 					      struct net_device *ndev)
  846 {
  847 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  848 	struct vlsi_ring	*r = idev->tx_ring;
  849 	struct ring_descr *rd;
  850 	unsigned long flags;
  851 	unsigned iobase = ndev->base_addr;
  852 	u8 status;
  853 	u16 config;
  854 	int mtt, diff;
  855 	int len, speed;
  856 	char *msg = NULL;
  857 
  858 	speed = irda_get_next_speed(skb);
  859 	spin_lock_irqsave(&idev->lock, flags);
  860 	if (speed != -1  &&  speed != idev->baud) {
  861 		netif_stop_queue(ndev);
  862 		idev->new_baud = speed;
  863 		status = RD_TX_CLRENTX;  /* stop tx-ring after this frame */
  864 	}
  865 	else
  866 		status = 0;
  867 
  868 	if (skb->len == 0) {
  869 		/* handle zero packets - should be speed change */
  870 		if (status == 0) {
  871 			msg = "bogus zero-length packet";
  872 			goto drop_unlock;
  873 		}
  874 
  875 		/* due to the completely asynchronous tx operation we might have
  876 		 * IrLAP racing with the hardware here, e.g. if the controller
  877 		 * is just sending the last packet at the current speed while
  878 		 * the LAP is already switching the speed using a synchronous
  879 		 * len=0 packet. Immediate execution would lead to a hw lockup
  880 		 * requiring a powercycle to reset. A good candidate to trigger
  881 		 * this is the final UA:RSP packet after receiving a DISC:CMD
  882 		 * when taking the LAP down.
  883 		 * Note that we are not protected by the queue_stop approach,
  884 		 * because the final UA:RSP arrives _without_ a request to apply
  885 		 * a new speed after this packet - hence the driver doesn't know
  886 		 * this was the last packet and doesn't stop the queue. So the
  887 		 * forced switch to the default speed from the LAP gets through
  888 		 * as little as some 10 usec later, while the UA:RSP is still
  889 		 * being processed by the hardware - and we would get screwed.
  890 		 */
  891 
  892 		if (ring_first(idev->tx_ring) == NULL) {
  893 			/* no race - tx-ring already empty */
  894 			vlsi_set_baud(idev, iobase);
  895 			netif_wake_queue(ndev);
  896 		}
  897 		else {
  898 			/* keep the speed change pending like it would be
  899 			 * for any len>0 packet. The tx completion interrupt
  900 			 * will apply it when the tx ring becomes empty.
  901 			 */
  902 		}
  903 		spin_unlock_irqrestore(&idev->lock, flags);
  904 		dev_kfree_skb_any(skb);
  905 		return NETDEV_TX_OK;
  906 	}
  907 
  908 	/* sanity checks - simply drop the packet */
  909 
  910 	rd = ring_last(r);
  911 	if (!rd) {
  912 		msg = "ring full, but queue wasn't stopped";
  913 		goto drop_unlock;
  914 	}
  915 
  916 	if (rd_is_active(rd)) {
  917 		msg = "entry still owned by hw";
  918 		goto drop_unlock;
  919 	}
  920 
  921 	if (!rd->buf) {
  922 		msg = "tx ring entry without pci buffer";
  923 		goto drop_unlock;
  924 	}
  925 
  926 	if (rd->skb) {
  927 		msg = "ring entry with old skb still attached";
  928 		goto drop_unlock;
  929 	}
  930 
  931 	/* no need for serialization or interrupt disable during mtt */
  932 	spin_unlock_irqrestore(&idev->lock, flags);
  933 
  934 	if ((mtt = irda_get_mtt(skb)) > 0) {
  935 		diff = ktime_us_delta(ktime_get(), idev->last_rx);
  936 		if (mtt > diff)
  937 			udelay(mtt - diff);
  938 			/* must not sleep here - called under netif_tx_lock! */
  939 	}
  940 
  941 	/* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
  942 	 * after subsequent tx-completion
  943 	 */
  944 
  945 	if (idev->mode == IFF_SIR) {
  946 		status |= RD_TX_DISCRC;		/* no hw-crc creation */
  947 		len = async_wrap_skb(skb, rd->buf, r->len);
  948 
  949 		/* Some rare worst-case situation in SIR mode might lead to
  950 		 * a buffer overflow. The wrapper detects this and returns a
  951 		 * shortened frame (without FCS/EOF), but doesn't provide any
  952 		 * error indication about the invalid packet which we are
  953 		 * going to transmit.
  954 		 * Therefore we log when the buffer got filled to the point
  955 		 * where the wrapper would abort, i.e. when fewer than 5 bytes
  956 		 * are left to allow appending the FCS/EOF.
  957 		 */
  958 
  959 		if (len >= r->len-5)
  960 			net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n",
  961 					     __func__);
  962 	}
  963 	else {
  964 		/* hw deals with MIR/FIR mode wrapping */
  965 		status |= RD_TX_PULSE;		/* send 2 us highspeed indication pulse */
  966 		len = skb->len;
  967 		if (len > r->len) {
  968 			msg = "frame exceeds tx buffer length";
  969 			goto drop;
  970 		}
  971 		else
  972 			skb_copy_from_linear_data(skb, rd->buf, len);
  973 	}
  974 
  975 	rd->skb = skb;			/* remember skb for tx-complete stats */
  976 
  977 	rd_set_count(rd, len);
  978 	rd_set_status(rd, status);	/* not yet active! */
  979 
  980 	/* give dma buffer back to busmaster-hw (flush caches to make
  981 	 * CPU-driven changes visible from the pci bus).
  982 	 */
  983 
  984 	pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
  985 
  986 /*	Switching to TX mode here races with the controller
  987  *	which may stop TX at any time when fetching an inactive descriptor
  988  *	or one with CLR_ENTX set. So we switch on TX only, if TX was not running
  989  *	_after_ the new descriptor was activated on the ring. This ensures
  990  *	we will either find TX already stopped or we can be sure, there
  991  *	will be a TX-complete interrupt even if the chip stopped doing
  992  *	TX just after we found it still running. The ISR will then find
  993  *	the non-empty ring and restart TX processing. The enclosing
  994  *	spinlock provides the correct serialization to prevent race with isr.
  995  */
  996 
  997 	spin_lock_irqsave(&idev->lock,flags);
  998 
  999 	rd_activate(rd);
 1000 
 1001 	if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
 1002 		int fifocnt;
 1003 
 1004 		fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 1005 		if (fifocnt != 0) {
 1006 			pr_debug("%s: rx fifo not empty(%d)\n",
 1007 				 __func__, fifocnt);
 1008 		}
 1009 
 1010 		config = inw(iobase+VLSI_PIO_IRCFG);
 1011 		mb();
 1012 		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
 1013 		wmb();
 1014 		outw(0, iobase+VLSI_PIO_PROMPT);
 1015 	}
 1016 
 1017 	if (ring_put(r) == NULL) {
 1018 		netif_stop_queue(ndev);
 1019 		pr_debug("%s: tx ring full - queue stopped\n", __func__);
 1020 	}
 1021 	spin_unlock_irqrestore(&idev->lock, flags);
 1022 
 1023 	return NETDEV_TX_OK;
 1024 
 1025 drop_unlock:
 1026 	spin_unlock_irqrestore(&idev->lock, flags);
 1027 drop:
 1028 	net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg);
 1029 	dev_kfree_skb_any(skb);
 1030 	ndev->stats.tx_errors++;
 1031 	ndev->stats.tx_dropped++;
 1032 	/* Don't even think about returning NET_XMIT_DROP (=1) here!
 1033 	 * In fact any retval!=0 causes the packet scheduler to requeue the
 1034 	 * packet for later retry of transmission - which isn't exactly
 1035 	 * what we want after we've just called dev_kfree_skb_any ;-)
 1036 	 */
 1037 	return NETDEV_TX_OK;
 1038 }
 1039 
 1040 static void vlsi_tx_interrupt(struct net_device *ndev)
 1041 {
 1042 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1043 	struct vlsi_ring	*r = idev->tx_ring;
 1044 	struct ring_descr	*rd;
 1045 	unsigned	iobase;
 1046 	int	ret;
 1047 	u16	config;
 1048 
 1049 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
 1050 
 1051 		if (rd_is_active(rd))
 1052 			break;
 1053 
 1054 		ret = vlsi_process_tx(r, rd);
 1055 
 1056 		if (ret < 0) {
 1057 			ret = -ret;
 1058 			ndev->stats.tx_errors++;
 1059 			if (ret & VLSI_TX_DROP)
 1060 				ndev->stats.tx_dropped++;
 1061 			if (ret & VLSI_TX_FIFO)
 1062 				ndev->stats.tx_fifo_errors++;
 1063 		}
 1064 		else if (ret > 0){
 1065 			ndev->stats.tx_packets++;
 1066 			ndev->stats.tx_bytes += ret;
 1067 		}
 1068 	}
 1069 
 1070 	iobase = ndev->base_addr;
 1071 
 1072 	if (idev->new_baud  &&  rd == NULL)	/* tx ring empty and speed change pending */
 1073 		vlsi_set_baud(idev, iobase);
 1074 
 1075 	config = inw(iobase+VLSI_PIO_IRCFG);
 1076 	if (rd == NULL)			/* tx ring empty: re-enable rx */
 1077 		outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
 1078 
 1079 	else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
 1080 		int fifocnt;
 1081 
 1082 		fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 1083 		if (fifocnt != 0) {
 1084 			pr_debug("%s: rx fifo not empty(%d)\n",
 1085 				 __func__, fifocnt);
 1086 		}
 1087 		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
 1088 	}
 1089 
 1090 	outw(0, iobase+VLSI_PIO_PROMPT);
 1091 
 1092 	if (netif_queue_stopped(ndev)  &&  !idev->new_baud) {
 1093 		netif_wake_queue(ndev);
 1094 		pr_debug("%s: queue awoken\n", __func__);
 1095 	}
 1096 }
 1097 
 1098 /* caller must have stopped the controller from busmastering */
 1099 
 1100 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
 1101 {
 1102 	struct net_device *ndev = pci_get_drvdata(idev->pdev);
 1103 	struct vlsi_ring *r = idev->tx_ring;
 1104 	struct ring_descr *rd;
 1105 	int ret;
 1106 
 1107 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
 1108 
 1109 		ret = 0;
 1110 		if (rd_is_active(rd)) {
 1111 			rd_set_status(rd, 0);
 1112 			rd_set_count(rd, 0);
 1113 			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
 1114 			if (rd->skb) {
 1115 				dev_kfree_skb_any(rd->skb);
 1116 				rd->skb = NULL;
 1117 			}
 1118 			pr_debug("%s - dropping tx packet\n", __func__);
 1119 			ret = -VLSI_TX_DROP;
 1120 		}
 1121 		else
 1122 			ret = vlsi_process_tx(r, rd);
 1123 
 1124 		if (ret < 0) {
 1125 			ret = -ret;
 1126 			ndev->stats.tx_errors++;
 1127 			if (ret & VLSI_TX_DROP)
 1128 				ndev->stats.tx_dropped++;
 1129 			if (ret & VLSI_TX_FIFO)
 1130 				ndev->stats.tx_fifo_errors++;
 1131 		}
 1132 		else if (ret > 0){
 1133 			ndev->stats.tx_packets++;
 1134 			ndev->stats.tx_bytes += ret;
 1135 		}
 1136 	}
 1137 
 1138 }
 1139 
 1140 /********************************************************/
 1141 
 1142 static int vlsi_start_clock(struct pci_dev *pdev)
 1143 {
 1144 	u8	clkctl, lock;
 1145 	int	i, count;
 1146 
 1147 	if (clksrc < 2) { /* auto or PLL: try PLL */
 1148 		clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
 1149 		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1150 
 1151 		/* procedure to detect PLL lock synchronisation:
 1152 		 * after 0.5 msec initial delay we expect to find 3 PLL lock
 1153 		 * indications within 10 msec for successful PLL detection.
 1154 		 */
 1155 		udelay(500);
 1156 		count = 0;
 1157 		for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
 1158 			pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
 1159 			if (lock&CLKCTL_LOCK) {
 1160 				if (++count >= 3)
 1161 					break;
 1162 			}
 1163 			udelay(50);
 1164 		}
 1165 		if (count < 3) {
 1166 			if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
 1167 				net_err_ratelimited("%s: no PLL or failed to lock!\n",
 1168 						    __func__);
 1169 				clkctl = CLKCTL_CLKSTP;
 1170 				pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1171 				return -1;
 1172 			}
 1173 			else			/* was: clksrc=0(auto) */
 1174 				clksrc = 3;	/* fallback to 40MHz XCLK (OB800) */
 1175 
 1176 			pr_debug("%s: PLL not locked, fallback to clksrc=%d\n",
 1177 				 __func__, clksrc);
 1178 		}
 1179 		else
 1180 			clksrc = 1;	/* got successful PLL lock */
 1181 	}
 1182 
 1183 	if (clksrc != 1) {
 1184 		/* we get here if either no PLL was detected in auto mode or
 1185 		   an external clock source was explicitly specified */
 1186 
 1187 		clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
 1188 		if (clksrc == 3)
 1189 			clkctl |= CLKCTL_XCKSEL;	
 1190 		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1191 
 1192 		/* no way to test for working XCLK */
 1193 	}
 1194 	else
 1195 		pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
 1196 
 1197 	/* ok, now going to connect the chip with the clock source */
 1198 
 1199 	clkctl &= ~CLKCTL_CLKSTP;
 1200 	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1201 
 1202 	return 0;
 1203 }
 1204 
 1205 static void vlsi_stop_clock(struct pci_dev *pdev)
 1206 {
 1207 	u8	clkctl;
 1208 
 1209 	/* disconnect chip from clock source */
 1210 	pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
 1211 	clkctl |= CLKCTL_CLKSTP;
 1212 	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1213 
 1214 	/* disable all clock sources */
 1215 	clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
 1216 	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1217 }
 1218 
 1219 /********************************************************/
 1220 
 1221 /* writing all-zero to the VLSI PCI IO register area seems to prevent
 1222  * some occasional situations where the hardware fails (the symptoms
 1223  * appear as stalled tx/rx state machines, i.e. everything looks ok for
 1224  * receive or transmit but the hw makes no progress or is unable to access
 1225  * the bus memory locations).
 1226  * Best place to call this is immediately after/before the internal clock
 1227  * gets started/stopped.
 1228  */
 1229 
 1230 static inline void vlsi_clear_regs(unsigned iobase)
 1231 {
 1232 	unsigned	i;
 1233 	const unsigned	chip_io_extent = 32;
 1234 
 1235 	for (i = 0; i < chip_io_extent; i += sizeof(u16))
 1236 		outw(0, iobase + i);
 1237 }
 1238 
 1239 static int vlsi_init_chip(struct pci_dev *pdev)
 1240 {
 1241 	struct net_device *ndev = pci_get_drvdata(pdev);
 1242 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1243 	unsigned	iobase;
 1244 	u16 ptr;
 1245 
 1246 	/* start the clock and clean the registers */
 1247 
 1248 	if (vlsi_start_clock(pdev)) {
 1249 		net_err_ratelimited("%s: no valid clock source\n", __func__);
 1250 		return -1;
 1251 	}
 1252 	iobase = ndev->base_addr;
 1253 	vlsi_clear_regs(iobase);
 1254 
 1255 	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
 1256 
 1257 	outw(0, iobase+VLSI_PIO_IRENABLE);	/* disable IrPHY-interface */
 1258 
 1259 	/* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
 1260 
 1261 	outw(0, iobase+VLSI_PIO_IRCFG);
 1262 	wmb();
 1263 
 1264 	outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT);  /* max possible value=0x0fff */
 1265 
 1266 	outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
 1267 
 1268 	outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
 1269 		iobase+VLSI_PIO_RINGSIZE);	
 1270 
 1271 	ptr = inw(iobase+VLSI_PIO_RINGPTR);
 1272 	atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
 1273 	atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
 1274 	atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
 1275 	atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
 1276 
 1277 	vlsi_set_baud(idev, iobase);	/* idev->new_baud used as provided by caller */
 1278 
 1279 	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* just in case - w/c pending IRQ's */
 1280 	wmb();
 1281 
 1282 	/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
 1283 	 * basically every received pulse fires an ACTIVITY-INT
 1284 	 * leading to >>1000 INT's per second instead of few 10
 1285 	 */
 1286 
 1287 	outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
 1288 
 1289 	return 0;
 1290 }
 1291 
 1292 static int vlsi_start_hw(vlsi_irda_dev_t *idev)
 1293 {
 1294 	struct pci_dev *pdev = idev->pdev;
 1295 	struct net_device *ndev = pci_get_drvdata(pdev);
 1296 	unsigned iobase = ndev->base_addr;
 1297 	u8 byte;
 1298 
 1299 	/* we don't use the legacy UART, disable its address decoding */
 1300 
 1301 	pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
 1302 	byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
 1303 	pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
 1304 
 1305 	/* enable PCI busmaster access to our 16MB page */
 1306 
 1307 	pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
 1308 	pci_set_master(pdev);
 1309 
 1310 	if (vlsi_init_chip(pdev) < 0) {
 1311 		pci_disable_device(pdev);
 1312 		return -1;
 1313 	}
 1314 
 1315 	vlsi_fill_rx(idev->rx_ring);
 1316 
 1317 	idev->last_rx = ktime_get();	/* first mtt may start from now on */
 1318 
 1319 	outw(0, iobase+VLSI_PIO_PROMPT);	/* kick hw state machine */
 1320 
 1321 	return 0;
 1322 }
 1323 
 1324 static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
 1325 {
 1326 	struct pci_dev *pdev = idev->pdev;
 1327 	struct net_device *ndev = pci_get_drvdata(pdev);
 1328 	unsigned iobase = ndev->base_addr;
 1329 	unsigned long flags;
 1330 
 1331 	spin_lock_irqsave(&idev->lock,flags);
 1332 	outw(0, iobase+VLSI_PIO_IRENABLE);
 1333 	outw(0, iobase+VLSI_PIO_IRCFG);			/* disable everything */
 1334 
 1335 	/* disable and w/c irqs */
 1336 	outb(0, iobase+VLSI_PIO_IRINTR);
 1337 	wmb();
 1338 	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
 1339 	spin_unlock_irqrestore(&idev->lock,flags);
 1340 
 1341 	vlsi_unarm_tx(idev);
 1342 	vlsi_unarm_rx(idev);
 1343 
 1344 	vlsi_clear_regs(iobase);
 1345 	vlsi_stop_clock(pdev);
 1346 
 1347 	pci_disable_device(pdev);
 1348 
 1349 	return 0;
 1350 }
 1351 
 1352 /**************************************************************/
 1353 
 1354 static void vlsi_tx_timeout(struct net_device *ndev)
 1355 {
 1356 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1357 
 1358 
 1359 	vlsi_reg_debug(ndev->base_addr, __func__);
 1360 	vlsi_ring_debug(idev->tx_ring);
 1361 
 1362 	if (netif_running(ndev))
 1363 		netif_stop_queue(ndev);
 1364 
 1365 	vlsi_stop_hw(idev);
 1366 
 1367 	/* now simply restart the whole thing */
 1368 
 1369 	if (!idev->new_baud)
 1370 		idev->new_baud = idev->baud;		/* keep current baudrate */
 1371 
 1372 	if (vlsi_start_hw(idev))
 1373 		net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n",
 1374 				    __func__, pci_name(idev->pdev), ndev->name);
 1375 	else
 1376 		netif_start_queue(ndev);
 1377 }
 1378 
 1379 static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 1380 {
 1381 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1382 	struct if_irda_req *irq = (struct if_irda_req *) rq;
 1383 	unsigned long flags;
 1384 	u16 fifocnt;
 1385 	int ret = 0;
 1386 
 1387 	switch (cmd) {
 1388 		case SIOCSBANDWIDTH:
 1389 			if (!capable(CAP_NET_ADMIN)) {
 1390 				ret = -EPERM;
 1391 				break;
 1392 			}
 1393 			spin_lock_irqsave(&idev->lock, flags);
 1394 			idev->new_baud = irq->ifr_baudrate;
 1395 			/* when called from userland there might be a minor race window here
 1396 			 * if the stack tries to change speed concurrently - which would be
 1397 			 * pretty strange anyway with the userland having full control...
 1398 			 */
 1399 			vlsi_set_baud(idev, ndev->base_addr);
 1400 			spin_unlock_irqrestore(&idev->lock, flags);
 1401 			break;
 1402 		case SIOCSMEDIABUSY:
 1403 			if (!capable(CAP_NET_ADMIN)) {
 1404 				ret = -EPERM;
 1405 				break;
 1406 			}
 1407 			irda_device_set_media_busy(ndev, TRUE);
 1408 			break;
 1409 		case SIOCGRECEIVING:
 1410 			/* the best we can do: check whether there are any bytes in the rx fifo.
 1411 			 * The trustworthy window (in case some data arrives just afterwards)
 1412 			 * may be as short as 1 usec or so at 4 Mbps.
 1413 			 */
 1414 			fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 1415 			irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
 1416 			break;
 1417 		default:
 1418 			net_warn_ratelimited("%s: notsupp - cmd=%04x\n",
 1419 					     __func__, cmd);
 1420 			ret = -EOPNOTSUPP;
 1421 	}	
 1422 	
 1423 	return ret;
 1424 }
 1425 
 1426 /********************************************************/
 1427 
 1428 static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
 1429 {
 1430 	struct net_device *ndev = dev_instance;
 1431 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1432 	unsigned	iobase;
 1433 	u8		irintr;
 1434 	int 		boguscount = 5;
 1435 	unsigned long	flags;
 1436 	int		handled = 0;
 1437 
 1438 	iobase = ndev->base_addr;
 1439 	spin_lock_irqsave(&idev->lock,flags);
 1440 	do {
 1441 		irintr = inb(iobase+VLSI_PIO_IRINTR);
 1442 		mb();
 1443 		outb(irintr, iobase+VLSI_PIO_IRINTR);	/* acknowledge asap */
 1444 
 1445 		if (!(irintr&=IRINTR_INT_MASK))		/* not our INT - probably shared */
 1446 			break;
 1447 
 1448 		handled = 1;
 1449 
 1450 		if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
 1451 			break;				/* nothing to do if only activity */
 1452 
 1453 		if (irintr&IRINTR_RPKTINT)
 1454 			vlsi_rx_interrupt(ndev);
 1455 
 1456 		if (irintr&IRINTR_TPKTINT)
 1457 			vlsi_tx_interrupt(ndev);
 1458 
 1459 	} while (--boguscount > 0);
 1460 	spin_unlock_irqrestore(&idev->lock,flags);
 1461 
 1462 	if (boguscount <= 0)
 1463 		net_info_ratelimited("%s: too much work in interrupt!\n",
 1464 				     __func__);
 1465 	return IRQ_RETVAL(handled);
 1466 }
 1467 
 1468 /********************************************************/
 1469 
 1470 static int vlsi_open(struct net_device *ndev)
 1471 {
 1472 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1473 	int	err = -EAGAIN;
 1474 	char	hwname[32];
 1475 
 1476 	if (pci_request_regions(idev->pdev, drivername)) {
 1477 		net_warn_ratelimited("%s: io resource busy\n", __func__);
 1478 		goto errout;
 1479 	}
 1480 	ndev->base_addr = pci_resource_start(idev->pdev,0);
 1481 	ndev->irq = idev->pdev->irq;
 1482 
 1483 	/* on some rare occasions the chip apparently comes up with IRQs
 1484 	 * pending. We had better w/c the pending IRQs and disable them all.
 1485 	 */
 1486 
 1487 	outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
 1488 
 1489 	if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
 1490 			drivername, ndev)) {
 1491 		net_warn_ratelimited("%s: couldn't get IRQ: %d\n",
 1492 				     __func__, ndev->irq);
 1493 		goto errout_io;
 1494 	}
 1495 
 1496 	if ((err = vlsi_create_hwif(idev)) != 0)
 1497 		goto errout_irq;
 1498 
 1499 	sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
 1500 	idev->irlap = irlap_open(ndev,&idev->qos,hwname);
 1501 	if (!idev->irlap)
 1502 		goto errout_free_ring;
 1503 
 1504 	idev->last_rx = ktime_get();  /* first mtt may start from now on */
 1505 
 1506 	idev->new_baud = 9600;		/* start with IrPHY using 9600(SIR) mode */
 1507 
 1508 	if ((err = vlsi_start_hw(idev)) != 0)
 1509 		goto errout_close_irlap;
 1510 
 1511 	netif_start_queue(ndev);
 1512 
 1513 	net_info_ratelimited("%s: device %s operational\n",
 1514 			     __func__, ndev->name);
 1515 
 1516 	return 0;
 1517 
 1518 errout_close_irlap:
 1519 	irlap_close(idev->irlap);
 1520 errout_free_ring:
 1521 	vlsi_destroy_hwif(idev);
 1522 errout_irq:
 1523 	free_irq(ndev->irq,ndev);
 1524 errout_io:
 1525 	pci_release_regions(idev->pdev);
 1526 errout:
 1527 	return err;
 1528 }
 1529 
 1530 static int vlsi_close(struct net_device *ndev)
 1531 {
 1532 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1533 
 1534 	netif_stop_queue(ndev);
 1535 
 1536 	if (idev->irlap)
 1537 		irlap_close(idev->irlap);
 1538 	idev->irlap = NULL;
 1539 
 1540 	vlsi_stop_hw(idev);
 1541 
 1542 	vlsi_destroy_hwif(idev);
 1543 
 1544 	free_irq(ndev->irq,ndev);
 1545 
 1546 	pci_release_regions(idev->pdev);
 1547 
 1548 	net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name);
 1549 
 1550 	return 0;
 1551 }
 1552 
 1553 static const struct net_device_ops vlsi_netdev_ops = {
 1554 	.ndo_open       = vlsi_open,
 1555 	.ndo_stop       = vlsi_close,
 1556 	.ndo_start_xmit = vlsi_hard_start_xmit,
 1557 	.ndo_do_ioctl   = vlsi_ioctl,
 1558 	.ndo_tx_timeout = vlsi_tx_timeout,
 1559 };
 1560 
 1561 static int vlsi_irda_init(struct net_device *ndev)
 1562 {
 1563 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1564 	struct pci_dev *pdev = idev->pdev;
 1565 
 1566 	ndev->irq = pdev->irq;
 1567 	ndev->base_addr = pci_resource_start(pdev,0);
 1568 
 1569 	/* PCI busmastering
 1570 	 * see include file for details why we need these 2 masks, in this order!
 1571 	 */
 1572 
 1573 	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
 1574 	    pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
 1575 		net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n",
 1576 				    __func__);
 1577 		return -1;
 1578 	}
 1579 
 1580 	irda_init_max_qos_capabilies(&idev->qos);
 1581 
 1582 	/* the VLSI82C147 does not support 576000! */
 1583 
 1584 	idev->qos.baud_rate.bits = IR_2400 | IR_9600
 1585 		| IR_19200 | IR_38400 | IR_57600 | IR_115200
 1586 		| IR_1152000 | (IR_4000000 << 8);
 1587 
 1588 	idev->qos.min_turn_time.bits = qos_mtt_bits;
 1589 
 1590 	irda_qos_bits_to_value(&idev->qos);
 1591 
 1592 	/* currently no public media definitions for IrDA */
 1593 
 1594 	ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
 1595 	ndev->if_port = IF_PORT_UNKNOWN;
 1596  
 1597 	ndev->netdev_ops = &vlsi_netdev_ops;
 1598 	ndev->watchdog_timeo  = 500*HZ/1000;	/* max. allowed turn time for IrLAP */
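	/* e.g. with HZ=250 this evaluates to 500*250/1000 = 125 jiffies,
	 * i.e. a 500 ms watchdog timeout.
	 */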
 1599 
 1600 	SET_NETDEV_DEV(ndev, &pdev->dev);
 1601 
 1602 	return 0;
 1603 }	
 1604 
 1605 /**************************************************************/
 1606 
 1607 static int
 1608 vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 1609 {
 1610 	struct net_device	*ndev;
 1611 	vlsi_irda_dev_t		*idev;
 1612 
 1613 	if (pci_enable_device(pdev))
 1614 		goto out;
 1615 	else
 1616 		pdev->current_state = 0; /* hw must be running now */
 1617 
 1618 	net_info_ratelimited("%s: IrDA PCI controller %s detected\n",
 1619 			     drivername, pci_name(pdev));
 1620 
 1621 	if ( !pci_resource_start(pdev,0) ||
 1622 	     !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
 1623 		net_err_ratelimited("%s: bar 0 invalid\n", __func__);
 1624 		goto out_disable;
 1625 	}
 1626 
 1627 	ndev = alloc_irdadev(sizeof(*idev));
 1628 	if (ndev==NULL) {
 1629 		net_err_ratelimited("%s: Unable to allocate device memory.\n",
 1630 				    __func__);
 1631 		goto out_disable;
 1632 	}
 1633 
 1634 	idev = netdev_priv(ndev);
 1635 
 1636 	spin_lock_init(&idev->lock);
 1637 	mutex_init(&idev->mtx);
 1638 	mutex_lock(&idev->mtx);
 1639 	idev->pdev = pdev;
 1640 
 1641 	if (vlsi_irda_init(ndev) < 0)
 1642 		goto out_freedev;
 1643 
 1644 	if (register_netdev(ndev) < 0) {
 1645 		net_err_ratelimited("%s: register_netdev failed\n", __func__);
 1646 		goto out_freedev;
 1647 	}
 1648 
 1649 	if (vlsi_proc_root != NULL) {
 1650 		struct proc_dir_entry *ent;
 1651 
 1652 		ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
 1653 				       vlsi_proc_root, VLSI_PROC_FOPS, ndev);
 1654 		if (!ent) {
 1655 			net_warn_ratelimited("%s: failed to create proc entry\n",
 1656 					     __func__);
 1657 		} else {
 1658 			proc_set_size(ent, 0);
 1659 		}
 1660 		idev->proc_entry = ent;
 1661 	}
 1662 	net_info_ratelimited("%s: registered device %s\n",
 1663 			     drivername, ndev->name);
 1664 
 1665 	pci_set_drvdata(pdev, ndev);
 1666 	mutex_unlock(&idev->mtx);
 1667 
 1668 	return 0;
 1669 
 1670 out_freedev:
 1671 	mutex_unlock(&idev->mtx);
 1672 	free_netdev(ndev);
 1673 out_disable:
 1674 	pci_disable_device(pdev);
 1675 out:
 1676 	return -ENODEV;
 1677 }
 1678 
 1679 static void vlsi_irda_remove(struct pci_dev *pdev)
 1680 {
 1681 	struct net_device *ndev = pci_get_drvdata(pdev);
 1682 	vlsi_irda_dev_t *idev;
 1683 
 1684 	if (!ndev) {
 1685 		net_err_ratelimited("%s: lost netdevice?\n", drivername);
 1686 		return;
 1687 	}
 1688 
 1689 	unregister_netdev(ndev);
 1690 
 1691 	idev = netdev_priv(ndev);
 1692 	mutex_lock(&idev->mtx);
 1693 	if (idev->proc_entry) {
 1694 		remove_proc_entry(ndev->name, vlsi_proc_root);
 1695 		idev->proc_entry = NULL;
 1696 	}
 1697 	mutex_unlock(&idev->mtx);
 1698 
 1699 	free_netdev(ndev);
 1700 
 1701 	net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev));
 1702 }
 1703 
 1704 #ifdef CONFIG_PM
 1705 
 1706 /* The controller doesn't provide PCI PM capabilities as defined by the PCI
 1707  * specs. Some of the Linux PCI-PM code, however, depends on them - for
 1708  * example in pci_set_power_state(). So we have to take care to perform the
 1709  * required operations on our own (particularly reflecting pdev->current_state),
 1710  * otherwise we might get cheated by pci-pm.
 1711  */
 1712 
 1713 
 1714 static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
 1715 {
 1716 	struct net_device *ndev = pci_get_drvdata(pdev);
 1717 	vlsi_irda_dev_t *idev;
 1718 
 1719 	if (!ndev) {
 1720 		net_err_ratelimited("%s - %s: no netdevice\n",
 1721 				    __func__, pci_name(pdev));
 1722 		return 0;
 1723 	}
 1724 	idev = netdev_priv(ndev);
 1725 	mutex_lock(&idev->mtx);
 1726 	if (pdev->current_state != 0) {			/* already suspended */
 1727 		if (state.event > pdev->current_state) {	/* simply go deeper */
 1728 			pci_set_power_state(pdev, pci_choose_state(pdev, state));
 1729 			pdev->current_state = state.event;
 1730 		}
 1731 		else
 1732 			net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
 1733 					    __func__, pci_name(pdev),
 1734 					    pdev->current_state, state.event);
 1735 		mutex_unlock(&idev->mtx);
 1736 		return 0;
 1737 	}
 1738 
 1739 	if (netif_running(ndev)) {
 1740 		netif_device_detach(ndev);
 1741 		vlsi_stop_hw(idev);
 1742 		pci_save_state(pdev);
 1743 		if (!idev->new_baud)
 1744 			/* remember speed settings to restore on resume */
 1745 			idev->new_baud = idev->baud;
 1746 	}
 1747 
 1748 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 1749 	pdev->current_state = state.event;
 1750 	idev->resume_ok = 1;
 1751 	mutex_unlock(&idev->mtx);
 1752 	return 0;
 1753 }
 1754 
 1755 static int vlsi_irda_resume(struct pci_dev *pdev)
 1756 {
 1757 	struct net_device *ndev = pci_get_drvdata(pdev);
 1758 	vlsi_irda_dev_t	*idev;
 1759 
 1760 	if (!ndev) {
 1761 		net_err_ratelimited("%s - %s: no netdevice\n",
 1762 				    __func__, pci_name(pdev));
 1763 		return 0;
 1764 	}
 1765 	idev = netdev_priv(ndev);
 1766 	mutex_lock(&idev->mtx);
 1767 	if (pdev->current_state == 0) {
 1768 		mutex_unlock(&idev->mtx);
 1769 		net_warn_ratelimited("%s - %s: already resumed\n",
 1770 				     __func__, pci_name(pdev));
 1771 		return 0;
 1772 	}
 1773 	
 1774 	pci_set_power_state(pdev, PCI_D0);
 1775 	pdev->current_state = PM_EVENT_ON;
 1776 
 1777 	if (!idev->resume_ok) {
 1778 		/* should be obsolete now - but used to happen due to:
 1779 		 * - pci layer initially setting pdev->current_state = 4 (unknown)
 1780 		 * - pci layer did not walk the save_state-tree (might be APM problem)
 1781 		 *   so we could not refuse to suspend from undefined state
 1782 		 * - vlsi_irda_suspend detected invalid state and refused to save
 1783 		 *   configuration for resume - but was too late to stop suspending
 1784 		 * - vlsi_irda_resume got screwed when trying to resume from garbage
 1785 		 *
 1786 		 * now we explicitly set pdev->current_state = 0 after enabling the
 1787 		 * device and independently resume_ok should catch any garbage config.
 1788 		 */
 1789 		net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
 1790 		mutex_unlock(&idev->mtx);
 1791 		return 0;
 1792 	}
 1793 
 1794 	if (netif_running(ndev)) {
 1795 		pci_restore_state(pdev);
 1796 		vlsi_start_hw(idev);
 1797 		netif_device_attach(ndev);
 1798 	}
 1799 	idev->resume_ok = 0;
 1800 	mutex_unlock(&idev->mtx);
 1801 	return 0;
 1802 }
 1803 
 1804 #endif /* CONFIG_PM */
 1805 
 1806 /*********************************************************/
 1807 
 1808 static struct pci_driver vlsi_irda_driver = {
 1809 	.name		= drivername,
 1810 	.id_table	= vlsi_irda_table,
 1811 	.probe		= vlsi_irda_probe,
 1812 	.remove		= vlsi_irda_remove,
 1813 #ifdef CONFIG_PM
 1814 	.suspend	= vlsi_irda_suspend,
 1815 	.resume		= vlsi_irda_resume,
 1816 #endif
 1817 };
 1818 
 1819 #define PROC_DIR ("driver/" DRIVER_NAME)
 1820 
 1821 static int __init vlsi_mod_init(void)
 1822 {
 1823 	int	i, ret;
 1824 
 1825 	if (clksrc < 0  ||  clksrc > 3) {
 1826 		net_err_ratelimited("%s: invalid clksrc=%d\n",
 1827 				    drivername, clksrc);
 1828 		return -EINVAL;
 1829 	}
 1830 
 1831 	for (i = 0; i < 2; i++) {
 1832 		switch(ringsize[i]) {
 1833 			case 4:
 1834 			case 8:
 1835 			case 16:
 1836 			case 32:
 1837 			case 64:
 1838 				break;
 1839 			default:
 1840 				net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
 1841 						     drivername,
 1842 						     i ? "rx" : "tx",
 1843 						     ringsize[i]);
 1844 				ringsize[i] = 8;
 1845 				break;
 1846 		}
 1847 	} 
 1848 
 1849 	sirpulse = !!sirpulse;
 1850 
 1851 	/* proc_mkdir returns NULL if !CONFIG_PROC_FS.
 1852 	 * Failure to create the procfs entry is handled like running
 1853 	 * without procfs - it's not required for the driver to work.
 1854 	 */
 1855 	vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);
 1856 
 1857 	ret = pci_register_driver(&vlsi_irda_driver);
 1858 
 1859 	if (ret && vlsi_proc_root)
 1860 		remove_proc_entry(PROC_DIR, NULL);
 1861 	return ret;
 1862 
 1863 }
 1864 
 1865 static void __exit vlsi_mod_exit(void)
 1866 {
 1867 	pci_unregister_driver(&vlsi_irda_driver);
 1868 	if (vlsi_proc_root)
 1869 		remove_proc_entry(PROC_DIR, NULL);
 1870 }
 1871 
 1872 module_init(vlsi_mod_init);
 1873 module_exit(vlsi_mod_exit);
 1874 
 1875 
 1876 
 1877 
 1878 
 1879 /* LDV_COMMENT_BEGIN_MAIN */
 1880 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 1881 
 1882 /*###########################################################################*/
 1883 
 1884 /*############## Driver Environment Generator 0.2 output ####################*/
 1885 
 1886 /*###########################################################################*/
 1887 
 1888 
 1889 
 1890 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
 1891 void ldv_check_final_state(void);
 1892 
 1893 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests for a correct return result. */
 1894 void ldv_check_return_value(int res);
 1895 
 1896 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests for a correct return result of the probe() function. */
 1897 void ldv_check_return_value_probe(int res);
 1898 
 1899 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 1900 void ldv_initialize(void);
 1901 
 1902 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 1903 void ldv_handler_precall(void);
 1904 
 1905 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
 1906 int nondet_int(void);
 1907 
 1908 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 1909 int LDV_IN_INTERRUPT;
 1910 
 1911 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 1912 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 1913 
 1914 
 1915 
 1916 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 1917 	/*============================= VARIABLE DECLARATION PART   =============================*/
 1918 	/** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/
 1919 	/* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/
 1920 	/* LDV_COMMENT_BEGIN_PREP */
 1921 	#define DRIVER_NAME 		"vlsi_ir"
 1922 	#define DRIVER_VERSION		"v0.5"
 1923 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 1924 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 1925 	#ifdef CONFIG_PROC_FS
 1926 	/* LDV_COMMENT_END_PREP */
 1927 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */
 1928 	struct inode * var_group1;
 1929 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */
 1930 	struct file * var_group2;
 1931 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_seq_open" */
 1932 	static int res_vlsi_seq_open_6;
 1933 	/* LDV_COMMENT_BEGIN_PREP */
 1934 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 1935 	#else	
 1936 	#define VLSI_PROC_FOPS		NULL
 1937 	#endif
 1938 	#ifdef CONFIG_PM
 1939 	#endif 
 1940 	#ifdef CONFIG_PM
 1941 	#endif
 1942 	#define PROC_DIR ("driver/" DRIVER_NAME)
 1943 	/* LDV_COMMENT_END_PREP */
 1944 
 1945 	/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 1946 	/* content: static int vlsi_open(struct net_device *ndev)*/
 1947 	/* LDV_COMMENT_BEGIN_PREP */
 1948 	#define DRIVER_NAME 		"vlsi_ir"
 1949 	#define DRIVER_VERSION		"v0.5"
 1950 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 1951 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 1952 	#ifdef CONFIG_PROC_FS
 1953 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 1954 	#else	
 1955 	#define VLSI_PROC_FOPS		NULL
 1956 	#endif
 1957 	/* LDV_COMMENT_END_PREP */
 1958 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_open" */
 1959 	struct net_device * var_group3;
 1960 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_open" */
 1961 	static int res_vlsi_open_29;
 1962 	/* LDV_COMMENT_BEGIN_PREP */
 1963 	#ifdef CONFIG_PM
 1964 	#endif 
 1965 	#ifdef CONFIG_PM
 1966 	#endif
 1967 	#define PROC_DIR ("driver/" DRIVER_NAME)
 1968 	/* LDV_COMMENT_END_PREP */
 1969 	/* content: static int vlsi_close(struct net_device *ndev)*/
 1970 	/* LDV_COMMENT_BEGIN_PREP */
 1971 	#define DRIVER_NAME 		"vlsi_ir"
 1972 	#define DRIVER_VERSION		"v0.5"
 1973 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 1974 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 1975 	#ifdef CONFIG_PROC_FS
 1976 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 1977 	#else	
 1978 	#define VLSI_PROC_FOPS		NULL
 1979 	#endif
 1980 	/* LDV_COMMENT_END_PREP */
 1981 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_close" */
 1982 	static int res_vlsi_close_30;
 1983 	/* LDV_COMMENT_BEGIN_PREP */
 1984 	#ifdef CONFIG_PM
 1985 	#endif 
 1986 	#ifdef CONFIG_PM
 1987 	#endif
 1988 	#define PROC_DIR ("driver/" DRIVER_NAME)
 1989 	/* LDV_COMMENT_END_PREP */
 1990 	/* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/
 1991 	/* LDV_COMMENT_BEGIN_PREP */
 1992 	#define DRIVER_NAME 		"vlsi_ir"
 1993 	#define DRIVER_VERSION		"v0.5"
 1994 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 1995 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 1996 	#ifdef CONFIG_PROC_FS
 1997 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 1998 	#else	
 1999 	#define VLSI_PROC_FOPS		NULL
 2000 	#endif
 2001 	/* LDV_COMMENT_END_PREP */
 2002 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_hard_start_xmit" */
 2003 	struct sk_buff * var_group4;
 2004 	/* LDV_COMMENT_BEGIN_PREP */
 2005 	#ifdef CONFIG_PM
 2006 	#endif 
 2007 	#ifdef CONFIG_PM
 2008 	#endif
 2009 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2010 	/* LDV_COMMENT_END_PREP */
 2011 	/* content: static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)*/
 2012 	/* LDV_COMMENT_BEGIN_PREP */
 2013 	#define DRIVER_NAME 		"vlsi_ir"
 2014 	#define DRIVER_VERSION		"v0.5"
 2015 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2016 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2017 	#ifdef CONFIG_PROC_FS
 2018 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2019 	#else	
 2020 	#define VLSI_PROC_FOPS		NULL
 2021 	#endif
 2022 	/* LDV_COMMENT_END_PREP */
 2023 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */
 2024 	struct ifreq * var_group5;
 2025 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */
 2026 	int  var_vlsi_ioctl_27_p2;
 2027 	/* LDV_COMMENT_BEGIN_PREP */
 2028 	#ifdef CONFIG_PM
 2029 	#endif 
 2030 	#ifdef CONFIG_PM
 2031 	#endif
 2032 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2033 	/* LDV_COMMENT_END_PREP */
 2034 	/* content: static void vlsi_tx_timeout(struct net_device *ndev)*/
 2035 	/* LDV_COMMENT_BEGIN_PREP */
 2036 	#define DRIVER_NAME 		"vlsi_ir"
 2037 	#define DRIVER_VERSION		"v0.5"
 2038 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2039 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2040 	#ifdef CONFIG_PROC_FS
 2041 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2042 	#else	
 2043 	#define VLSI_PROC_FOPS		NULL
 2044 	#endif
 2045 	/* LDV_COMMENT_END_PREP */
 2046 	/* LDV_COMMENT_BEGIN_PREP */
 2047 	#ifdef CONFIG_PM
 2048 	#endif 
 2049 	#ifdef CONFIG_PM
 2050 	#endif
 2051 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2052 	/* LDV_COMMENT_END_PREP */
 2053 
 2054 	/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2055 	/* content: static int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
 2056 	/* LDV_COMMENT_BEGIN_PREP */
 2057 	#define DRIVER_NAME 		"vlsi_ir"
 2058 	#define DRIVER_VERSION		"v0.5"
 2059 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2060 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2061 	#ifdef CONFIG_PROC_FS
 2062 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2063 	#else	
 2064 	#define VLSI_PROC_FOPS		NULL
 2065 	#endif
 2066 	/* LDV_COMMENT_END_PREP */
 2067 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */
 2068 	struct pci_dev * var_group6;
 2069 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */
 2070 	const struct pci_device_id * var_vlsi_irda_probe_32_p1;
 2071 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_irda_probe" */
 2072 	static int res_vlsi_irda_probe_32;
 2073 	/* LDV_COMMENT_BEGIN_PREP */
 2074 	#ifdef CONFIG_PM
 2075 	#endif 
 2076 	#ifdef CONFIG_PM
 2077 	#endif
 2078 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2079 	/* LDV_COMMENT_END_PREP */
 2080 	/* content: static void vlsi_irda_remove(struct pci_dev *pdev)*/
 2081 	/* LDV_COMMENT_BEGIN_PREP */
 2082 	#define DRIVER_NAME 		"vlsi_ir"
 2083 	#define DRIVER_VERSION		"v0.5"
 2084 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2085 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2086 	#ifdef CONFIG_PROC_FS
 2087 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2088 	#else	
 2089 	#define VLSI_PROC_FOPS		NULL
 2090 	#endif
 2091 	/* LDV_COMMENT_END_PREP */
 2092 	/* LDV_COMMENT_BEGIN_PREP */
 2093 	#ifdef CONFIG_PM
 2094 	#endif 
 2095 	#ifdef CONFIG_PM
 2096 	#endif
 2097 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2098 	/* LDV_COMMENT_END_PREP */
 2099 	/* content: static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)*/
 2100 	/* LDV_COMMENT_BEGIN_PREP */
 2101 	#define DRIVER_NAME 		"vlsi_ir"
 2102 	#define DRIVER_VERSION		"v0.5"
 2103 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2104 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2105 	#ifdef CONFIG_PROC_FS
 2106 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2107 	#else	
 2108 	#define VLSI_PROC_FOPS		NULL
 2109 	#endif
 2110 	#ifdef CONFIG_PM
 2111 	/* LDV_COMMENT_END_PREP */
 2112 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_suspend" */
 2113 	pm_message_t  var_vlsi_irda_suspend_34_p1;
 2114 	/* LDV_COMMENT_BEGIN_PREP */
 2115 	#endif 
 2116 	#ifdef CONFIG_PM
 2117 	#endif
 2118 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2119 	/* LDV_COMMENT_END_PREP */
 2120 	/* content: static int vlsi_irda_resume(struct pci_dev *pdev)*/
 2121 	/* LDV_COMMENT_BEGIN_PREP */
 2122 	#define DRIVER_NAME 		"vlsi_ir"
 2123 	#define DRIVER_VERSION		"v0.5"
 2124 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2125 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2126 	#ifdef CONFIG_PROC_FS
 2127 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2128 	#else	
 2129 	#define VLSI_PROC_FOPS		NULL
 2130 	#endif
 2131 	#ifdef CONFIG_PM
 2132 	/* LDV_COMMENT_END_PREP */
 2133 	/* LDV_COMMENT_BEGIN_PREP */
 2134 	#endif 
 2135 	#ifdef CONFIG_PM
 2136 	#endif
 2137 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2138 	/* LDV_COMMENT_END_PREP */
 2139 
 2140 	/** CALLBACK SECTION request_irq **/
 2141 	/* content: static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)*/
 2142 	/* LDV_COMMENT_BEGIN_PREP */
 2143 	#define DRIVER_NAME 		"vlsi_ir"
 2144 	#define DRIVER_VERSION		"v0.5"
 2145 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2146 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2147 	#ifdef CONFIG_PROC_FS
 2148 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2149 	#else	
 2150 	#define VLSI_PROC_FOPS		NULL
 2151 	#endif
 2152 	/* LDV_COMMENT_END_PREP */
 2153 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */
 2154 	int  var_vlsi_interrupt_28_p0;
 2155 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */
 2156 	void * var_vlsi_interrupt_28_p1;
 2157 	/* LDV_COMMENT_BEGIN_PREP */
 2158 	#ifdef CONFIG_PM
 2159 	#endif 
 2160 	#ifdef CONFIG_PM
 2161 	#endif
 2162 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2163 	/* LDV_COMMENT_END_PREP */
 2164 
 2165 
 2166 
 2167 
 2168 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 2169 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 2170 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 2171 	LDV_IN_INTERRUPT=1;
 2172 
 2173 
 2174 
 2175 
 2176 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 2177 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 2178 	/*============================= FUNCTION CALL SECTION       =============================*/
 2179 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 2180 	ldv_initialize();
 2181 
 2182 	/** INIT: init_type: ST_MODULE_INIT **/
 2183 	/* content: static int __init vlsi_mod_init(void)*/
 2184 	/* LDV_COMMENT_BEGIN_PREP */
 2185 	#define DRIVER_NAME 		"vlsi_ir"
 2186 	#define DRIVER_VERSION		"v0.5"
 2187 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2188 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2189 	#ifdef CONFIG_PROC_FS
 2190 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2191 	#else	
 2192 	#define VLSI_PROC_FOPS		NULL
 2193 	#endif
 2194 	#ifdef CONFIG_PM
 2195 	#endif 
 2196 	#ifdef CONFIG_PM
 2197 	#endif
 2198 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2199 	/* LDV_COMMENT_END_PREP */
 2200 	/* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
 2201 	ldv_handler_precall();
 2202 	 if(vlsi_mod_init()) 
 2203 		goto ldv_final;
 2204 	int ldv_s_vlsi_proc_fops_file_operations = 0;
 2205 
 2206 	int ldv_s_vlsi_netdev_ops_net_device_ops = 0;
 2207 	
 2208 
 2209 	int ldv_s_vlsi_irda_driver_pci_driver = 0;
 2210 	
 2211 
 2212 	
 2213 
 2214 
 2215 	while(  nondet_int()
 2216 		|| !(ldv_s_vlsi_proc_fops_file_operations == 0)
 2217 		|| !(ldv_s_vlsi_netdev_ops_net_device_ops == 0)
 2218 		|| !(ldv_s_vlsi_irda_driver_pci_driver == 0)
 2219 	) {
 2220 
 2221 		switch(nondet_int()) {
 2222 
 2223 			case 0: {
 2224 
 2225 				/** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/
 2226 				if(ldv_s_vlsi_proc_fops_file_operations==0) {
 2227 
 2228 				/* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/
 2229 				/* LDV_COMMENT_BEGIN_PREP */
 2230 				#define DRIVER_NAME 		"vlsi_ir"
 2231 				#define DRIVER_VERSION		"v0.5"
 2232 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2233 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2234 				#ifdef CONFIG_PROC_FS
 2235 				/* LDV_COMMENT_END_PREP */
 2236 				/* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "vlsi_proc_fops". Standard function test for correct return result. */
 2237 				ldv_handler_precall();
 2238 				res_vlsi_seq_open_6 = vlsi_seq_open( var_group1, var_group2);
 2239 				 ldv_check_return_value(res_vlsi_seq_open_6);
 2240 				 if(res_vlsi_seq_open_6) 
 2241 					goto ldv_module_exit;
 2242 				/* LDV_COMMENT_BEGIN_PREP */
 2243 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2244 				#else	
 2245 				#define VLSI_PROC_FOPS		NULL
 2246 				#endif
 2247 				#ifdef CONFIG_PM
 2248 				#endif 
 2249 				#ifdef CONFIG_PM
 2250 				#endif
 2251 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2252 				/* LDV_COMMENT_END_PREP */
 2253 				ldv_s_vlsi_proc_fops_file_operations=0;
 2254 
 2255 				}
 2256 
 2257 			}
 2258 
 2259 			break;
 2260 			case 1: {
 2261 
 2262 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2263 				if(ldv_s_vlsi_netdev_ops_net_device_ops==0) {
 2264 
 2265 				/* content: static int vlsi_open(struct net_device *ndev)*/
 2266 				/* LDV_COMMENT_BEGIN_PREP */
 2267 				#define DRIVER_NAME 		"vlsi_ir"
 2268 				#define DRIVER_VERSION		"v0.5"
 2269 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2270 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2271 				#ifdef CONFIG_PROC_FS
 2272 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2273 				#else	
 2274 				#define VLSI_PROC_FOPS		NULL
 2275 				#endif
 2276 				/* LDV_COMMENT_END_PREP */
 2277 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result. */
 2278 				ldv_handler_precall();
 2279 				res_vlsi_open_29 = vlsi_open( var_group3);
 2280 				 ldv_check_return_value(res_vlsi_open_29);
 2281 				 if(res_vlsi_open_29 < 0) 
 2282 					goto ldv_module_exit;
 2283 				/* LDV_COMMENT_BEGIN_PREP */
 2284 				#ifdef CONFIG_PM
 2285 				#endif 
 2286 				#ifdef CONFIG_PM
 2287 				#endif
 2288 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2289 				/* LDV_COMMENT_END_PREP */
 2290 				ldv_s_vlsi_netdev_ops_net_device_ops++;
 2291 
 2292 				}
 2293 
 2294 			}
 2295 
 2296 			break;
 2297 			case 2: {
 2298 
 2299 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2300 				if(ldv_s_vlsi_netdev_ops_net_device_ops==1) {
 2301 
 2302 				/* content: static int vlsi_close(struct net_device *ndev)*/
 2303 				/* LDV_COMMENT_BEGIN_PREP */
 2304 				#define DRIVER_NAME 		"vlsi_ir"
 2305 				#define DRIVER_VERSION		"v0.5"
 2306 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2307 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2308 				#ifdef CONFIG_PROC_FS
 2309 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2310 				#else	
 2311 				#define VLSI_PROC_FOPS		NULL
 2312 				#endif
 2313 				/* LDV_COMMENT_END_PREP */
 2314 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result. */
 2315 				ldv_handler_precall();
 2316 				res_vlsi_close_30 = vlsi_close( var_group3);
 2317 				 ldv_check_return_value(res_vlsi_close_30);
 2318 				 if(res_vlsi_close_30) 
 2319 					goto ldv_module_exit;
 2320 				/* LDV_COMMENT_BEGIN_PREP */
 2321 				#ifdef CONFIG_PM
 2322 				#endif 
 2323 				#ifdef CONFIG_PM
 2324 				#endif
 2325 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2326 				/* LDV_COMMENT_END_PREP */
 2327 				ldv_s_vlsi_netdev_ops_net_device_ops=0;
 2328 
 2329 				}
 2330 
 2331 			}
 2332 
 2333 			break;
 2334 			case 3: {
 2335 
 2336 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2337 				
 2338 
 2339 				/* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/
 2340 				/* LDV_COMMENT_BEGIN_PREP */
 2341 				#define DRIVER_NAME 		"vlsi_ir"
 2342 				#define DRIVER_VERSION		"v0.5"
 2343 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2344 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2345 				#ifdef CONFIG_PROC_FS
 2346 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2347 				#else	
 2348 				#define VLSI_PROC_FOPS		NULL
 2349 				#endif
 2350 				/* LDV_COMMENT_END_PREP */
 2351 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "vlsi_netdev_ops" */
 2352 				ldv_handler_precall();
 2353 				vlsi_hard_start_xmit( var_group4, var_group3);
 2354 				/* LDV_COMMENT_BEGIN_PREP */
 2355 				#ifdef CONFIG_PM
 2356 				#endif 
 2357 				#ifdef CONFIG_PM
 2358 				#endif
 2359 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2360 				/* LDV_COMMENT_END_PREP */
 2361 				
 2362 
 2363 				
 2364 
 2365 			}
 2366 
 2367 			break;
 2368 			case 4: {
 2369 
 2370 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2371 				
 2372 
 2373 				/* content: static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)*/
 2374 				/* LDV_COMMENT_BEGIN_PREP */
 2375 				#define DRIVER_NAME 		"vlsi_ir"
 2376 				#define DRIVER_VERSION		"v0.5"
 2377 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2378 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2379 				#ifdef CONFIG_PROC_FS
 2380 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2381 				#else	
 2382 				#define VLSI_PROC_FOPS		NULL
 2383 				#endif
 2384 				/* LDV_COMMENT_END_PREP */
 2385 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "vlsi_netdev_ops" */
 2386 				ldv_handler_precall();
 2387 				vlsi_ioctl( var_group3, var_group5, var_vlsi_ioctl_27_p2);
 2388 				/* LDV_COMMENT_BEGIN_PREP */
 2389 				#ifdef CONFIG_PM
 2390 				#endif 
 2391 				#ifdef CONFIG_PM
 2392 				#endif
 2393 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2394 				/* LDV_COMMENT_END_PREP */
 2395 				
 2396 
 2397 				
 2398 
 2399 			}
 2400 
 2401 			break;
 2402 			case 5: {
 2403 
 2404 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2405 				
 2406 
 2407 				/* content: static void vlsi_tx_timeout(struct net_device *ndev)*/
 2408 				/* LDV_COMMENT_BEGIN_PREP */
 2409 				#define DRIVER_NAME 		"vlsi_ir"
 2410 				#define DRIVER_VERSION		"v0.5"
 2411 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2412 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2413 				#ifdef CONFIG_PROC_FS
 2414 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2415 				#else	
 2416 				#define VLSI_PROC_FOPS		NULL
 2417 				#endif
 2418 				/* LDV_COMMENT_END_PREP */
 2419 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_tx_timeout" from driver structure with callbacks "vlsi_netdev_ops" */
 2420 				ldv_handler_precall();
 2421 				vlsi_tx_timeout( var_group3);
 2422 				/* LDV_COMMENT_BEGIN_PREP */
 2423 				#ifdef CONFIG_PM
 2424 				#endif 
 2425 				#ifdef CONFIG_PM
 2426 				#endif
 2427 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2428 				/* LDV_COMMENT_END_PREP */
 2429 				
 2430 
 2431 				
 2432 
 2433 			}
 2434 
 2435 			break;
 2436 			case 6: {
 2437 
 2438 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2439 				if(ldv_s_vlsi_irda_driver_pci_driver==0) {
 2440 
 2441 				/* content: static int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
 2442 				/* LDV_COMMENT_BEGIN_PREP */
 2443 				#define DRIVER_NAME 		"vlsi_ir"
 2444 				#define DRIVER_VERSION		"v0.5"
 2445 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2446 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2447 				#ifdef CONFIG_PROC_FS
 2448 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2449 				#else	
 2450 				#define VLSI_PROC_FOPS		NULL
 2451 				#endif
 2452 				/* LDV_COMMENT_END_PREP */
 2453 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "vlsi_irda_driver". Standard function test for correct return result. */
 2454 				res_vlsi_irda_probe_32 = vlsi_irda_probe( var_group6, var_vlsi_irda_probe_32_p1);
 2455 				 ldv_check_return_value(res_vlsi_irda_probe_32);
 2456 				 ldv_check_return_value_probe(res_vlsi_irda_probe_32);
 2457 				 if(res_vlsi_irda_probe_32) 
 2458 					goto ldv_module_exit;
 2459 				/* LDV_COMMENT_BEGIN_PREP */
 2460 				#ifdef CONFIG_PM
 2461 				#endif 
 2462 				#ifdef CONFIG_PM
 2463 				#endif
 2464 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2465 				/* LDV_COMMENT_END_PREP */
 2466 				ldv_s_vlsi_irda_driver_pci_driver++;
 2467 
 2468 				}
 2469 
 2470 			}
 2471 
 2472 			break;
 2473 			case 7: {
 2474 
 2475 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2476 				if(ldv_s_vlsi_irda_driver_pci_driver==1) {
 2477 
 2478 				/* content: static void vlsi_irda_remove(struct pci_dev *pdev)*/
 2479 				/* LDV_COMMENT_BEGIN_PREP */
 2480 				#define DRIVER_NAME 		"vlsi_ir"
 2481 				#define DRIVER_VERSION		"v0.5"
 2482 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2483 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2484 				#ifdef CONFIG_PROC_FS
 2485 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2486 				#else	
 2487 				#define VLSI_PROC_FOPS		NULL
 2488 				#endif
 2489 				/* LDV_COMMENT_END_PREP */
 2490 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "vlsi_irda_driver" */
 2491 				ldv_handler_precall();
 2492 				vlsi_irda_remove( var_group6);
 2493 				/* LDV_COMMENT_BEGIN_PREP */
 2494 				#ifdef CONFIG_PM
 2495 				#endif 
 2496 				#ifdef CONFIG_PM
 2497 				#endif
 2498 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2499 				/* LDV_COMMENT_END_PREP */
 2500 				ldv_s_vlsi_irda_driver_pci_driver=0;
 2501 
 2502 				}
 2503 
 2504 			}
 2505 
 2506 			break;
 2507 			case 8: {
 2508 
 2509 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2510 				
 2511 
 2512 				/* content: static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)*/
 2513 				/* LDV_COMMENT_BEGIN_PREP */
 2514 				#define DRIVER_NAME 		"vlsi_ir"
 2515 				#define DRIVER_VERSION		"v0.5"
 2516 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2517 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2518 				#ifdef CONFIG_PROC_FS
 2519 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2520 				#else	
 2521 				#define VLSI_PROC_FOPS		NULL
 2522 				#endif
 2523 				#ifdef CONFIG_PM
 2524 				/* LDV_COMMENT_END_PREP */
 2525 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "vlsi_irda_driver" */
 2526 				ldv_handler_precall();
 2527 				vlsi_irda_suspend( var_group6, var_vlsi_irda_suspend_34_p1);
 2528 				/* LDV_COMMENT_BEGIN_PREP */
 2529 				#endif 
 2530 				#ifdef CONFIG_PM
 2531 				#endif
 2532 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2533 				/* LDV_COMMENT_END_PREP */
 2534 				
 2535 
 2536 				
 2537 
 2538 			}
 2539 
 2540 			break;
 2541 			case 9: {
 2542 
 2543 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2544 				
 2545 
 2546 				/* content: static int vlsi_irda_resume(struct pci_dev *pdev)*/
 2547 				/* LDV_COMMENT_BEGIN_PREP */
 2548 				#define DRIVER_NAME 		"vlsi_ir"
 2549 				#define DRIVER_VERSION		"v0.5"
 2550 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2551 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2552 				#ifdef CONFIG_PROC_FS
 2553 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2554 				#else	
 2555 				#define VLSI_PROC_FOPS		NULL
 2556 				#endif
 2557 				#ifdef CONFIG_PM
 2558 				/* LDV_COMMENT_END_PREP */
 2559 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "vlsi_irda_driver" */
 2560 				ldv_handler_precall();
 2561 				vlsi_irda_resume( var_group6);
 2562 				/* LDV_COMMENT_BEGIN_PREP */
 2563 				#endif 
 2564 				#ifdef CONFIG_PM
 2565 				#endif
 2566 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2567 				/* LDV_COMMENT_END_PREP */
 2568 				
 2569 
 2570 				
 2571 
 2572 			}
 2573 
 2574 			break;
 2575 			case 10: {
 2576 
 2577 				/** CALLBACK SECTION request_irq **/
 2578 				LDV_IN_INTERRUPT=2;
 2579 
 2580 				/* content: static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)*/
 2581 				/* LDV_COMMENT_BEGIN_PREP */
 2582 				#define DRIVER_NAME 		"vlsi_ir"
 2583 				#define DRIVER_VERSION		"v0.5"
 2584 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2585 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2586 				#ifdef CONFIG_PROC_FS
 2587 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2588 				#else	
 2589 				#define VLSI_PROC_FOPS		NULL
 2590 				#endif
 2591 				/* LDV_COMMENT_END_PREP */
 2592 				/* LDV_COMMENT_FUNCTION_CALL */
 2593 				ldv_handler_precall();
 2594 				vlsi_interrupt( var_vlsi_interrupt_28_p0, var_vlsi_interrupt_28_p1);
 2595 				/* LDV_COMMENT_BEGIN_PREP */
 2596 				#ifdef CONFIG_PM
 2597 				#endif 
 2598 				#ifdef CONFIG_PM
 2599 				#endif
 2600 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2601 				/* LDV_COMMENT_END_PREP */
 2602 				LDV_IN_INTERRUPT=1;
 2603 
 2604 				
 2605 
 2606 			}
 2607 
 2608 			break;
 2609 			default: break;
 2610 
 2611 		}
 2612 
 2613 	}
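/* Editorial note: the ldv_s_* counters above encode call-ordering state:
 * ndo_stop is exercised only after a successful ndo_open, remove only after
 * a successful probe, and the while condition keeps the loop alive until all
 * counters return to zero, so every explored path balances open/close and
 * probe/remove.
 */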
 2614 
 2615 	ldv_module_exit: 
 2616 
 2617 	/** INIT: init_type: ST_MODULE_EXIT **/
 2618 	/* content: static void __exit vlsi_mod_exit(void)*/
 2619 	/* LDV_COMMENT_BEGIN_PREP */
 2620 	#define DRIVER_NAME 		"vlsi_ir"
 2621 	#define DRIVER_VERSION		"v0.5"
 2622 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2623 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2624 	#ifdef CONFIG_PROC_FS
 2625 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2626 	#else	
 2627 	#define VLSI_PROC_FOPS		NULL
 2628 	#endif
 2629 	#ifdef CONFIG_PM
 2630 	#endif 
 2631 	#ifdef CONFIG_PM
 2632 	#endif
 2633 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2634 	/* LDV_COMMENT_END_PREP */
 2635 	/* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
 2636 	ldv_handler_precall();
 2637 	vlsi_mod_exit();
 2638 
 2639 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 2640 	ldv_final: ldv_check_final_state();
 2641 
 2642 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 2643 	return;
 2644 
 2645 }
 2646 #endif
 2647 
 2648 /* LDV_COMMENT_END_MAIN */
    1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 extern void ldv_dma_map_page(void);
    9 extern void ldv_dma_mapping_error(void);
   10 #line 1 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/4878/dscv_tempdir/dscv/ri/331_1a/drivers/net/irda/vlsi_ir.c"
   11 
   12 /*********************************************************************
   13  *
   14  *	vlsi_ir.c:	VLSI82C147 PCI IrDA controller driver for Linux
   15  *
   16  *	Copyright (c) 2001-2003 Martin Diehl
   17  *
   18  *	This program is free software; you can redistribute it and/or 
   19  *	modify it under the terms of the GNU General Public License as 
   20  *	published by the Free Software Foundation; either version 2 of 
   21  *	the License, or (at your option) any later version.
   22  *
   23  *	This program is distributed in the hope that it will be useful,
   24  *	but WITHOUT ANY WARRANTY; without even the implied warranty of
   25  *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   26  *	GNU General Public License for more details.
   27  *
   28  *	You should have received a copy of the GNU General Public License 
   29  *	along with this program; if not, see <http://www.gnu.org/licenses/>.
   30  *
   31  ********************************************************************/
   32 
   33 #include <linux/module.h>
   34  
   35 #define DRIVER_NAME 		"vlsi_ir"
   36 #define DRIVER_VERSION		"v0.5"
   37 #define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
   38 #define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
   39 
   40 MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
   41 MODULE_AUTHOR(DRIVER_AUTHOR);
   42 MODULE_LICENSE("GPL");
   43 
   44 /********************************************************/
   45 
   46 #include <linux/kernel.h>
   47 #include <linux/ktime.h>
   48 #include <linux/init.h>
   49 #include <linux/interrupt.h>
   50 #include <linux/pci.h>
   51 #include <linux/slab.h>
   52 #include <linux/netdevice.h>
   53 #include <linux/skbuff.h>
   54 #include <linux/delay.h>
   55 #include <linux/proc_fs.h>
   56 #include <linux/seq_file.h>
   57 #include <linux/math64.h>
   58 #include <linux/mutex.h>
   59 #include <linux/uaccess.h>
   60 #include <asm/byteorder.h>
   61 
   62 #include <net/irda/irda.h>
   63 #include <net/irda/irda_device.h>
   64 #include <net/irda/wrapper.h>
   65 #include <net/irda/crc.h>
   66 
   67 #include "vlsi_ir.h"
   68 
   69 /********************************************************/
   70 
   71 static /* const */ char drivername[] = DRIVER_NAME;
   72 
   73 static const struct pci_device_id vlsi_irda_table[] = {
   74 	{
   75 		.class =        PCI_CLASS_WIRELESS_IRDA << 8,
   76 		.class_mask =	PCI_CLASS_SUBCLASS_MASK << 8, 
   77 		.vendor =       PCI_VENDOR_ID_VLSI,
   78 		.device =       PCI_DEVICE_ID_VLSI_82C147,
   79 		.subvendor = 	PCI_ANY_ID,
   80 		.subdevice =	PCI_ANY_ID,
   81 	},
   82 	{ /* all zeroes */ }
   83 };
   84 
   85 MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
   86 
   87 /********************************************************/
   88 
   89 /*	clksrc: which clock source to be used
   90  *		0: auto - try PLL, fallback to 40MHz XCLK
   91  *		1: on-chip 48MHz PLL
   92  *		2: external 48MHz XCLK
   93  *		3: external 40MHz XCLK (HP OB-800)
   94  */
   95 
   96 static int clksrc = 0;			/* default is 0 (auto) */
   97 module_param(clksrc, int, 0);
   98 MODULE_PARM_DESC(clksrc, "clock input source selection");
   99 
  100 /*	ringsize: size of the tx and rx descriptor rings
  101  *		independent for tx and rx
  102  *		specify as ringsize=tx[,rx]
  103  *		allowed values: 4, 8, 16, 32, 64
  104  *		Due to the IrDA 1.x maximum allowed window size of 7,
  105  *		there should be no gain from using rings larger than 8
  106  */
  107 
  108 static int ringsize[] = {8,8};		/* default is tx=8 / rx=8 */
  109 module_param_array(ringsize, int, NULL, 0);
  110 MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
  111 
  112 /*	sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
  113  *		0: very short, 1.5us (exception: 6us at 2.4 kbaud)
  114  *		1: nominal 3/16 bittime width
  115  *	note: IrDA-compliant peer devices should be happy regardless
  116  *		of which one is used. The primary goal is to save some power
  117  *		on the sender's side - at 9.6kbaud for example the short
  118  *		pulse width saves more than 90% of the transmitted IR power.
  119  */
  120 
  121 static int sirpulse = 1;		/* default is 3/16 bittime */
  122 module_param(sirpulse, int, 0);
  123 MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
  124 
  125 /*	qos_mtt_bits: encoded min-turn-time value we require the peer device
  126  *		 to use before transmitting to us. "Type 1" (per-station)
  127  *		 bitfield according to IrLAP definition (section 6.6.8)
  128  *		 Don't know which transceiver is used by my OB800 - the
  129  *		 pretty common HP HDLS-1100 requires 1 msec - so let's use that.
  130  */
  131 
  132 static int qos_mtt_bits = 0x07;		/* default is 1 ms or more */
  133 module_param(qos_mtt_bits, int, 0);
  134 MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
  135 
  136 /********************************************************/
  137 
  138 static void vlsi_reg_debug(unsigned iobase, const char *s)
  139 {
  140 	int	i;
  141 
  142 	printk(KERN_DEBUG "%s: ", s);
  143 	for (i = 0; i < 0x20; i++)
  144 		printk("%02x", (unsigned)inb((iobase+i)));
  145 	printk("\n");
  146 }
  147 
  148 static void vlsi_ring_debug(struct vlsi_ring *r)
  149 {
  150 	struct ring_descr *rd;
  151 	unsigned i;
  152 
  153 	printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
  154 		__func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
  155 	printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
  156 		atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
  157 	for (i = 0; i < r->size; i++) {
  158 		rd = &r->rd[i];
  159 		printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
  160 		printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
  161 		printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
  162 			__func__, (unsigned) rd_get_status(rd),
  163 			(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
  164 	}
  165 }
  166 
  167 /********************************************************/
  168 
  169 /* needed regardless of CONFIG_PROC_FS */
  170 static struct proc_dir_entry *vlsi_proc_root = NULL;
  171 
  172 #ifdef CONFIG_PROC_FS
  173 
  174 static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
  175 {
  176 	unsigned iobase = pci_resource_start(pdev, 0);
  177 	unsigned i;
  178 
  179 	seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
  180 		   pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
  181 	seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
  182 	seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
  183 		   pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
  184 	seq_printf(seq, "hw registers: ");
  185 	for (i = 0; i < 0x20; i++)
  186 		seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
  187 	seq_printf(seq, "\n");
  188 }
  189 		
  190 static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
  191 {
  192 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  193 	u8 byte;
  194 	u16 word;
  195 	s32 sec, usec;
  196 	unsigned iobase = ndev->base_addr;
  197 
  198 	seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
  199 		netif_device_present(ndev) ? "attached" : "detached", 
  200 		netif_running(ndev) ? "running" : "not running",
  201 		netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
  202 		netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
  203 
  204 	if (!netif_running(ndev))
  205 		return;
  206 
  207 	seq_printf(seq, "\nhw-state:\n");
  208 	pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
  209 	seq_printf(seq, "IRMISC:%s%s%s uart%s",
  210 		(byte&IRMISC_IRRAIL) ? " irrail" : "",
  211 		(byte&IRMISC_IRPD) ? " irpd" : "",
  212 		(byte&IRMISC_UARTTST) ? " uarttest" : "",
  213 		(byte&IRMISC_UARTEN) ? "@" : " disabled\n");
  214 	if (byte&IRMISC_UARTEN) {
  215 		seq_printf(seq, "0x%s\n",
  216 			(byte&2) ? ((byte&1) ? "3e8" : "2e8")
  217 				 : ((byte&1) ? "3f8" : "2f8"));
  218 	}
  219 	pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
  220 	seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
  221 		(byte&CLKCTL_PD_INV) ? "powered" : "down",
  222 		(byte&CLKCTL_LOCK) ? " locked" : "",
  223 		(byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
  224 		(byte&CLKCTL_CLKSTP) ? "stopped" : "running",
  225 		(byte&CLKCTL_WAKE) ? "enabled" : "disabled");
  226 	pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
  227 	seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
  228 
  229 	byte = inb(iobase+VLSI_PIO_IRINTR);
  230 	seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n",
  231 		(byte&IRINTR_ACTEN) ? " ACTEN" : "",
  232 		(byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
  233 		(byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
  234 		(byte&IRINTR_OE_EN) ? " OE_EN" : "",
  235 		(byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
  236 		(byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
  237 		(byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
  238 		(byte&IRINTR_OE_INT) ? " OE_INT" : "");
  239 	word = inw(iobase+VLSI_PIO_RINGPTR);
  240 	seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
  241 	word = inw(iobase+VLSI_PIO_RINGBASE);
  242 	seq_printf(seq, "RINGBASE: busmap=0x%08x\n",
  243 		((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
  244 	word = inw(iobase+VLSI_PIO_RINGSIZE);
  245 	seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
  246 		RINGSIZE_TO_TXSIZE(word));
  247 
  248 	word = inw(iobase+VLSI_PIO_IRCFG);
  249 	seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
  250 		(word&IRCFG_LOOP) ? " LOOP" : "",
  251 		(word&IRCFG_ENTX) ? " ENTX" : "",
  252 		(word&IRCFG_ENRX) ? " ENRX" : "",
  253 		(word&IRCFG_MSTR) ? " MSTR" : "",
  254 		(word&IRCFG_RXANY) ? " RXANY" : "",
  255 		(word&IRCFG_CRC16) ? " CRC16" : "",
  256 		(word&IRCFG_FIR) ? " FIR" : "",
  257 		(word&IRCFG_MIR) ? " MIR" : "",
  258 		(word&IRCFG_SIR) ? " SIR" : "",
  259 		(word&IRCFG_SIRFILT) ? " SIRFILT" : "",
  260 		(word&IRCFG_SIRTEST) ? " SIRTEST" : "",
  261 		(word&IRCFG_TXPOL) ? " TXPOL" : "",
  262 		(word&IRCFG_RXPOL) ? " RXPOL" : "");
  263 	word = inw(iobase+VLSI_PIO_IRENABLE);
  264 	seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n",
  265 		(word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "",
  266 		(word&IRENABLE_CFGER) ? " CFGERR" : "",
  267 		(word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
  268 		(word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
  269 		(word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
  270 		(word&IRENABLE_ENTXST) ? " ENTXST" : "",
  271 		(word&IRENABLE_ENRXST) ? " ENRXST" : "",
  272 		(word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
  273 	word = inw(iobase+VLSI_PIO_PHYCTL);
  274 	seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
  275 		(unsigned)PHYCTL_TO_BAUD(word),
  276 		(unsigned)PHYCTL_TO_PLSWID(word),
  277 		(unsigned)PHYCTL_TO_PREAMB(word));
  278 	word = inw(iobase+VLSI_PIO_NPHYCTL);
  279 	seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
  280 		(unsigned)PHYCTL_TO_BAUD(word),
  281 		(unsigned)PHYCTL_TO_PLSWID(word),
  282 		(unsigned)PHYCTL_TO_PREAMB(word));
  283 	word = inw(iobase+VLSI_PIO_MAXPKT);
  284 	seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word);
  285 	word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
  286 	seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
  287 
  288 	seq_printf(seq, "\nsw-state:\n");
  289 	seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 
  290 		(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
  291 	sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
  292 			  USEC_PER_SEC, &usec);
  293 	seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec);
  294 
  295 	seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
  296 		ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
  297 		ndev->stats.rx_dropped);
  298 	seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
  299 		ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
  300 		ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
  301 	seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
  302 		ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
  303 		ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);
  304 
  305 }
  306 		
  307 static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
  308 {
  309 	struct ring_descr *rd;
  310 	unsigned i, j;
  311 	int h, t;
  312 
  313 	seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
  314 		r->size, r->mask, r->len, r->dir, r->rd[0].hw);
  315 	h = atomic_read(&r->head) & r->mask;
  316 	t = atomic_read(&r->tail) & r->mask;
  317 	seq_printf(seq, "head = %d / tail = %d ", h, t);
  318 	if (h == t)
  319 		seq_printf(seq, "(empty)\n");
  320 	else {
  321 		if (((t+1)&r->mask) == h)
  322 			seq_printf(seq, "(full)\n");
  323 		else
  324 			seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 
  325 		rd = &r->rd[h];
  326 		j = (unsigned) rd_get_count(rd);
  327 		seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n",
  328 				h, (unsigned)rd_get_status(rd), j);
  329 		if (j > 0) {
  330 			seq_printf(seq, "   data: %*ph\n",
  331 				   min_t(unsigned, j, 20), rd->buf);
  332 		}
  333 	}
  334 	for (i = 0; i < r->size; i++) {
  335 		rd = &r->rd[i];
  336 		seq_printf(seq, "> ring descr %u: ", i);
  337 		seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
  338 		seq_printf(seq, "  hw: status=%02x count=%u busaddr=0x%08x\n",
  339 			(unsigned) rd_get_status(rd),
  340 			(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
  341 	}
  342 }
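/* Editorial illustration: the empty/full/level tests above work because head
 * and tail are free-running counters reduced by "& mask" with mask = size-1
 * and size a power of two. A minimal standalone sketch of the same arithmetic
 * (indices hypothetical; guarded out of any real build):
 */
#ifdef VLSI_EDITORIAL_RING_SKETCH
#include <stdio.h>

int main(void)
{
	unsigned mask = 8 - 1;		/* ring of size 8 */
	unsigned head = 5, tail = 2;	/* arbitrary example indices */

	printf("empty = %d\n", head == tail);			/* 0 */
	printf("full  = %d\n", ((tail + 1) & mask) == head);	/* 0 */
	printf("level = %u\n", (tail - head) & mask);		/* 5 */
	return 0;
}
#endif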
  343 
  344 static int vlsi_seq_show(struct seq_file *seq, void *v)
  345 {
  346 	struct net_device *ndev = seq->private;
  347 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  348 	unsigned long flags;
  349 
  350 	seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
  351 	seq_printf(seq, "clksrc: %s\n", 
  352 		(clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
  353 			    : ((clksrc==1)?"48MHz PLL":"autodetect"));
  354 	seq_printf(seq, "ringsize: tx=%d / rx=%d\n",
  355 		ringsize[0], ringsize[1]);
  356 	seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
  357 	seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
  358 
  359 	spin_lock_irqsave(&idev->lock, flags);
  360 	if (idev->pdev != NULL) {
  361 		vlsi_proc_pdev(seq, idev->pdev);
  362 
  363 		if (idev->pdev->current_state == 0)
  364 			vlsi_proc_ndev(seq, ndev);
  365 		else
  366 			seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
  367 				idev->resume_ok);
  368 		if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
  369 			seq_printf(seq, "\n--------- RX ring -----------\n\n");
  370 			vlsi_proc_ring(seq, idev->rx_ring);
  371 			seq_printf(seq, "\n--------- TX ring -----------\n\n");
  372 			vlsi_proc_ring(seq, idev->tx_ring);
  373 		}
  374 	}
  375 	seq_printf(seq, "\n");
  376 	spin_unlock_irqrestore(&idev->lock, flags);
  377 
  378 	return 0;
  379 }
  380 
  381 static int vlsi_seq_open(struct inode *inode, struct file *file)
  382 {
  383 	return single_open(file, vlsi_seq_show, PDE_DATA(inode));
  384 }
  385 
  386 static const struct file_operations vlsi_proc_fops = {
  387 	.owner	 = THIS_MODULE,
  388 	.open    = vlsi_seq_open,
  389 	.read    = seq_read,
  390 	.llseek  = seq_lseek,
  391 	.release = single_release,
  392 };
  393 
  394 #define VLSI_PROC_FOPS		(&vlsi_proc_fops)
  395 
  396 #else	/* CONFIG_PROC_FS */
  397 #define VLSI_PROC_FOPS		NULL
  398 #endif
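/* Editorial sketch: VLSI_PROC_FOPS is a real fops pointer only when
 * CONFIG_PROC_FS is set. A conditional pointer like this is typically
 * consumed via proc_create_data(); the actual call site lives elsewhere in
 * this driver and may differ from this hedged illustration:
 *
 *	if (vlsi_proc_root && VLSI_PROC_FOPS) {
 *		struct proc_dir_entry *ent =
 *			proc_create_data(ndev->name, 0444, vlsi_proc_root,
 *					 VLSI_PROC_FOPS, ndev);
 *		if (!ent)
 *			net_warn_ratelimited("%s: no proc entry\n", __func__);
 *	}
 */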
  399 
  400 /********************************************************/
  401 
  402 static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
  403 						unsigned size, unsigned len, int dir)
  404 {
  405 	struct vlsi_ring *r;
  406 	struct ring_descr *rd;
  407 	unsigned	i, j;
  408 	dma_addr_t	busaddr;
  409 
  410 	if (!size  ||  ((size-1)&size)!=0)	/* must be >0 and power of 2 */
  411 		return NULL;
  412 
  413 	r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
  414 	if (!r)
  415 		return NULL;
  416 	memset(r, 0, sizeof(*r));
  417 
  418 	r->pdev = pdev;
  419 	r->dir = dir;
  420 	r->len = len;
  421 	r->rd = (struct ring_descr *)(r+1);
  422 	r->mask = size - 1;
  423 	r->size = size;
  424 	atomic_set(&r->head, 0);
  425 	atomic_set(&r->tail, 0);
  426 
  427 	for (i = 0; i < size; i++) {
  428 		rd = r->rd + i;
  429 		memset(rd, 0, sizeof(*rd));
  430 		rd->hw = hwmap + i;
  431 		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
  432 		if (rd->buf == NULL ||
  433 		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
  434 			if (rd->buf) {
  435 				net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
  436 						    __func__, rd->buf);
  437 				kfree(rd->buf);
  438 				rd->buf = NULL;
  439 			}
  440 			for (j = 0; j < i; j++) {
  441 				rd = r->rd + j;
  442 				busaddr = rd_get_addr(rd);
  443 				rd_set_addr_status(rd, 0, 0);
  444 				if (busaddr)
  445 					pci_unmap_single(pdev, busaddr, len, dir);
  446 				kfree(rd->buf);
  447 				rd->buf = NULL;
  448 			}
  449 			kfree(r);
  450 			return NULL;
  451 		}
  452 		rd_set_addr_status(rd, busaddr, 0);
  453 		/* initially, the dma buffer is owned by the CPU */
  454 		rd->skb = NULL;
  455 	}
  456 	return r;
  457 }
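/* Editorial note: the allocation above packs the ring header and all its
 * descriptors into one kmalloc() block, with r->rd pointing just past the
 * header. A hedged sketch of the equivalent modern idiom, assuming the
 * descriptor array were a C99 flexible array member (in this driver it is a
 * plain pointer, so this is illustration only):
 *
 *	struct vlsi_ring_fam {
 *		...header fields as in struct vlsi_ring...
 *		struct ring_descr rd[];
 *	};
 *	r = kzalloc(struct_size(r, rd, size), GFP_KERNEL);
 */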
  458 
  459 static int vlsi_free_ring(struct vlsi_ring *r)
  460 {
  461 	struct ring_descr *rd;
  462 	unsigned	i;
  463 	dma_addr_t	busaddr;
  464 
  465 	for (i = 0; i < r->size; i++) {
  466 		rd = r->rd + i;
  467 		if (rd->skb)
  468 			dev_kfree_skb_any(rd->skb);
  469 		busaddr = rd_get_addr(rd);
  470 		rd_set_addr_status(rd, 0, 0);
  471 		if (busaddr)
  472 			pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
  473 		kfree(rd->buf);
  474 	}
  475 	kfree(r);
  476 	return 0;
  477 }
  478 
  479 static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
  480 {
  481 	char 			*ringarea;
  482 	struct ring_descr_hw	*hwmap;
  483 
  484 	idev->virtaddr = NULL;
  485 	idev->busaddr = 0;
  486 
  487 	ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
  488 					 &idev->busaddr);
  489 	if (!ringarea)
  490 		goto out;
  491 
  492 	hwmap = (struct ring_descr_hw *)ringarea;
  493 	idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
  494 					XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
  495 	if (idev->rx_ring == NULL)
  496 		goto out_unmap;
  497 
  498 	hwmap += MAX_RING_DESCR;
  499 	idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
  500 					XFER_BUF_SIZE, PCI_DMA_TODEVICE);
  501 	if (idev->tx_ring == NULL)
  502 		goto out_free_rx;
  503 
  504 	idev->virtaddr = ringarea;
  505 	return 0;
  506 
  507 out_free_rx:
  508 	vlsi_free_ring(idev->rx_ring);
  509 out_unmap:
  510 	idev->rx_ring = idev->tx_ring = NULL;
  511 	pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
  512 	idev->busaddr = 0;
  513 out:
  514 	return -ENOMEM;
  515 }
  516 
  517 static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
  518 {
  519 	vlsi_free_ring(idev->rx_ring);
  520 	vlsi_free_ring(idev->tx_ring);
  521 	idev->rx_ring = idev->tx_ring = NULL;
  522 
  523 	if (idev->busaddr)
  524 		pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
  525 
  526 	idev->virtaddr = NULL;
  527 	idev->busaddr = 0;
  528 
  529 	return 0;
  530 }
  531 
  532 /********************************************************/
  533 
  534 static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
  535 {
  536 	u16		status;
  537 	int		crclen, len = 0;
  538 	struct sk_buff	*skb;
  539 	int		ret = 0;
  540 	struct net_device *ndev = pci_get_drvdata(r->pdev);
  541 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  542 
  543 	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
  544 	/* dma buffer now owned by the CPU */
  545 	status = rd_get_status(rd);
  546 	if (status & RD_RX_ERROR) {
  547 		if (status & RD_RX_OVER)  
  548 			ret |= VLSI_RX_OVER;
  549 		if (status & RD_RX_LENGTH)  
  550 			ret |= VLSI_RX_LENGTH;
  551 		if (status & RD_RX_PHYERR)  
  552 			ret |= VLSI_RX_FRAME;
  553 		if (status & RD_RX_CRCERR)  
  554 			ret |= VLSI_RX_CRC;
  555 		goto done;
  556 	}
  557 
  558 	len = rd_get_count(rd);
  559 	crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
  560 	len -= crclen;		/* remove trailing CRC */
  561 	if (len <= 0) {
  562 		pr_debug("%s: strange frame (len=%d)\n", __func__, len);
  563 		ret |= VLSI_RX_DROP;
  564 		goto done;
  565 	}
  566 
  567 	if (idev->mode == IFF_SIR) {	/* hw checks CRC in MIR, FIR mode */
  568 
  569 		/* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
  570 		 * endian adjustment in place there will dirty a cache line
  571 		 * which belongs to the map, so we must be sure it gets
  572 		 * flushed before giving the buffer back to hardware.
  573 		 * vlsi_fill_rx() will do this anyway - but here we rely on it.
  574 		 */
  575 		le16_to_cpus(rd->buf+len);
  576 		if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
  577 			pr_debug("%s: crc error\n", __func__);
  578 			ret |= VLSI_RX_CRC;
  579 			goto done;
  580 		}
  581 	}
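	/* Editorial note (hedged): le16_to_cpus(p) converts the little-endian
	 * 16-bit value at p to host byte order in place - a no-op on
	 * little-endian CPUs, a byte swap on big-endian ones - so the trailing
	 * FCS is in host order before irda_calc_crc16() checks it above.
	 */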
  582 
  583 	if (!rd->skb) {
  584 		net_warn_ratelimited("%s: rx packet lost\n", __func__);
  585 		ret |= VLSI_RX_DROP;
  586 		goto done;
  587 	}
  588 
  589 	skb = rd->skb;
  590 	rd->skb = NULL;
  591 	skb->dev = ndev;
  592 	memcpy(skb_put(skb,len), rd->buf, len);
  593 	skb_reset_mac_header(skb);
  594 	if (in_interrupt())
  595 		netif_rx(skb);
  596 	else
  597 		netif_rx_ni(skb);
  598 
  599 done:
  600 	rd_set_status(rd, 0);
  601 	rd_set_count(rd, 0);
  602 	/* buffer still owned by CPU */
  603 
  604 	return (ret) ? -ret : len;
  605 }
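/* Editorial summary of the buffer-ownership protocol used throughout this
 * driver: pci_dma_sync_single_for_cpu() must precede any CPU access to a
 * streaming mapping, and pci_dma_sync_single_for_device() must precede
 * handing the buffer back to the busmaster. Sketch (rd/r as in this file):
 *
 *	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
 *	... CPU reads or rewrites rd->buf ...
 *	pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
 *	rd_activate(rd);	// descriptor owned by the hardware again
 */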
  606 
  607 static void vlsi_fill_rx(struct vlsi_ring *r)
  608 {
  609 	struct ring_descr *rd;
  610 
  611 	for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
  612 		if (rd_is_active(rd)) {
  613 			net_warn_ratelimited("%s: driver bug: rx descr race with hw\n",
  614 					     __func__);
  615 			vlsi_ring_debug(r);
  616 			break;
  617 		}
  618 		if (!rd->skb) {
  619 			rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
  620 			if (rd->skb) {
  621 				skb_reserve(rd->skb,1);
  622 				rd->skb->protocol = htons(ETH_P_IRDA);
  623 			}
  624 			else
  625 				break;	/* probably not worth logging? */
  626 		}
  627 		/* give dma buffer back to busmaster */
  628 		pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
  629 		rd_activate(rd);
  630 	}
  631 }
  632 
  633 static void vlsi_rx_interrupt(struct net_device *ndev)
  634 {
  635 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  636 	struct vlsi_ring *r = idev->rx_ring;
  637 	struct ring_descr *rd;
  638 	int ret;
  639 
  640 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
  641 
  642 		if (rd_is_active(rd))
  643 			break;
  644 
  645 		ret = vlsi_process_rx(r, rd);
  646 
  647 		if (ret < 0) {
  648 			ret = -ret;
  649 			ndev->stats.rx_errors++;
  650 			if (ret & VLSI_RX_DROP)  
  651 				ndev->stats.rx_dropped++;
  652 			if (ret & VLSI_RX_OVER)  
  653 				ndev->stats.rx_over_errors++;
  654 			if (ret & VLSI_RX_LENGTH)  
  655 				ndev->stats.rx_length_errors++;
  656 			if (ret & VLSI_RX_FRAME)  
  657 				ndev->stats.rx_frame_errors++;
  658 			if (ret & VLSI_RX_CRC)  
  659 				ndev->stats.rx_crc_errors++;
  660 		}
  661 		else if (ret > 0) {
  662 			ndev->stats.rx_packets++;
  663 			ndev->stats.rx_bytes += ret;
  664 		}
  665 	}
  666 
  667 	idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
  668 
  669 	vlsi_fill_rx(r);
  670 
  671 	if (ring_first(r) == NULL) {
  672 		/* we are in big trouble if this ever happens */
  673 		net_err_ratelimited("%s: rx ring exhausted!\n", __func__);
  674 		vlsi_ring_debug(r);
  675 	}
  676 	else
  677 		outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
  678 }
  679 
  680 /* caller must have stopped the controller from busmastering */
  681 
  682 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
  683 {
  684 	struct net_device *ndev = pci_get_drvdata(idev->pdev);
  685 	struct vlsi_ring *r = idev->rx_ring;
  686 	struct ring_descr *rd;
  687 	int ret;
  688 
  689 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
  690 
  691 		ret = 0;
  692 		if (rd_is_active(rd)) {
  693 			rd_set_status(rd, 0);
  694 			if (rd_get_count(rd)) {
  695 				pr_debug("%s - dropping rx packet\n", __func__);
  696 				ret = -VLSI_RX_DROP;
  697 			}
  698 			rd_set_count(rd, 0);
  699 			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
  700 			if (rd->skb) {
  701 				dev_kfree_skb_any(rd->skb);
  702 				rd->skb = NULL;
  703 			}
  704 		}
  705 		else
  706 			ret = vlsi_process_rx(r, rd);
  707 
  708 		if (ret < 0) {
  709 			ret = -ret;
  710 			ndev->stats.rx_errors++;
  711 			if (ret & VLSI_RX_DROP)  
  712 				ndev->stats.rx_dropped++;
  713 			if (ret & VLSI_RX_OVER)  
  714 				ndev->stats.rx_over_errors++;
  715 			if (ret & VLSI_RX_LENGTH)  
  716 				ndev->stats.rx_length_errors++;
  717 			if (ret & VLSI_RX_FRAME)  
  718 				ndev->stats.rx_frame_errors++;
  719 			if (ret & VLSI_RX_CRC)  
  720 				ndev->stats.rx_crc_errors++;
  721 		}
  722 		else if (ret > 0) {
  723 			ndev->stats.rx_packets++;
  724 			ndev->stats.rx_bytes += ret;
  725 		}
  726 	}
  727 }
  728 
  729 /********************************************************/
  730 
  731 static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
  732 {
  733 	u16		status;
  734 	int		len;
  735 	int		ret;
  736 
  737 	pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
  738 	/* dma buffer now owned by the CPU */
  739 	status = rd_get_status(rd);
  740 	if (status & RD_TX_UNDRN)
  741 		ret = VLSI_TX_FIFO;
  742 	else
  743 		ret = 0;
  744 	rd_set_status(rd, 0);
  745 
  746 	if (rd->skb) {
  747 		len = rd->skb->len;
  748 		dev_kfree_skb_any(rd->skb);
  749 		rd->skb = NULL;
  750 	}
  751 	else	/* tx-skb already freed? - should never happen */
  752 		len = rd_get_count(rd);		/* incorrect for SIR! (due to wrapping) */
  753 
  754 	rd_set_count(rd, 0);
  755 	/* dma buffer still owned by the CPU */
  756 
  757 	return (ret) ? -ret : len;
  758 }
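
/* Illustrative note, not part of the driver source: vlsi_process_tx()
 * uses the same return convention as vlsi_process_rx(): a positive value
 * is the number of bytes completed, and a negative value is the negated
 * OR of VLSI_TX_* error bits. E.g. a FIFO underrun yields -VLSI_TX_FIFO
 * (the byte count is then discarded), and the caller recovers the bits
 * with ret = -ret before testing ret & VLSI_TX_FIFO.
 */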
  759 
  760 static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
  761 {
  762 	u16 nphyctl;
  763 	u16 config;
  764 	unsigned mode;
  765 	int	ret;
  766 	int	baudrate;
  767 	int	fifocnt;
  768 
  769 	baudrate = idev->new_baud;
  770 	pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
  771 	if (baudrate == 4000000) {
  772 		mode = IFF_FIR;
  773 		config = IRCFG_FIR;
  774 		nphyctl = PHYCTL_FIR;
  775 	}
  776 	else if (baudrate == 1152000) {
  777 		mode = IFF_MIR;
  778 		config = IRCFG_MIR | IRCFG_CRC16;
  779 		nphyctl = PHYCTL_MIR(clksrc==3);
  780 	}
  781 	else {
  782 		mode = IFF_SIR;
  783 		config = IRCFG_SIR | IRCFG_SIRFILT  | IRCFG_RXANY;
  784 		switch(baudrate) {
  785 			default:
  786 				net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n",
  787 						     __func__, baudrate);
  788 				baudrate = 9600;
  789 				/* fallthru */
  790 			case 2400:
  791 			case 9600:
  792 			case 19200:
  793 			case 38400:
  794 			case 57600:
  795 			case 115200:
  796 				nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
  797 				break;
  798 		}
  799 	}
  800 	config |= IRCFG_MSTR | IRCFG_ENRX;
  801 
  802 	fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
  803 	if (fifocnt != 0) {
  804 		pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt);
  805 	}
  806 
  807 	outw(0, iobase+VLSI_PIO_IRENABLE);
  808 	outw(config, iobase+VLSI_PIO_IRCFG);
  809 	outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
  810 	wmb();
  811 	outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
  812 	mb();
  813 
  814 	udelay(1);	/* chip applies IRCFG on next rising edge of its 8MHz clock */
  815 
  816 	/* read back settings for validation */
  817 
  818 	config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
  819 
  820 	if (mode == IFF_FIR)
  821 		config ^= IRENABLE_FIR_ON;
  822 	else if (mode == IFF_MIR)
  823 		config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
  824 	else
  825 		config ^= IRENABLE_SIR_ON;
  826 
  827 	if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
  828 		net_warn_ratelimited("%s: failed to set %s mode!\n",
  829 				     __func__,
  830 				     mode == IFF_SIR ? "SIR" :
  831 				     mode == IFF_MIR ? "MIR" : "FIR");
  832 		ret = -1;
  833 	}
  834 	else {
  835 		if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
  836 			net_warn_ratelimited("%s: failed to apply baudrate %d\n",
  837 					     __func__, baudrate);
  838 			ret = -1;
  839 		}
  840 		else {
  841 			idev->mode = mode;
  842 			idev->baud = baudrate;
  843 			idev->new_baud = 0;
  844 			ret = 0;
  845 		}
  846 	}
  847 
  848 	if (ret)
  849 		vlsi_reg_debug(iobase,__func__);
  850 
  851 	return ret;
  852 }
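
/* Illustrative note, not part of the driver source, assuming the
 * IRENABLE_* bit layout used above: the read-back validation relies on an
 * XOR trick. After a successful mode switch the IRENABLE status must show
 * exactly the phy/clock and rx-status bits plus the one set of mode bits
 * we asked for. XOR-ing out the expected mode bits therefore leaves
 *
 *	config == (IRENABLE_PHYANDCLOCK | IRENABLE_ENRXST)
 *
 * if and only if the chip latched the requested mode: a missing expected
 * bit or an unexpected extra bit survives the XOR and fails the compare.
 */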
  853 
  854 static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
  855 					      struct net_device *ndev)
  856 {
  857 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
  858 	struct vlsi_ring	*r = idev->tx_ring;
  859 	struct ring_descr *rd;
  860 	unsigned long flags;
  861 	unsigned iobase = ndev->base_addr;
  862 	u8 status;
  863 	u16 config;
  864 	int mtt, diff;
  865 	int len, speed;
  866 	char *msg = NULL;
  867 
  868 	speed = irda_get_next_speed(skb);
  869 	spin_lock_irqsave(&idev->lock, flags);
  870 	if (speed != -1  &&  speed != idev->baud) {
  871 		netif_stop_queue(ndev);
  872 		idev->new_baud = speed;
  873 		status = RD_TX_CLRENTX;  /* stop tx-ring after this frame */
  874 	}
  875 	else
  876 		status = 0;
  877 
  878 	if (skb->len == 0) {
  879 		/* handle zero packets - should be speed change */
  880 		if (status == 0) {
  881 			msg = "bogus zero-length packet";
  882 			goto drop_unlock;
  883 		}
  884 
  885 		/* due to the completely async tx operation we might have
  886 		 * IrLAP racing with the hardware here, e.g. if the controller
  887 		 * is still sending the last packet at the current speed while
  888 		 * the LAP is already switching the speed using a synchronous
  889 		 * len=0 packet. Immediate execution would lead to a hw lockup
  890 		 * requiring a power cycle to reset. A good candidate to trigger
  891 		 * this is the final UA:RSP packet after receiving a DISC:CMD
  892 		 * when taking the LAP down.
  893 		 * Note that we are not protected by the queue_stop approach
  894 		 * because the final UA:RSP arrives _without_ a request to apply
  895 		 * new-speed-after-this-packet - hence the driver doesn't know
  896 		 * this was the last packet and doesn't stop the queue. So the
  897 		 * forced switch to default speed from the LAP gets through as
  898 		 * little as some 10 usec later, while the UA:RSP is still being
  899 		 * processed by the hardware, and we would hit exactly that lockup.
  900 		 */
  901 
  902 		if (ring_first(idev->tx_ring) == NULL) {
  903 			/* no race - tx-ring already empty */
  904 			vlsi_set_baud(idev, iobase);
  905 			netif_wake_queue(ndev);
  906 		}
  907 		else {
  908 			/* keep the speed change pending like it would
  909 			 * for any len>0 packet. tx completion interrupt
  910 			 * will apply it when the tx ring becomes empty.
  911 			 */
  912 		}
  913 		spin_unlock_irqrestore(&idev->lock, flags);
  914 		dev_kfree_skb_any(skb);
  915 		return NETDEV_TX_OK;
  916 	}
  917 
  918 	/* sanity checks - simply drop the packet */
  919 
  920 	rd = ring_last(r);
  921 	if (!rd) {
  922 		msg = "ring full, but queue wasn't stopped";
  923 		goto drop_unlock;
  924 	}
  925 
  926 	if (rd_is_active(rd)) {
  927 		msg = "entry still owned by hw";
  928 		goto drop_unlock;
  929 	}
  930 
  931 	if (!rd->buf) {
  932 		msg = "tx ring entry without pci buffer";
  933 		goto drop_unlock;
  934 	}
  935 
  936 	if (rd->skb) {
  937 		msg = "ring entry with old skb still attached";
  938 		goto drop_unlock;
  939 	}
  940 
  941 	/* no need for serialization or interrupt disable during mtt */
  942 	spin_unlock_irqrestore(&idev->lock, flags);
  943 
  944 	if ((mtt = irda_get_mtt(skb)) > 0) {
  945 		diff = ktime_us_delta(ktime_get(), idev->last_rx);
  946 		if (mtt > diff)
  947 			udelay(mtt - diff);
  948 			/* must not sleep here - called under netif_tx_lock! */
  949 	}
  950 
  951 	/* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
  952 	 * after subsequent tx-completion
  953 	 */
  954 
  955 	if (idev->mode == IFF_SIR) {
  956 		status |= RD_TX_DISCRC;		/* no hw-crc creation */
  957 		len = async_wrap_skb(skb, rd->buf, r->len);
  958 
  959 		/* Some rare worst-case situations in SIR mode might lead to
  960 		 * buffer overflow. The wrapper detects this and returns a
  961 		 * shortened frame (without FCS/EOF), but gives no error
  962 		 * indication about the invalid packet we are going to
  963 		 * transmit.
  964 		 * Therefore we log if the buffer got filled to the point where
  965 		 * the wrapper would abort, i.e. when fewer than 5 bytes are left
  966 		 * to allow appending the FCS/EOF.
  967 		 */
  968 
  969 		if (len >= r->len-5)
  970 			net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n",
  971 					     __func__);
  972 	}
  973 	else {
  974 		/* hw deals with MIR/FIR mode wrapping */
  975 		status |= RD_TX_PULSE;		/* send 2 us highspeed indication pulse */
  976 		len = skb->len;
  977 		if (len > r->len) {
  978 			msg = "frame exceeds tx buffer length";
  979 			goto drop;
  980 		}
  981 		else
  982 			skb_copy_from_linear_data(skb, rd->buf, len);
  983 	}
  984 
  985 	rd->skb = skb;			/* remember skb for tx-complete stats */
  986 
  987 	rd_set_count(rd, len);
  988 	rd_set_status(rd, status);	/* not yet active! */
  989 
  990 	/* give dma buffer back to busmaster-hw (flush caches to make
  991 	 * CPU-driven changes visible from the pci bus).
  992 	 */
  993 
  994 	pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
  995 
  996 /*	Switching to TX mode here races with the controller
  997  *	which may stop TX at any time when fetching an inactive descriptor
  998  *	or one with CLR_ENTX set. So we switch on TX only, if TX was not running
  999  *	_after_ the new descriptor was activated on the ring. This ensures
 1000  *	we will either find TX already stopped or we can be sure, there
 1001  *	will be a TX-complete interrupt even if the chip stopped doing
 1002  *	TX just after we found it still running. The ISR will then find
 1003  *	the non-empty ring and restart TX processing. The enclosing
 1004  *	spinlock provides the correct serialization to prevent race with isr.
 1005  */
 1006 
 1007 	spin_lock_irqsave(&idev->lock,flags);
 1008 
 1009 	rd_activate(rd);
 1010 
 1011 	if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
 1012 		int fifocnt;
 1013 
 1014 		fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 1015 		if (fifocnt != 0) {
 1016 			pr_debug("%s: rx fifo not empty(%d)\n",
 1017 				 __func__, fifocnt);
 1018 		}
 1019 
 1020 		config = inw(iobase+VLSI_PIO_IRCFG);
 1021 		mb();
 1022 		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
 1023 		wmb();
 1024 		outw(0, iobase+VLSI_PIO_PROMPT);
 1025 	}
 1026 
 1027 	if (ring_put(r) == NULL) {
 1028 		netif_stop_queue(ndev);
 1029 		pr_debug("%s: tx ring full - queue stopped\n", __func__);
 1030 	}
 1031 	spin_unlock_irqrestore(&idev->lock, flags);
 1032 
 1033 	return NETDEV_TX_OK;
 1034 
 1035 drop_unlock:
 1036 	spin_unlock_irqrestore(&idev->lock, flags);
 1037 drop:
 1038 	net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg);
 1039 	dev_kfree_skb_any(skb);
 1040 	ndev->stats.tx_errors++;
 1041 	ndev->stats.tx_dropped++;
 1042 	/* Don't even think about returning NET_XMIT_DROP (=1) here!
 1043 	 * In fact any retval!=0 causes the packet scheduler to requeue the
 1044 	 * packet for later retry of transmission - which isn't exactly
 1045 	 * what we want after we've just called dev_kfree_skb_any ;-)
 1046 	 */
 1047 	return NETDEV_TX_OK;
 1048 }
 1049 
 1050 static void vlsi_tx_interrupt(struct net_device *ndev)
 1051 {
 1052 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1053 	struct vlsi_ring	*r = idev->tx_ring;
 1054 	struct ring_descr	*rd;
 1055 	unsigned	iobase;
 1056 	int	ret;
 1057 	u16	config;
 1058 
 1059 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
 1060 
 1061 		if (rd_is_active(rd))
 1062 			break;
 1063 
 1064 		ret = vlsi_process_tx(r, rd);
 1065 
 1066 		if (ret < 0) {
 1067 			ret = -ret;
 1068 			ndev->stats.tx_errors++;
 1069 			if (ret & VLSI_TX_DROP)
 1070 				ndev->stats.tx_dropped++;
 1071 			if (ret & VLSI_TX_FIFO)
 1072 				ndev->stats.tx_fifo_errors++;
 1073 		}
 1074 		else if (ret > 0) {
 1075 			ndev->stats.tx_packets++;
 1076 			ndev->stats.tx_bytes += ret;
 1077 		}
 1078 	}
 1079 
 1080 	iobase = ndev->base_addr;
 1081 
 1082 	if (idev->new_baud  &&  rd == NULL)	/* tx ring empty and speed change pending */
 1083 		vlsi_set_baud(idev, iobase);
 1084 
 1085 	config = inw(iobase+VLSI_PIO_IRCFG);
 1086 	if (rd == NULL)			/* tx ring empty: re-enable rx */
 1087 		outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
 1088 
 1089 	else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
 1090 		int fifocnt;
 1091 
 1092 		fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 1093 		if (fifocnt != 0) {
 1094 			pr_debug("%s: rx fifo not empty(%d)\n",
 1095 				 __func__, fifocnt);
 1096 		}
 1097 		outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
 1098 	}
 1099 
 1100 	outw(0, iobase+VLSI_PIO_PROMPT);
 1101 
 1102 	if (netif_queue_stopped(ndev)  &&  !idev->new_baud) {
 1103 		netif_wake_queue(ndev);
 1104 		pr_debug("%s: queue awoken\n", __func__);
 1105 	}
 1106 }
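
/* Illustrative note, not part of the driver source: on an empty tx ring
 * the code above trades IRCFG_ENTX for IRCFG_ENRX, i.e. the half-duplex
 * IR medium is explicitly turned around to receive. While descriptors are
 * still queued it instead makes sure TX is (re)started, mirroring the
 * race-safe TX startup in vlsi_hard_start_xmit().
 */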
 1107 
 1108 /* caller must have stopped the controller from busmastering */
 1109 
 1110 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
 1111 {
 1112 	struct net_device *ndev = pci_get_drvdata(idev->pdev);
 1113 	struct vlsi_ring *r = idev->tx_ring;
 1114 	struct ring_descr *rd;
 1115 	int ret;
 1116 
 1117 	for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
 1118 
 1119 		ret = 0;
 1120 		if (rd_is_active(rd)) {
 1121 			rd_set_status(rd, 0);
 1122 			rd_set_count(rd, 0);
 1123 			pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
 1124 			if (rd->skb) {
 1125 				dev_kfree_skb_any(rd->skb);
 1126 				rd->skb = NULL;
 1127 			}
 1128 			pr_debug("%s - dropping tx packet\n", __func__);
 1129 			ret = -VLSI_TX_DROP;
 1130 		}
 1131 		else
 1132 			ret = vlsi_process_tx(r, rd);
 1133 
 1134 		if (ret < 0) {
 1135 			ret = -ret;
 1136 			ndev->stats.tx_errors++;
 1137 			if (ret & VLSI_TX_DROP)
 1138 				ndev->stats.tx_dropped++;
 1139 			if (ret & VLSI_TX_FIFO)
 1140 				ndev->stats.tx_fifo_errors++;
 1141 		}
 1142 		else if (ret > 0) {
 1143 			ndev->stats.tx_packets++;
 1144 			ndev->stats.tx_bytes += ret;
 1145 		}
 1146 	}
 1147 
 1148 }
 1149 
 1150 /********************************************************/
 1151 
 1152 static int vlsi_start_clock(struct pci_dev *pdev)
 1153 {
 1154 	u8	clkctl, lock;
 1155 	int	i, count;
 1156 
 1157 	if (clksrc < 2) { /* auto or PLL: try PLL */
 1158 		clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
 1159 		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1160 
 1161 		/* procedure to detect PLL lock synchronisation:
 1162 		 * after 0.5 msec initial delay we expect to find 3 PLL lock
 1163 		 * indications within 10 msec for successful PLL detection.
 1164 		 */
 1165 		udelay(500);
 1166 		count = 0;
 1167 		for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
 1168 			pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
 1169 			if (lock&CLKCTL_LOCK) {
 1170 				if (++count >= 3)
 1171 					break;
 1172 			}
 1173 			udelay(50);
 1174 		}
 1175 		if (count < 3) {
 1176 			if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
 1177 				net_err_ratelimited("%s: no PLL or failed to lock!\n",
 1178 						    __func__);
 1179 				clkctl = CLKCTL_CLKSTP;
 1180 				pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1181 				return -1;
 1182 			}
 1183 			else			/* was: clksrc=0(auto) */
 1184 				clksrc = 3;	/* fallback to 40MHz XCLK (OB800) */
 1185 
 1186 			pr_debug("%s: PLL not locked, fallback to clksrc=%d\n",
 1187 				 __func__, clksrc);
 1188 		}
 1189 		else
 1190 			clksrc = 1;	/* got successful PLL lock */
 1191 	}
 1192 
 1193 	if (clksrc != 1) {
 1194 		/* we get here if either no PLL detected in auto-mode or
 1195 		   an external clock source was explicitly specified */
 1196 
 1197 		clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
 1198 		if (clksrc == 3)
 1199 			clkctl |= CLKCTL_XCKSEL;	
 1200 		pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1201 
 1202 		/* no way to test for working XCLK */
 1203 	}
 1204 	else
 1205 		pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
 1206 
 1207 	/* ok, now going to connect the chip with the clock source */
 1208 
 1209 	clkctl &= ~CLKCTL_CLKSTP;
 1210 	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1211 
 1212 	return 0;
 1213 }
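
/* Illustrative sketch, not part of the driver source: the PLL detection
 * above is an instance of the generic "require N good samples within a
 * deadline" poll. Under the same assumptions (50 usec sample spacing,
 * 10 msec budget, 3 required lock indications) it could be isolated as a
 * hypothetical helper:
 *
 *	static int vlsi_poll_pll_lock(struct pci_dev *pdev)
 *	{
 *		u8 lock;
 *		int i, count = 0;
 *
 *		for (i = 500; i <= 10000; i += 50) {	// max 10 msec
 *			pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
 *			if ((lock & CLKCTL_LOCK) && ++count >= 3)
 *				return 0;		// stable lock found
 *			udelay(50);
 *		}
 *		return -1;				// no stable lock
 *	}
 */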
 1214 
 1215 static void vlsi_stop_clock(struct pci_dev *pdev)
 1216 {
 1217 	u8	clkctl;
 1218 
 1219 	/* disconnect chip from clock source */
 1220 	pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
 1221 	clkctl |= CLKCTL_CLKSTP;
 1222 	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1223 
 1224 	/* disable all clock sources */
 1225 	clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
 1226 	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
 1227 }
 1228 
 1229 /********************************************************/
 1230 
 1231 /* writing all-zero to the VLSI PCI IO register area seems to prevent
 1232  * some occasional situations where the hardware fails (the symptoms
 1233  * appear as stalled tx/rx state machines, i.e. everything looks ok for
 1234  * receive or transmit but the hw makes no progress or is unable to
 1235  * access the bus memory locations).
 1236  * The best place to call this is immediately after/before the internal
 1237  * clock gets started/stopped.
 1238  */
 1239 
 1240 static inline void vlsi_clear_regs(unsigned iobase)
 1241 {
 1242 	unsigned	i;
 1243 	const unsigned	chip_io_extent = 32;
 1244 
 1245 	for (i = 0; i < chip_io_extent; i += sizeof(u16))
 1246 		outw(0, iobase + i);
 1247 }
 1248 
 1249 static int vlsi_init_chip(struct pci_dev *pdev)
 1250 {
 1251 	struct net_device *ndev = pci_get_drvdata(pdev);
 1252 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1253 	unsigned	iobase;
 1254 	u16 ptr;
 1255 
 1256 	/* start the clock and clean the registers */
 1257 
 1258 	if (vlsi_start_clock(pdev)) {
 1259 		net_err_ratelimited("%s: no valid clock source\n", __func__);
 1260 		return -1;
 1261 	}
 1262 	iobase = ndev->base_addr;
 1263 	vlsi_clear_regs(iobase);
 1264 
 1265 	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
 1266 
 1267 	outw(0, iobase+VLSI_PIO_IRENABLE);	/* disable IrPHY-interface */
 1268 
 1269 	/* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
 1270 
 1271 	outw(0, iobase+VLSI_PIO_IRCFG);
 1272 	wmb();
 1273 
 1274 	outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT);  /* max possible value=0x0fff */
 1275 
 1276 	outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
 1277 
 1278 	outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
 1279 		iobase+VLSI_PIO_RINGSIZE);	
 1280 
 1281 	ptr = inw(iobase+VLSI_PIO_RINGPTR);
 1282 	atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
 1283 	atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
 1284 	atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
 1285 	atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
 1286 
 1287 	vlsi_set_baud(idev, iobase);	/* idev->new_baud used as provided by caller */
 1288 
 1289 	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);	/* just in case - w/c pending IRQ's */
 1290 	wmb();
 1291 
 1292 	/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
 1293 	 * basically every received pulse fires an ACTIVITY-INT
 1294 	 * leading to >>1000 INT's per second instead of few 10
 1295 	 */
 1296 
 1297 	outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
 1298 
 1299 	return 0;
 1300 }
 1301 
 1302 static int vlsi_start_hw(vlsi_irda_dev_t *idev)
 1303 {
 1304 	struct pci_dev *pdev = idev->pdev;
 1305 	struct net_device *ndev = pci_get_drvdata(pdev);
 1306 	unsigned iobase = ndev->base_addr;
 1307 	u8 byte;
 1308 
 1309 	/* we don't use the legacy UART, disable its address decoding */
 1310 
 1311 	pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
 1312 	byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
 1313 	pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
 1314 
 1315 	/* enable PCI busmaster access to our 16MB page */
 1316 
 1317 	pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
 1318 	pci_set_master(pdev);
 1319 
 1320 	if (vlsi_init_chip(pdev) < 0) {
 1321 		pci_disable_device(pdev);
 1322 		return -1;
 1323 	}
 1324 
 1325 	vlsi_fill_rx(idev->rx_ring);
 1326 
 1327 	idev->last_rx = ktime_get();	/* first mtt may start from now on */
 1328 
 1329 	outw(0, iobase+VLSI_PIO_PROMPT);	/* kick hw state machine */
 1330 
 1331 	return 0;
 1332 }
 1333 
 1334 static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
 1335 {
 1336 	struct pci_dev *pdev = idev->pdev;
 1337 	struct net_device *ndev = pci_get_drvdata(pdev);
 1338 	unsigned iobase = ndev->base_addr;
 1339 	unsigned long flags;
 1340 
 1341 	spin_lock_irqsave(&idev->lock,flags);
 1342 	outw(0, iobase+VLSI_PIO_IRENABLE);
 1343 	outw(0, iobase+VLSI_PIO_IRCFG);			/* disable everything */
 1344 
 1345 	/* disable and w/c irqs */
 1346 	outb(0, iobase+VLSI_PIO_IRINTR);
 1347 	wmb();
 1348 	outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
 1349 	spin_unlock_irqrestore(&idev->lock,flags);
 1350 
 1351 	vlsi_unarm_tx(idev);
 1352 	vlsi_unarm_rx(idev);
 1353 
 1354 	vlsi_clear_regs(iobase);
 1355 	vlsi_stop_clock(pdev);
 1356 
 1357 	pci_disable_device(pdev);
 1358 
 1359 	return 0;
 1360 }
 1361 
 1362 /**************************************************************/
 1363 
 1364 static void vlsi_tx_timeout(struct net_device *ndev)
 1365 {
 1366 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1367 
 1368 
 1369 	vlsi_reg_debug(ndev->base_addr, __func__);
 1370 	vlsi_ring_debug(idev->tx_ring);
 1371 
 1372 	if (netif_running(ndev))
 1373 		netif_stop_queue(ndev);
 1374 
 1375 	vlsi_stop_hw(idev);
 1376 
 1377 	/* now simply restart the whole thing */
 1378 
 1379 	if (!idev->new_baud)
 1380 		idev->new_baud = idev->baud;		/* keep current baudrate */
 1381 
 1382 	if (vlsi_start_hw(idev))
 1383 		net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n",
 1384 				    __func__, pci_name(idev->pdev), ndev->name);
 1385 	else
 1386 		netif_start_queue(ndev);
 1387 }
 1388 
 1389 static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 1390 {
 1391 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1392 	struct if_irda_req *irq = (struct if_irda_req *) rq;
 1393 	unsigned long flags;
 1394 	u16 fifocnt;
 1395 	int ret = 0;
 1396 
 1397 	switch (cmd) {
 1398 		case SIOCSBANDWIDTH:
 1399 			if (!capable(CAP_NET_ADMIN)) {
 1400 				ret = -EPERM;
 1401 				break;
 1402 			}
 1403 			spin_lock_irqsave(&idev->lock, flags);
 1404 			idev->new_baud = irq->ifr_baudrate;
 1405 			/* when called from userland there might be a minor race window here
 1406 			 * if the stack tries to change speed concurrently - which would be
 1407 			 * pretty strange anyway with the userland having full control...
 1408 			 */
 1409 			vlsi_set_baud(idev, ndev->base_addr);
 1410 			spin_unlock_irqrestore(&idev->lock, flags);
 1411 			break;
 1412 		case SIOCSMEDIABUSY:
 1413 			if (!capable(CAP_NET_ADMIN)) {
 1414 				ret = -EPERM;
 1415 				break;
 1416 			}
 1417 			irda_device_set_media_busy(ndev, TRUE);
 1418 			break;
 1419 		case SIOCGRECEIVING:
 1420 			/* the best we can do: check whether there are any bytes in the rx fifo.
 1421 			 * The window we can trust (in case some data arrives just afterwards)
 1422 			 * may be as short as 1 usec or so at 4 Mbps.
 1423 			 */
 1424 			fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
 1425 			irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
 1426 			break;
 1427 		default:
 1428 			net_warn_ratelimited("%s: notsupp - cmd=%04x\n",
 1429 					     __func__, cmd);
 1430 			ret = -EOPNOTSUPP;
 1431 	}	
 1432 	
 1433 	return ret;
 1434 }
 1435 
 1436 /********************************************************/
 1437 
 1438 static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
 1439 {
 1440 	struct net_device *ndev = dev_instance;
 1441 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1442 	unsigned	iobase;
 1443 	u8		irintr;
 1444 	int 		boguscount = 5;
 1445 	unsigned long	flags;
 1446 	int		handled = 0;
 1447 
 1448 	iobase = ndev->base_addr;
 1449 	spin_lock_irqsave(&idev->lock,flags);
 1450 	do {
 1451 		irintr = inb(iobase+VLSI_PIO_IRINTR);
 1452 		mb();
 1453 		outb(irintr, iobase+VLSI_PIO_IRINTR);	/* acknowledge asap */
 1454 
 1455 		if (!(irintr&=IRINTR_INT_MASK))		/* not our INT - probably shared */
 1456 			break;
 1457 
 1458 		handled = 1;
 1459 
 1460 		if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
 1461 			break;				/* nothing to do if only activity */
 1462 
 1463 		if (irintr&IRINTR_RPKTINT)
 1464 			vlsi_rx_interrupt(ndev);
 1465 
 1466 		if (irintr&IRINTR_TPKTINT)
 1467 			vlsi_tx_interrupt(ndev);
 1468 
 1469 	} while (--boguscount > 0);
 1470 	spin_unlock_irqrestore(&idev->lock,flags);
 1471 
 1472 	if (boguscount <= 0)
 1473 		net_info_ratelimited("%s: too much work in interrupt!\n",
 1474 				     __func__);
 1475 	return IRQ_RETVAL(handled);
 1476 }
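
/* Illustrative note, not part of the driver source: the boguscount loop
 * above is the classic bounded-ISR pattern for shared interrupt lines -
 * read and write-to-clear the status first, service rx/tx, and give up
 * after a fixed number of rounds so a misbehaving chip cannot livelock
 * the CPU inside the spinlocked handler. Leaving with boguscount == 0 is
 * therefore only a rate-limited diagnostic, not a failure.
 */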
 1477 
 1478 /********************************************************/
 1479 
 1480 static int vlsi_open(struct net_device *ndev)
 1481 {
 1482 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1483 	int	err = -EAGAIN;
 1484 	char	hwname[32];
 1485 
 1486 	if (pci_request_regions(idev->pdev, drivername)) {
 1487 		net_warn_ratelimited("%s: io resource busy\n", __func__);
 1488 		goto errout;
 1489 	}
 1490 	ndev->base_addr = pci_resource_start(idev->pdev,0);
 1491 	ndev->irq = idev->pdev->irq;
 1492 
 1493 	/* on some rare occasions the chip apparently comes up with
 1494 	 * IRQs pending. We better w/c pending IRQs and disable them all
 1495 	 */
 1496 
 1497 	outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
 1498 
 1499 	if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
 1500 			drivername, ndev)) {
 1501 		net_warn_ratelimited("%s: couldn't get IRQ: %d\n",
 1502 				     __func__, ndev->irq);
 1503 		goto errout_io;
 1504 	}
 1505 
 1506 	if ((err = vlsi_create_hwif(idev)) != 0)
 1507 		goto errout_irq;
 1508 
 1509 	sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
 1510 	idev->irlap = irlap_open(ndev,&idev->qos,hwname);
 1511 	if (!idev->irlap)
 1512 		goto errout_free_ring;
 1513 
 1514 	idev->last_rx = ktime_get();  /* first mtt may start from now on */
 1515 
 1516 	idev->new_baud = 9600;		/* start with IrPHY using 9600(SIR) mode */
 1517 
 1518 	if ((err = vlsi_start_hw(idev)) != 0)
 1519 		goto errout_close_irlap;
 1520 
 1521 	netif_start_queue(ndev);
 1522 
 1523 	net_info_ratelimited("%s: device %s operational\n",
 1524 			     __func__, ndev->name);
 1525 
 1526 	return 0;
 1527 
 1528 errout_close_irlap:
 1529 	irlap_close(idev->irlap);
 1530 errout_free_ring:
 1531 	vlsi_destroy_hwif(idev);
 1532 errout_irq:
 1533 	free_irq(ndev->irq,ndev);
 1534 errout_io:
 1535 	pci_release_regions(idev->pdev);
 1536 errout:
 1537 	return err;
 1538 }
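
/* Illustrative note, not part of the driver source: vlsi_open() uses the
 * canonical kernel goto ladder for error unwinding - each errout_* label
 * releases exactly what was acquired before the failing step, in reverse
 * order (irlap -> hw ring/if -> irq -> io regions), so every exit path
 * leaves the device in a consistent state.
 */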
 1539 
 1540 static int vlsi_close(struct net_device *ndev)
 1541 {
 1542 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1543 
 1544 	netif_stop_queue(ndev);
 1545 
 1546 	if (idev->irlap)
 1547 		irlap_close(idev->irlap);
 1548 	idev->irlap = NULL;
 1549 
 1550 	vlsi_stop_hw(idev);
 1551 
 1552 	vlsi_destroy_hwif(idev);
 1553 
 1554 	free_irq(ndev->irq,ndev);
 1555 
 1556 	pci_release_regions(idev->pdev);
 1557 
 1558 	net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name);
 1559 
 1560 	return 0;
 1561 }
 1562 
 1563 static const struct net_device_ops vlsi_netdev_ops = {
 1564 	.ndo_open       = vlsi_open,
 1565 	.ndo_stop       = vlsi_close,
 1566 	.ndo_start_xmit = vlsi_hard_start_xmit,
 1567 	.ndo_do_ioctl   = vlsi_ioctl,
 1568 	.ndo_tx_timeout = vlsi_tx_timeout,
 1569 };
 1570 
 1571 static int vlsi_irda_init(struct net_device *ndev)
 1572 {
 1573 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
 1574 	struct pci_dev *pdev = idev->pdev;
 1575 
 1576 	ndev->irq = pdev->irq;
 1577 	ndev->base_addr = pci_resource_start(pdev,0);
 1578 
 1579 	/* PCI busmastering
 1580 	 * see include file for details why we need these 2 masks, in this order!
 1581 	 */
 1582 
 1583 	if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
 1584 	    pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
 1585 		net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n",
 1586 				    __func__);
 1587 		return -1;
 1588 	}
 1589 
 1590 	irda_init_max_qos_capabilies(&idev->qos);
 1591 
 1592 	/* the VLSI82C147 does not support 576000! */
 1593 
 1594 	idev->qos.baud_rate.bits = IR_2400 | IR_9600
 1595 		| IR_19200 | IR_38400 | IR_57600 | IR_115200
 1596 		| IR_1152000 | (IR_4000000 << 8);
 1597 
 1598 	idev->qos.min_turn_time.bits = qos_mtt_bits;
 1599 
 1600 	irda_qos_bits_to_value(&idev->qos);
 1601 
 1602 	/* currently no public media definitions for IrDA */
 1603 
 1604 	ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
 1605 	ndev->if_port = IF_PORT_UNKNOWN;
 1606  
 1607 	ndev->netdev_ops = &vlsi_netdev_ops;
 1608 	ndev->watchdog_timeo  = 500*HZ/1000;	/* max. allowed turn time for IrLAP */
 1609 
 1610 	SET_NETDEV_DEV(ndev, &pdev->dev);
 1611 
 1612 	return 0;
 1613 }	
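
/* Illustrative note, not part of the driver source (hedged reading of the
 * IrDA qos encoding): baud_rate.bits is a 16-bit field whose low byte
 * carries the rates up to IR_1152000, while rates beyond that live in the
 * high byte - hence the (IR_4000000 << 8) above rather than a plain OR.
 */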
 1614 
 1615 /**************************************************************/
 1616 
 1617 static int
 1618 vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 1619 {
 1620 	struct net_device	*ndev;
 1621 	vlsi_irda_dev_t		*idev;
 1622 
 1623 	if (pci_enable_device(pdev))
 1624 		goto out;
 1625 	else
 1626 		pdev->current_state = 0; /* hw must be running now */
 1627 
 1628 	net_info_ratelimited("%s: IrDA PCI controller %s detected\n",
 1629 			     drivername, pci_name(pdev));
 1630 
 1631 	if ( !pci_resource_start(pdev,0) ||
 1632 	     !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
 1633 		net_err_ratelimited("%s: bar 0 invalid\n", __func__);
 1634 		goto out_disable;
 1635 	}
 1636 
 1637 	ndev = alloc_irdadev(sizeof(*idev));
 1638 	if (ndev==NULL) {
 1639 		net_err_ratelimited("%s: Unable to allocate device memory.\n",
 1640 				    __func__);
 1641 		goto out_disable;
 1642 	}
 1643 
 1644 	idev = netdev_priv(ndev);
 1645 
 1646 	spin_lock_init(&idev->lock);
 1647 	mutex_init(&idev->mtx);
 1648 	mutex_lock(&idev->mtx);
 1649 	idev->pdev = pdev;
 1650 
 1651 	if (vlsi_irda_init(ndev) < 0)
 1652 		goto out_freedev;
 1653 
 1654 	if (register_netdev(ndev) < 0) {
 1655 		net_err_ratelimited("%s: register_netdev failed\n", __func__);
 1656 		goto out_freedev;
 1657 	}
 1658 
 1659 	if (vlsi_proc_root != NULL) {
 1660 		struct proc_dir_entry *ent;
 1661 
 1662 		ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
 1663 				       vlsi_proc_root, VLSI_PROC_FOPS, ndev);
 1664 		if (!ent) {
 1665 			net_warn_ratelimited("%s: failed to create proc entry\n",
 1666 					     __func__);
 1667 		} else {
 1668 			proc_set_size(ent, 0);
 1669 		}
 1670 		idev->proc_entry = ent;
 1671 	}
 1672 	net_info_ratelimited("%s: registered device %s\n",
 1673 			     drivername, ndev->name);
 1674 
 1675 	pci_set_drvdata(pdev, ndev);
 1676 	mutex_unlock(&idev->mtx);
 1677 
 1678 	return 0;
 1679 
 1680 out_freedev:
 1681 	mutex_unlock(&idev->mtx);
 1682 	free_netdev(ndev);
 1683 out_disable:
 1684 	pci_disable_device(pdev);
 1685 out:
 1686 	return -ENODEV;
 1687 }
 1688 
 1689 static void vlsi_irda_remove(struct pci_dev *pdev)
 1690 {
 1691 	struct net_device *ndev = pci_get_drvdata(pdev);
 1692 	vlsi_irda_dev_t *idev;
 1693 
 1694 	if (!ndev) {
 1695 		net_err_ratelimited("%s: lost netdevice?\n", drivername);
 1696 		return;
 1697 	}
 1698 
 1699 	unregister_netdev(ndev);
 1700 
 1701 	idev = netdev_priv(ndev);
 1702 	mutex_lock(&idev->mtx);
 1703 	if (idev->proc_entry) {
 1704 		remove_proc_entry(ndev->name, vlsi_proc_root);
 1705 		idev->proc_entry = NULL;
 1706 	}
 1707 	mutex_unlock(&idev->mtx);
 1708 
 1709 	free_netdev(ndev);
 1710 
 1711 	net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev));
 1712 }
 1713 
 1714 #ifdef CONFIG_PM
 1715 
 1716 /* The controller doesn't provide PCI PM capabilities as defined by the PCI specs.
 1717  * Some of the Linux PCI-PM code however depends on them, for example in
 1718  * pci_set_power_state(). So we have to take care to perform the required
 1719  * operations on our own (particularly reflecting pdev->current_state),
 1720  * otherwise we might get cheated by pci-pm.
 1721  */
 1722 
 1723 
 1724 static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
 1725 {
 1726 	struct net_device *ndev = pci_get_drvdata(pdev);
 1727 	vlsi_irda_dev_t *idev;
 1728 
 1729 	if (!ndev) {
 1730 		net_err_ratelimited("%s - %s: no netdevice\n",
 1731 				    __func__, pci_name(pdev));
 1732 		return 0;
 1733 	}
 1734 	idev = netdev_priv(ndev);
 1735 	mutex_lock(&idev->mtx);
 1736 	if (pdev->current_state != 0) {			/* already suspended */
 1737 		if (state.event > pdev->current_state) {	/* simply go deeper */
 1738 			pci_set_power_state(pdev, pci_choose_state(pdev, state));
 1739 			pdev->current_state = state.event;
 1740 		}
 1741 		else
 1742 			net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
 1743 					    __func__, pci_name(pdev),
 1744 					    pdev->current_state, state.event);
 1745 		mutex_unlock(&idev->mtx);
 1746 		return 0;
 1747 	}
 1748 
 1749 	if (netif_running(ndev)) {
 1750 		netif_device_detach(ndev);
 1751 		vlsi_stop_hw(idev);
 1752 		pci_save_state(pdev);
 1753 		if (!idev->new_baud)
 1754 			/* remember speed settings to restore on resume */
 1755 			idev->new_baud = idev->baud;
 1756 	}
 1757 
 1758 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 1759 	pdev->current_state = state.event;
 1760 	idev->resume_ok = 1;
 1761 	mutex_unlock(&idev->mtx);
 1762 	return 0;
 1763 }
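
/* Illustrative note, not part of the driver source: since the chip lacks
 * PCI-PM capability registers, pdev->current_state is tracked by hand -
 * 0 (PM_EVENT_ON) while running, the pm_message_t event value while
 * suspended. A nested suspend request is honoured only when it goes
 * "deeper" (a larger event value), and resume always returns to
 * PCI_D0 / PM_EVENT_ON, with resume_ok guarding against garbage state.
 */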
 1764 
 1765 static int vlsi_irda_resume(struct pci_dev *pdev)
 1766 {
 1767 	struct net_device *ndev = pci_get_drvdata(pdev);
 1768 	vlsi_irda_dev_t	*idev;
 1769 
 1770 	if (!ndev) {
 1771 		net_err_ratelimited("%s - %s: no netdevice\n",
 1772 				    __func__, pci_name(pdev));
 1773 		return 0;
 1774 	}
 1775 	idev = netdev_priv(ndev);
 1776 	mutex_lock(&idev->mtx);
 1777 	if (pdev->current_state == 0) {
 1778 		mutex_unlock(&idev->mtx);
 1779 		net_warn_ratelimited("%s - %s: already resumed\n",
 1780 				     __func__, pci_name(pdev));
 1781 		return 0;
 1782 	}
 1783 	
 1784 	pci_set_power_state(pdev, PCI_D0);
 1785 	pdev->current_state = PM_EVENT_ON;
 1786 
 1787 	if (!idev->resume_ok) {
 1788 		/* should be obsolete now - but used to happen due to:
 1789 		 * - pci layer initially setting pdev->current_state = 4 (unknown)
 1790 		 * - pci layer did not walk the save_state-tree (might be APM problem)
 1791 		 *   so we could not refuse to suspend from undefined state
 1792 		 * - vlsi_irda_suspend detected invalid state and refused to save
 1793 		 *   configuration for resume - but was too late to stop suspending
 1794 		 * - vlsi_irda_resume got screwed when trying to resume from garbage
 1795 		 *
 1796 		 * now we explicitly set pdev->current_state = 0 after enabling the
 1797 		 * device and independently resume_ok should catch any garbage config.
 1798 		 */
 1799 		net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
 1800 		mutex_unlock(&idev->mtx);
 1801 		return 0;
 1802 	}
 1803 
 1804 	if (netif_running(ndev)) {
 1805 		pci_restore_state(pdev);
 1806 		vlsi_start_hw(idev);
 1807 		netif_device_attach(ndev);
 1808 	}
 1809 	idev->resume_ok = 0;
 1810 	mutex_unlock(&idev->mtx);
 1811 	return 0;
 1812 }
 1813 
 1814 #endif /* CONFIG_PM */
 1815 
 1816 /*********************************************************/
 1817 
 1818 static struct pci_driver vlsi_irda_driver = {
 1819 	.name		= drivername,
 1820 	.id_table	= vlsi_irda_table,
 1821 	.probe		= vlsi_irda_probe,
 1822 	.remove		= vlsi_irda_remove,
 1823 #ifdef CONFIG_PM
 1824 	.suspend	= vlsi_irda_suspend,
 1825 	.resume		= vlsi_irda_resume,
 1826 #endif
 1827 };
 1828 
 1829 #define PROC_DIR ("driver/" DRIVER_NAME)
 1830 
 1831 static int __init vlsi_mod_init(void)
 1832 {
 1833 	int	i, ret;
 1834 
 1835 	if (clksrc < 0  ||  clksrc > 3) {
 1836 		net_err_ratelimited("%s: invalid clksrc=%d\n",
 1837 				    drivername, clksrc);
 1838 		return -1;
 1839 	}
 1840 
 1841 	for (i = 0; i < 2; i++) {
 1842 		switch(ringsize[i]) {
 1843 			case 4:
 1844 			case 8:
 1845 			case 16:
 1846 			case 32:
 1847 			case 64:
 1848 				break;
 1849 			default:
 1850 				net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
 1851 						     drivername,
 1852 						     i ? "rx" : "tx",
 1853 						     ringsize[i]);
 1854 				ringsize[i] = 8;
 1855 				break;
 1856 		}
 1857 	} 
 1858 
 1859 	sirpulse = !!sirpulse;
 1860 
 1861 	/* proc_mkdir returns NULL if !CONFIG_PROC_FS.
 1862 	 * Failure to create the procfs entry is handled like running
 1863 	 * without procfs - it's not required for the driver to work.
 1864 	 */
 1865 	vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);
 1866 
 1867 	ret = pci_register_driver(&vlsi_irda_driver);
 1868 
 1869 	if (ret && vlsi_proc_root)
 1870 		remove_proc_entry(PROC_DIR, NULL);
 1871 	return ret;
 1872 
 1873 }
 1874 
 1875 static void __exit vlsi_mod_exit(void)
 1876 {
 1877 	pci_unregister_driver(&vlsi_irda_driver);
 1878 	if (vlsi_proc_root)
 1879 		remove_proc_entry(PROC_DIR, NULL);
 1880 }
 1881 
 1882 module_init(vlsi_mod_init);
 1883 module_exit(vlsi_mod_exit);
 1884 
 1885 
 1886 
 1887 
 1888 
 1889 /* LDV_COMMENT_BEGIN_MAIN */
 1890 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 1891 
 1892 /*###########################################################################*/
 1893 
 1894 /*############## Driver Environment Generator 0.2 output ####################*/
 1895 
 1896 /*###########################################################################*/
 1897 
 1898 
 1899 
 1900 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
 1901 void ldv_check_final_state(void);
 1902 
 1903 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 1904 void ldv_check_return_value(int res);
 1905 
 1906 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 1907 void ldv_check_return_value_probe(int res);
 1908 
 1909 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 1910 void ldv_initialize(void);
 1911 
 1912 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 1913 void ldv_handler_precall(void);
 1914 
 1915 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
 1916 int nondet_int(void);
 1917 
 1918 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 1919 int LDV_IN_INTERRUPT;
 1920 
 1921 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 1922 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 1923 
 1924 
 1925 
 1926 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 1927 	/*============================= VARIABLE DECLARATION PART   =============================*/
 1928 	/** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/
 1929 	/* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/
 1930 	/* LDV_COMMENT_BEGIN_PREP */
 1931 	#define DRIVER_NAME 		"vlsi_ir"
 1932 	#define DRIVER_VERSION		"v0.5"
 1933 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 1934 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 1935 	#ifdef CONFIG_PROC_FS
 1936 	/* LDV_COMMENT_END_PREP */
 1937 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */
 1938 	struct inode * var_group1;
 1939 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_seq_open" */
 1940 	struct file * var_group2;
 1941 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_seq_open" */
 1942 	static int res_vlsi_seq_open_6;
 1943 	/* LDV_COMMENT_BEGIN_PREP */
 1944 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 1945 	#else	
 1946 	#define VLSI_PROC_FOPS		NULL
 1947 	#endif
 1948 	#ifdef CONFIG_PM
 1949 	#endif 
 1950 	#ifdef CONFIG_PM
 1951 	#endif
 1952 	#define PROC_DIR ("driver/" DRIVER_NAME)
 1953 	/* LDV_COMMENT_END_PREP */
 1954 
 1955 	/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 1956 	/* content: static int vlsi_open(struct net_device *ndev)*/
 1957 	/* LDV_COMMENT_BEGIN_PREP */
 1958 	#define DRIVER_NAME 		"vlsi_ir"
 1959 	#define DRIVER_VERSION		"v0.5"
 1960 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 1961 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 1962 	#ifdef CONFIG_PROC_FS
 1963 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 1964 	#else	
 1965 	#define VLSI_PROC_FOPS		NULL
 1966 	#endif
 1967 	/* LDV_COMMENT_END_PREP */
 1968 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_open" */
 1969 	struct net_device * var_group3;
 1970 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_open" */
 1971 	static int res_vlsi_open_29;
 1972 	/* LDV_COMMENT_BEGIN_PREP */
 1973 	#ifdef CONFIG_PM
 1974 	#endif 
 1975 	#ifdef CONFIG_PM
 1976 	#endif
 1977 	#define PROC_DIR ("driver/" DRIVER_NAME)
 1978 	/* LDV_COMMENT_END_PREP */
 1979 	/* content: static int vlsi_close(struct net_device *ndev)*/
 1980 	/* LDV_COMMENT_BEGIN_PREP */
 1981 	#define DRIVER_NAME 		"vlsi_ir"
 1982 	#define DRIVER_VERSION		"v0.5"
 1983 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 1984 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 1985 	#ifdef CONFIG_PROC_FS
 1986 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 1987 	#else	
 1988 	#define VLSI_PROC_FOPS		NULL
 1989 	#endif
 1990 	/* LDV_COMMENT_END_PREP */
 1991 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_close" */
 1992 	static int res_vlsi_close_30;
 1993 	/* LDV_COMMENT_BEGIN_PREP */
 1994 	#ifdef CONFIG_PM
 1995 	#endif 
 1996 	#ifdef CONFIG_PM
 1997 	#endif
 1998 	#define PROC_DIR ("driver/" DRIVER_NAME)
 1999 	/* LDV_COMMENT_END_PREP */
 2000 	/* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/
 2001 	/* LDV_COMMENT_BEGIN_PREP */
 2002 	#define DRIVER_NAME 		"vlsi_ir"
 2003 	#define DRIVER_VERSION		"v0.5"
 2004 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2005 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2006 	#ifdef CONFIG_PROC_FS
 2007 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2008 	#else	
 2009 	#define VLSI_PROC_FOPS		NULL
 2010 	#endif
 2011 	/* LDV_COMMENT_END_PREP */
 2012 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_hard_start_xmit" */
 2013 	struct sk_buff * var_group4;
 2014 	/* LDV_COMMENT_BEGIN_PREP */
 2015 	#ifdef CONFIG_PM
 2016 	#endif 
 2017 	#ifdef CONFIG_PM
 2018 	#endif
 2019 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2020 	/* LDV_COMMENT_END_PREP */
 2021 	/* content: static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)*/
 2022 	/* LDV_COMMENT_BEGIN_PREP */
 2023 	#define DRIVER_NAME 		"vlsi_ir"
 2024 	#define DRIVER_VERSION		"v0.5"
 2025 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2026 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2027 	#ifdef CONFIG_PROC_FS
 2028 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2029 	#else	
 2030 	#define VLSI_PROC_FOPS		NULL
 2031 	#endif
 2032 	/* LDV_COMMENT_END_PREP */
 2033 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */
 2034 	struct ifreq * var_group5;
 2035 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_ioctl" */
 2036 	int  var_vlsi_ioctl_27_p2;
 2037 	/* LDV_COMMENT_BEGIN_PREP */
 2038 	#ifdef CONFIG_PM
 2039 	#endif 
 2040 	#ifdef CONFIG_PM
 2041 	#endif
 2042 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2043 	/* LDV_COMMENT_END_PREP */
 2044 	/* content: static void vlsi_tx_timeout(struct net_device *ndev)*/
 2045 	/* LDV_COMMENT_BEGIN_PREP */
 2046 	#define DRIVER_NAME 		"vlsi_ir"
 2047 	#define DRIVER_VERSION		"v0.5"
 2048 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2049 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2050 	#ifdef CONFIG_PROC_FS
 2051 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2052 	#else	
 2053 	#define VLSI_PROC_FOPS		NULL
 2054 	#endif
 2055 	/* LDV_COMMENT_END_PREP */
 2056 	/* LDV_COMMENT_BEGIN_PREP */
 2057 	#ifdef CONFIG_PM
 2058 	#endif 
 2059 	#ifdef CONFIG_PM
 2060 	#endif
 2061 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2062 	/* LDV_COMMENT_END_PREP */
 2063 
 2064 	/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2065 	/* content: static int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
 2066 	/* LDV_COMMENT_BEGIN_PREP */
 2067 	#define DRIVER_NAME 		"vlsi_ir"
 2068 	#define DRIVER_VERSION		"v0.5"
 2069 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2070 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2071 	#ifdef CONFIG_PROC_FS
 2072 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2073 	#else	
 2074 	#define VLSI_PROC_FOPS		NULL
 2075 	#endif
 2076 	/* LDV_COMMENT_END_PREP */
 2077 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */
 2078 	struct pci_dev * var_group6;
 2079 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_probe" */
 2080 	const struct pci_device_id * var_vlsi_irda_probe_32_p1;
 2081 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "vlsi_irda_probe" */
 2082 	static int res_vlsi_irda_probe_32;
 2083 	/* LDV_COMMENT_BEGIN_PREP */
 2084 	#ifdef CONFIG_PM
 2085 	#endif 
 2086 	#ifdef CONFIG_PM
 2087 	#endif
 2088 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2089 	/* LDV_COMMENT_END_PREP */
 2090 	/* content: static void vlsi_irda_remove(struct pci_dev *pdev)*/
 2091 	/* LDV_COMMENT_BEGIN_PREP */
 2092 	#define DRIVER_NAME 		"vlsi_ir"
 2093 	#define DRIVER_VERSION		"v0.5"
 2094 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2095 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2096 	#ifdef CONFIG_PROC_FS
 2097 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2098 	#else	
 2099 	#define VLSI_PROC_FOPS		NULL
 2100 	#endif
 2101 	/* LDV_COMMENT_END_PREP */
 2102 	/* LDV_COMMENT_BEGIN_PREP */
 2103 	#ifdef CONFIG_PM
 2104 	#endif 
 2105 	#ifdef CONFIG_PM
 2106 	#endif
 2107 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2108 	/* LDV_COMMENT_END_PREP */
 2109 	/* content: static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)*/
 2110 	/* LDV_COMMENT_BEGIN_PREP */
 2111 	#define DRIVER_NAME 		"vlsi_ir"
 2112 	#define DRIVER_VERSION		"v0.5"
 2113 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2114 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2115 	#ifdef CONFIG_PROC_FS
 2116 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2117 	#else	
 2118 	#define VLSI_PROC_FOPS		NULL
 2119 	#endif
 2120 	#ifdef CONFIG_PM
 2121 	/* LDV_COMMENT_END_PREP */
 2122 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_irda_suspend" */
 2123 	pm_message_t  var_vlsi_irda_suspend_34_p1;
 2124 	/* LDV_COMMENT_BEGIN_PREP */
 2125 	#endif 
 2126 	#ifdef CONFIG_PM
 2127 	#endif
 2128 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2129 	/* LDV_COMMENT_END_PREP */
 2130 	/* content: static int vlsi_irda_resume(struct pci_dev *pdev)*/
 2131 	/* LDV_COMMENT_BEGIN_PREP */
 2132 	#define DRIVER_NAME 		"vlsi_ir"
 2133 	#define DRIVER_VERSION		"v0.5"
 2134 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2135 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2136 	#ifdef CONFIG_PROC_FS
 2137 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2138 	#else	
 2139 	#define VLSI_PROC_FOPS		NULL
 2140 	#endif
 2141 	#ifdef CONFIG_PM
 2142 	/* LDV_COMMENT_END_PREP */
 2143 	/* LDV_COMMENT_BEGIN_PREP */
 2144 	#endif 
 2145 	#ifdef CONFIG_PM
 2146 	#endif
 2147 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2148 	/* LDV_COMMENT_END_PREP */
 2149 
 2150 	/** CALLBACK SECTION request_irq **/
 2151 	/* content: static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)*/
 2152 	/* LDV_COMMENT_BEGIN_PREP */
 2153 	#define DRIVER_NAME 		"vlsi_ir"
 2154 	#define DRIVER_VERSION		"v0.5"
 2155 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2156 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2157 	#ifdef CONFIG_PROC_FS
 2158 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2159 	#else	
 2160 	#define VLSI_PROC_FOPS		NULL
 2161 	#endif
 2162 	/* LDV_COMMENT_END_PREP */
 2163 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */
 2164 	int  var_vlsi_interrupt_28_p0;
 2165 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "vlsi_interrupt" */
 2166 	void * var_vlsi_interrupt_28_p1;
 2167 	/* LDV_COMMENT_BEGIN_PREP */
 2168 	#ifdef CONFIG_PM
 2169 	#endif 
 2170 	#ifdef CONFIG_PM
 2171 	#endif
 2172 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2173 	/* LDV_COMMENT_END_PREP */
 2174 
 2175 
 2176 
 2177 
 2178 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 2179 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 2180 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 2181 	LDV_IN_INTERRUPT=1;
 2182 
 2183 
 2184 
 2185 
 2186 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 2187 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 2188 	/*============================= FUNCTION CALL SECTION       =============================*/
 2189 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 2190 	ldv_initialize();
 2191 
 2192 	/** INIT: init_type: ST_MODULE_INIT **/
 2193 	/* content: static int __init vlsi_mod_init(void)*/
 2194 	/* LDV_COMMENT_BEGIN_PREP */
 2195 	#define DRIVER_NAME 		"vlsi_ir"
 2196 	#define DRIVER_VERSION		"v0.5"
 2197 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2198 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2199 	#ifdef CONFIG_PROC_FS
 2200 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2201 	#else	
 2202 	#define VLSI_PROC_FOPS		NULL
 2203 	#endif
 2204 	#ifdef CONFIG_PM
 2205 	#endif 
 2206 	#ifdef CONFIG_PM
 2207 	#endif
 2208 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2209 	/* LDV_COMMENT_END_PREP */
 2210 	/* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
 2211 	ldv_handler_precall();
 2212 	 if(vlsi_mod_init()) 
 2213 		goto ldv_final;
 2214 	int ldv_s_vlsi_proc_fops_file_operations = 0;
 2215 
 2216 	int ldv_s_vlsi_netdev_ops_net_device_ops = 0;
 2217 	
 2218 
 2219 	int ldv_s_vlsi_irda_driver_pci_driver = 0;
 2220 	
 2221 
 2222 	
 2223 
 2224 
 2225 	while(  nondet_int()
 2226 		|| !(ldv_s_vlsi_proc_fops_file_operations == 0)
 2227 		|| !(ldv_s_vlsi_netdev_ops_net_device_ops == 0)
 2228 		|| !(ldv_s_vlsi_irda_driver_pci_driver == 0)
 2229 	) {
 2230 
 2231 		switch(nondet_int()) {
 2232 
 2233 			case 0: {
 2234 
 2235 				/** STRUCT: struct type: file_operations, struct name: vlsi_proc_fops **/
 2236 				if(ldv_s_vlsi_proc_fops_file_operations==0) {
 2237 
 2238 				/* content: static int vlsi_seq_open(struct inode *inode, struct file *file)*/
 2239 				/* LDV_COMMENT_BEGIN_PREP */
 2240 				#define DRIVER_NAME 		"vlsi_ir"
 2241 				#define DRIVER_VERSION		"v0.5"
 2242 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2243 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2244 				#ifdef CONFIG_PROC_FS
 2245 				/* LDV_COMMENT_END_PREP */
 2246 				/* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "vlsi_proc_fops". Standard function test for correct return result. */
 2247 				ldv_handler_precall();
 2248 				res_vlsi_seq_open_6 = vlsi_seq_open( var_group1, var_group2);
 2249 				 ldv_check_return_value(res_vlsi_seq_open_6);
 2250 				 if(res_vlsi_seq_open_6) 
 2251 					goto ldv_module_exit;
 2252 				/* LDV_COMMENT_BEGIN_PREP */
 2253 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2254 				#else	
 2255 				#define VLSI_PROC_FOPS		NULL
 2256 				#endif
 2257 				#ifdef CONFIG_PM
 2258 				#endif 
 2259 				#ifdef CONFIG_PM
 2260 				#endif
 2261 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2262 				/* LDV_COMMENT_END_PREP */
 2263 				ldv_s_vlsi_proc_fops_file_operations=0;
 2264 
 2265 				}
 2266 
 2267 			}
 2268 
 2269 			break;
 2270 			case 1: {
 2271 
 2272 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2273 				if(ldv_s_vlsi_netdev_ops_net_device_ops==0) {
 2274 
 2275 				/* content: static int vlsi_open(struct net_device *ndev)*/
 2276 				/* LDV_COMMENT_BEGIN_PREP */
 2277 				#define DRIVER_NAME 		"vlsi_ir"
 2278 				#define DRIVER_VERSION		"v0.5"
 2279 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2280 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2281 				#ifdef CONFIG_PROC_FS
 2282 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2283 				#else	
 2284 				#define VLSI_PROC_FOPS		NULL
 2285 				#endif
 2286 				/* LDV_COMMENT_END_PREP */
 2287 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result. */
 2288 				ldv_handler_precall();
 2289 				res_vlsi_open_29 = vlsi_open( var_group3);
 2290 				 ldv_check_return_value(res_vlsi_open_29);
 2291 				 if(res_vlsi_open_29 < 0) 
 2292 					goto ldv_module_exit;
 2293 				/* LDV_COMMENT_BEGIN_PREP */
 2294 				#ifdef CONFIG_PM
 2295 				#endif 
 2296 				#ifdef CONFIG_PM
 2297 				#endif
 2298 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2299 				/* LDV_COMMENT_END_PREP */
 2300 				ldv_s_vlsi_netdev_ops_net_device_ops++;
 2301 
 2302 				}
 2303 
 2304 			}
 2305 
 2306 			break;
 2307 			case 2: {
 2308 
 2309 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2310 				if(ldv_s_vlsi_netdev_ops_net_device_ops==1) {
 2311 
 2312 				/* content: static int vlsi_close(struct net_device *ndev)*/
 2313 				/* LDV_COMMENT_BEGIN_PREP */
 2314 				#define DRIVER_NAME 		"vlsi_ir"
 2315 				#define DRIVER_VERSION		"v0.5"
 2316 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2317 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2318 				#ifdef CONFIG_PROC_FS
 2319 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2320 				#else	
 2321 				#define VLSI_PROC_FOPS		NULL
 2322 				#endif
 2323 				/* LDV_COMMENT_END_PREP */
 2324 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "vlsi_netdev_ops". Standard function test for correct return result. */
 2325 				ldv_handler_precall();
 2326 				res_vlsi_close_30 = vlsi_close( var_group3);
 2327 				 ldv_check_return_value(res_vlsi_close_30);
 2328 				 if(res_vlsi_close_30) 
 2329 					goto ldv_module_exit;
 2330 				/* LDV_COMMENT_BEGIN_PREP */
 2331 				#ifdef CONFIG_PM
 2332 				#endif 
 2333 				#ifdef CONFIG_PM
 2334 				#endif
 2335 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2336 				/* LDV_COMMENT_END_PREP */
 2337 				ldv_s_vlsi_netdev_ops_net_device_ops=0;
 2338 
 2339 				}
 2340 
 2341 			}
 2342 
 2343 			break;
 2344 			case 3: {
 2345 
 2346 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2347 				
 2348 
 2349 				/* content: static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)*/
 2350 				/* LDV_COMMENT_BEGIN_PREP */
 2351 				#define DRIVER_NAME 		"vlsi_ir"
 2352 				#define DRIVER_VERSION		"v0.5"
 2353 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2354 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2355 				#ifdef CONFIG_PROC_FS
 2356 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2357 				#else	
 2358 				#define VLSI_PROC_FOPS		NULL
 2359 				#endif
 2360 				/* LDV_COMMENT_END_PREP */
 2361 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "vlsi_netdev_ops" */
 2362 				ldv_handler_precall();
 2363 				vlsi_hard_start_xmit( var_group4, var_group3);
 2364 				/* LDV_COMMENT_BEGIN_PREP */
 2365 				#ifdef CONFIG_PM
 2366 				#endif 
 2367 				#ifdef CONFIG_PM
 2368 				#endif
 2369 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2370 				/* LDV_COMMENT_END_PREP */
 2371 				
 2372 
 2373 				
 2374 
 2375 			}
 2376 
 2377 			break;
 2378 			case 4: {
 2379 
 2380 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2381 				
 2382 
 2383 				/* content: static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)*/
 2384 				/* LDV_COMMENT_BEGIN_PREP */
 2385 				#define DRIVER_NAME 		"vlsi_ir"
 2386 				#define DRIVER_VERSION		"v0.5"
 2387 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2388 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2389 				#ifdef CONFIG_PROC_FS
 2390 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2391 				#else	
 2392 				#define VLSI_PROC_FOPS		NULL
 2393 				#endif
 2394 				/* LDV_COMMENT_END_PREP */
 2395 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "vlsi_netdev_ops" */
 2396 				ldv_handler_precall();
 2397 				vlsi_ioctl( var_group3, var_group5, var_vlsi_ioctl_27_p2);
 2398 				/* LDV_COMMENT_BEGIN_PREP */
 2399 				#ifdef CONFIG_PM
 2400 				#endif 
 2401 				#ifdef CONFIG_PM
 2402 				#endif
 2403 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2404 				/* LDV_COMMENT_END_PREP */
 2405 				
 2406 
 2407 				
 2408 
 2409 			}
 2410 
 2411 			break;
 2412 			case 5: {
 2413 
 2414 				/** STRUCT: struct type: net_device_ops, struct name: vlsi_netdev_ops **/
 2415 				
 2416 
 2417 				/* content: static void vlsi_tx_timeout(struct net_device *ndev)*/
 2418 				/* LDV_COMMENT_BEGIN_PREP */
 2419 				#define DRIVER_NAME 		"vlsi_ir"
 2420 				#define DRIVER_VERSION		"v0.5"
 2421 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2422 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2423 				#ifdef CONFIG_PROC_FS
 2424 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2425 				#else	
 2426 				#define VLSI_PROC_FOPS		NULL
 2427 				#endif
 2428 				/* LDV_COMMENT_END_PREP */
 2429 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_tx_timeout" from driver structure with callbacks "vlsi_netdev_ops" */
 2430 				ldv_handler_precall();
 2431 				vlsi_tx_timeout( var_group3);
 2432 				/* LDV_COMMENT_BEGIN_PREP */
 2433 				#ifdef CONFIG_PM
 2434 				#endif 
 2435 				#ifdef CONFIG_PM
 2436 				#endif
 2437 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2438 				/* LDV_COMMENT_END_PREP */
 2439 				
 2440 
 2441 				
 2442 
 2443 			}
 2444 
 2445 			break;
 2446 			case 6: {
 2447 
 2448 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2449 				if(ldv_s_vlsi_irda_driver_pci_driver==0) {
 2450 
 2451 				/* content: static int vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)*/
 2452 				/* LDV_COMMENT_BEGIN_PREP */
 2453 				#define DRIVER_NAME 		"vlsi_ir"
 2454 				#define DRIVER_VERSION		"v0.5"
 2455 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2456 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2457 				#ifdef CONFIG_PROC_FS
 2458 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2459 				#else	
 2460 				#define VLSI_PROC_FOPS		NULL
 2461 				#endif
 2462 				/* LDV_COMMENT_END_PREP */
2463 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "vlsi_irda_driver". Standard function test for correct return result. */
 2464 				res_vlsi_irda_probe_32 = vlsi_irda_probe( var_group6, var_vlsi_irda_probe_32_p1);
 2465 				 ldv_check_return_value(res_vlsi_irda_probe_32);
 2466 				 ldv_check_return_value_probe(res_vlsi_irda_probe_32);
 2467 				 if(res_vlsi_irda_probe_32) 
 2468 					goto ldv_module_exit;
 2469 				/* LDV_COMMENT_BEGIN_PREP */
 2470 				#ifdef CONFIG_PM
 2471 				#endif 
 2472 				#ifdef CONFIG_PM
 2473 				#endif
 2474 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2475 				/* LDV_COMMENT_END_PREP */
 2476 				ldv_s_vlsi_irda_driver_pci_driver++;
 2477 
 2478 				}
 2479 
 2480 			}
 2481 
 2482 			break;
 2483 			case 7: {
 2484 
 2485 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2486 				if(ldv_s_vlsi_irda_driver_pci_driver==1) {
 2487 
 2488 				/* content: static void vlsi_irda_remove(struct pci_dev *pdev)*/
 2489 				/* LDV_COMMENT_BEGIN_PREP */
 2490 				#define DRIVER_NAME 		"vlsi_ir"
 2491 				#define DRIVER_VERSION		"v0.5"
 2492 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2493 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2494 				#ifdef CONFIG_PROC_FS
 2495 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2496 				#else	
 2497 				#define VLSI_PROC_FOPS		NULL
 2498 				#endif
 2499 				/* LDV_COMMENT_END_PREP */
 2500 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "vlsi_irda_driver" */
 2501 				ldv_handler_precall();
 2502 				vlsi_irda_remove( var_group6);
 2503 				/* LDV_COMMENT_BEGIN_PREP */
 2504 				#ifdef CONFIG_PM
 2505 				#endif 
 2506 				#ifdef CONFIG_PM
 2507 				#endif
 2508 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2509 				/* LDV_COMMENT_END_PREP */
 2510 				ldv_s_vlsi_irda_driver_pci_driver=0;
 2511 
 2512 				}
 2513 
 2514 			}
 2515 
 2516 			break;
 2517 			case 8: {
 2518 
 2519 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2520 				
 2521 
 2522 				/* content: static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)*/
 2523 				/* LDV_COMMENT_BEGIN_PREP */
 2524 				#define DRIVER_NAME 		"vlsi_ir"
 2525 				#define DRIVER_VERSION		"v0.5"
 2526 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2527 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2528 				#ifdef CONFIG_PROC_FS
 2529 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2530 				#else	
 2531 				#define VLSI_PROC_FOPS		NULL
 2532 				#endif
 2533 				#ifdef CONFIG_PM
 2534 				/* LDV_COMMENT_END_PREP */
 2535 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "vlsi_irda_driver" */
 2536 				ldv_handler_precall();
 2537 				vlsi_irda_suspend( var_group6, var_vlsi_irda_suspend_34_p1);
 2538 				/* LDV_COMMENT_BEGIN_PREP */
 2539 				#endif 
 2540 				#ifdef CONFIG_PM
 2541 				#endif
 2542 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2543 				/* LDV_COMMENT_END_PREP */
 2544 				
 2545 
 2546 				
 2547 
 2548 			}
 2549 
 2550 			break;
 2551 			case 9: {
 2552 
 2553 				/** STRUCT: struct type: pci_driver, struct name: vlsi_irda_driver **/
 2554 				
 2555 
 2556 				/* content: static int vlsi_irda_resume(struct pci_dev *pdev)*/
 2557 				/* LDV_COMMENT_BEGIN_PREP */
 2558 				#define DRIVER_NAME 		"vlsi_ir"
 2559 				#define DRIVER_VERSION		"v0.5"
 2560 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2561 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2562 				#ifdef CONFIG_PROC_FS
 2563 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2564 				#else	
 2565 				#define VLSI_PROC_FOPS		NULL
 2566 				#endif
 2567 				#ifdef CONFIG_PM
 2568 				/* LDV_COMMENT_END_PREP */
 2569 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "vlsi_irda_driver" */
 2570 				ldv_handler_precall();
 2571 				vlsi_irda_resume( var_group6);
 2572 				/* LDV_COMMENT_BEGIN_PREP */
 2573 				#endif 
 2574 				#ifdef CONFIG_PM
 2575 				#endif
 2576 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2577 				/* LDV_COMMENT_END_PREP */
 2578 				
 2579 
 2580 				
 2581 
 2582 			}
 2583 
 2584 			break;
 2585 			case 10: {
 2586 
 2587 				/** CALLBACK SECTION request_irq **/
 2588 				LDV_IN_INTERRUPT=2;
 2589 
 2590 				/* content: static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)*/
 2591 				/* LDV_COMMENT_BEGIN_PREP */
 2592 				#define DRIVER_NAME 		"vlsi_ir"
 2593 				#define DRIVER_VERSION		"v0.5"
 2594 				#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2595 				#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2596 				#ifdef CONFIG_PROC_FS
 2597 				#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2598 				#else	
 2599 				#define VLSI_PROC_FOPS		NULL
 2600 				#endif
 2601 				/* LDV_COMMENT_END_PREP */
 2602 				/* LDV_COMMENT_FUNCTION_CALL */
 2603 				ldv_handler_precall();
 2604 				vlsi_interrupt( var_vlsi_interrupt_28_p0, var_vlsi_interrupt_28_p1);
 2605 				/* LDV_COMMENT_BEGIN_PREP */
 2606 				#ifdef CONFIG_PM
 2607 				#endif 
 2608 				#ifdef CONFIG_PM
 2609 				#endif
 2610 				#define PROC_DIR ("driver/" DRIVER_NAME)
 2611 				/* LDV_COMMENT_END_PREP */
 2612 				LDV_IN_INTERRUPT=1;
 2613 
 2614 				
 2615 
 2616 			}
 2617 
 2618 			break;
 2619 			default: break;
 2620 
 2621 		}
 2622 
 2623 	}
 2624 
 2625 	ldv_module_exit: 
 2626 
 2627 	/** INIT: init_type: ST_MODULE_EXIT **/
 2628 	/* content: static void __exit vlsi_mod_exit(void)*/
 2629 	/* LDV_COMMENT_BEGIN_PREP */
 2630 	#define DRIVER_NAME 		"vlsi_ir"
 2631 	#define DRIVER_VERSION		"v0.5"
 2632 	#define DRIVER_DESCRIPTION	"IrDA SIR/MIR/FIR driver for VLSI 82C147"
 2633 	#define DRIVER_AUTHOR		"Martin Diehl <info@mdiehl.de>"
 2634 	#ifdef CONFIG_PROC_FS
 2635 	#define VLSI_PROC_FOPS		(&vlsi_proc_fops)
 2636 	#else	
 2637 	#define VLSI_PROC_FOPS		NULL
 2638 	#endif
 2639 	#ifdef CONFIG_PM
 2640 	#endif 
 2641 	#ifdef CONFIG_PM
 2642 	#endif
 2643 	#define PROC_DIR ("driver/" DRIVER_NAME)
 2644 	/* LDV_COMMENT_END_PREP */
2645 	/* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
 2646 	ldv_handler_precall();
 2647 	vlsi_mod_exit();
 2648 
2649 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 2650 	ldv_final: ldv_check_final_state();
 2651 
 2652 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 2653 	return;
 2654 
 2655 }
 2656 #endif
 2657 
 2658 /* LDV_COMMENT_END_MAIN */
 2659 
2660 #line 10 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/4878/dscv_tempdir/dscv/ri/331_1a/drivers/net/irda/vlsi_ir.o.c.prepared"
                 1 
    2 #include <verifier/rcv.h>
    3 #include <kernel-model/ERR.inc>
    4 
    5 int LDV_DMA_MAP_CALLS = 0;
    6 
    7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
    8 void ldv_dma_map_page(void) {
     9  /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
   10  ldv_assert(LDV_DMA_MAP_CALLS == 0);
   11  /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
   12  LDV_DMA_MAP_CALLS++;
   13 }
   14 
    15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') checks the result of a previous mapping */
    16 void ldv_dma_mapping_error(void) {
    17  /* LDV_COMMENT_ASSERT There must be an unchecked dma_mapping call to verify */
    18  ldv_assert(LDV_DMA_MAP_CALLS != 0);
    19  /* LDV_COMMENT_CHANGE_STATE Decrease dma_mapping counter: the pending mapping has now been checked */
    20  LDV_DMA_MAP_CALLS--;
   21 }
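/* Editorial sketch (not part of the generated model): the driver-side
   pattern the two model functions above enforce. Every DMA mapping must be
   checked exactly once before the next one is made; `dev`, `buf` and `len`
   are hypothetical names.

       dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
       // ldv_dma_map_page(): LDV_DMA_MAP_CALLS 0 -> 1
       if (dma_mapping_error(dev, handle))
               return -ENOMEM;
       // ldv_dma_mapping_error(): LDV_DMA_MAP_CALLS 1 -> 0

   Mapping a second time without an intervening check trips the assert in
   ldv_dma_map_page(); checking with no pending mapping trips the assert in
   ldv_dma_mapping_error(). */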
   22 
   23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
   24 void ldv_check_final_state(void) {
    25  /* LDV_COMMENT_ASSERT Every dma_mapping call must have been checked (counter back to zero) before module unloading */
   26  ldv_assert(LDV_DMA_MAP_CALLS == 0);
    27 }
                 1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
     8 /* The error label wrapper. It is used because some static verifiers (like
     9    BLAST) don't accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
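/* Editorial sketch: typical ldv_assert() usage in a model function. When
   the condition is false, control reaches the LDV_ERROR label above and the
   verifier reports an error trace. The counter name is hypothetical.

       ldv_assert(my_refcount >= 0);   // error path iff the counter went negative
*/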
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
    19 /* An infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
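/* Editorial sketch: combining ldv_undef_int() and ldv_assume() to model a
   kernel function that returns 0 on success or a negative errno on failure.
   The verifier explores both outcomes without enumerating error codes; the
   function name is hypothetical.

       static int model_register(void)
       {
               int ret = ldv_undef_int();
               ldv_assume(ret <= 0);   // success (0) or -errno, never positive
               return ret;
       }
*/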
   47 
    48 /* Add an explicit model for the __builtin_expect GCC builtin. Without the
    49    model, its return value would be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
    65 /* This constant simulates an error return from the ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
    68 #endif /* _LDV_RCV_H_ */
                 1 #ifndef __LINUX_COMPILER_H
    2 #define __LINUX_COMPILER_H
    3 
    4 #ifndef __ASSEMBLY__
    5 
    6 #ifdef __CHECKER__
    7 # define __user		__attribute__((noderef, address_space(1)))
    8 # define __kernel	__attribute__((address_space(0)))
    9 # define __safe		__attribute__((safe))
   10 # define __force	__attribute__((force))
   11 # define __nocast	__attribute__((nocast))
   12 # define __iomem	__attribute__((noderef, address_space(2)))
   13 # define __must_hold(x)	__attribute__((context(x,1,1)))
   14 # define __acquires(x)	__attribute__((context(x,0,1)))
   15 # define __releases(x)	__attribute__((context(x,1,0)))
   16 # define __acquire(x)	__context__(x,1)
   17 # define __release(x)	__context__(x,-1)
   18 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
   19 # define __percpu	__attribute__((noderef, address_space(3)))
   20 #ifdef CONFIG_SPARSE_RCU_POINTER
   21 # define __rcu		__attribute__((noderef, address_space(4)))
   22 #else /* CONFIG_SPARSE_RCU_POINTER */
   23 # define __rcu
   24 #endif /* CONFIG_SPARSE_RCU_POINTER */
   25 # define __private	__attribute__((noderef))
   26 extern void __chk_user_ptr(const volatile void __user *);
   27 extern void __chk_io_ptr(const volatile void __iomem *);
   28 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
   29 #else /* __CHECKER__ */
   30 # ifdef STRUCTLEAK_PLUGIN
   31 #  define __user __attribute__((user))
   32 # else
   33 #  define __user
   34 # endif
   35 # define __kernel
   36 # define __safe
   37 # define __force
   38 # define __nocast
   39 # define __iomem
   40 # define __chk_user_ptr(x) (void)0
   41 # define __chk_io_ptr(x) (void)0
   42 # define __builtin_warning(x, y...) (1)
   43 # define __must_hold(x)
   44 # define __acquires(x)
   45 # define __releases(x)
   46 # define __acquire(x) (void)0
   47 # define __release(x) (void)0
   48 # define __cond_lock(x,c) (c)
   49 # define __percpu
   50 # define __rcu
   51 # define __private
   52 # define ACCESS_PRIVATE(p, member) ((p)->member)
   53 #endif /* __CHECKER__ */
   54 
   55 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
   56 #define ___PASTE(a,b) a##b
   57 #define __PASTE(a,b) ___PASTE(a,b)
   58 
   59 #ifdef __KERNEL__
   60 
   61 #ifdef __GNUC__
   62 #include <linux/compiler-gcc.h>
   63 #endif
   64 
   65 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
   66 #define notrace __attribute__((hotpatch(0,0)))
   67 #else
   68 #define notrace __attribute__((no_instrument_function))
   69 #endif
   70 
   71 /* Intel compiler defines __GNUC__. So we will overwrite implementations
   72  * coming from above header files here
   73  */
   74 #ifdef __INTEL_COMPILER
   75 # include <linux/compiler-intel.h>
   76 #endif
   77 
   78 /* Clang compiler defines __GNUC__. So we will overwrite implementations
   79  * coming from above header files here
   80  */
   81 #ifdef __clang__
   82 #include <linux/compiler-clang.h>
   83 #endif
   84 
   85 /*
   86  * Generic compiler-dependent macros required for kernel
   87  * build go below this comment. Actual compiler/compiler version
   88  * specific implementations come from the above header files
   89  */
   90 
   91 struct ftrace_branch_data {
   92 	const char *func;
   93 	const char *file;
   94 	unsigned line;
   95 	union {
   96 		struct {
   97 			unsigned long correct;
   98 			unsigned long incorrect;
   99 		};
  100 		struct {
  101 			unsigned long miss;
  102 			unsigned long hit;
  103 		};
  104 		unsigned long miss_hit[2];
  105 	};
  106 };
  107 
  108 struct ftrace_likely_data {
  109 	struct ftrace_branch_data	data;
  110 	unsigned long			constant;
  111 };
  112 
  113 /*
  114  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  115  * to disable branch tracing on a per file basis.
  116  */
  117 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
  118     && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
  119 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  120 			  int expect, int is_constant);
  121 
  122 #define likely_notrace(x)	__builtin_expect(!!(x), 1)
  123 #define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
  124 
  125 #define __branch_check__(x, expect, is_constant) ({			\
  126 			int ______r;					\
  127 			static struct ftrace_likely_data		\
  128 				__attribute__((__aligned__(4)))		\
  129 				__attribute__((section("_ftrace_annotated_branch"))) \
  130 				______f = {				\
  131 				.data.func = __func__,			\
  132 				.data.file = __FILE__,			\
  133 				.data.line = __LINE__,			\
  134 			};						\
  135 			______r = __builtin_expect(!!(x), expect);	\
  136 			ftrace_likely_update(&______f, ______r,		\
  137 					     expect, is_constant);	\
  138 			______r;					\
  139 		})
  140 
  141 /*
  142  * Using __builtin_constant_p(x) to ignore cases where the return
  143  * value is always the same.  This idea is taken from a similar patch
  144  * written by Daniel Walker.
  145  */
  146 # ifndef likely
  147 #  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
  148 # endif
  149 # ifndef unlikely
  150 #  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
  151 # endif
  152 
  153 #ifdef CONFIG_PROFILE_ALL_BRANCHES
  154 /*
  155  * "Define 'is'", Bill Clinton
  156  * "Define 'if'", Steven Rostedt
  157  */
  158 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
  159 #define __trace_if(cond) \
  160 	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
  161 	({								\
  162 		int ______r;						\
  163 		static struct ftrace_branch_data			\
  164 			__attribute__((__aligned__(4)))			\
  165 			__attribute__((section("_ftrace_branch")))	\
  166 			______f = {					\
  167 				.func = __func__,			\
  168 				.file = __FILE__,			\
  169 				.line = __LINE__,			\
  170 			};						\
  171 		______r = !!(cond);					\
  172 		______f.miss_hit[______r]++;					\
  173 		______r;						\
  174 	}))
  175 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
  176 
  177 #else
  178 # define likely(x)	__builtin_expect(!!(x), 1)
  179 # define unlikely(x)	__builtin_expect(!!(x), 0)
  180 #endif
  181 
  182 /* Optimization barrier */
  183 #ifndef barrier
  184 # define barrier() __memory_barrier()
  185 #endif
  186 
  187 #ifndef barrier_data
  188 # define barrier_data(ptr) barrier()
  189 #endif
  190 
  191 /* Unreachable code */
  192 #ifndef unreachable
  193 # define unreachable() do { } while (1)
  194 #endif
  195 
  196 /*
  197  * KENTRY - kernel entry point
  198  * This can be used to annotate symbols (functions or data) that are used
  199  * without their linker symbol being referenced explicitly. For example,
  200  * interrupt vector handlers, or functions in the kernel image that are found
   201  * programmatically.
  202  *
  203  * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
  204  * are handled in their own way (with KEEP() in linker scripts).
  205  *
  206  * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
  207  * linker script. For example an architecture could KEEP() its entire
  208  * boot/exception vector code rather than annotate each function and data.
  209  */
  210 #ifndef KENTRY
  211 # define KENTRY(sym)						\
  212 	extern typeof(sym) sym;					\
  213 	static const unsigned long __kentry_##sym		\
  214 	__used							\
  215 	__attribute__((section("___kentry" "+" #sym ), used))	\
  216 	= (unsigned long)&sym;
  217 #endif
  218 
  219 #ifndef RELOC_HIDE
  220 # define RELOC_HIDE(ptr, off)					\
  221   ({ unsigned long __ptr;					\
  222      __ptr = (unsigned long) (ptr);				\
  223     (typeof(ptr)) (__ptr + (off)); })
  224 #endif
  225 
  226 #ifndef OPTIMIZER_HIDE_VAR
  227 #define OPTIMIZER_HIDE_VAR(var) barrier()
  228 #endif
  229 
  230 /* Not-quite-unique ID. */
  231 #ifndef __UNIQUE_ID
  232 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
  233 #endif
  234 
  235 #include <uapi/linux/types.h>
  236 
  237 #define __READ_ONCE_SIZE						\
  238 ({									\
  239 	switch (size) {							\
  240 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
  241 	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
  242 	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
  243 	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
  244 	default:							\
  245 		barrier();						\
  246 		__builtin_memcpy((void *)res, (const void *)p, size);	\
  247 		barrier();						\
  248 	}								\
  249 })
  250 
  251 static __always_inline
  252 void __read_once_size(const volatile void *p, void *res, int size)
  253 {
  254 	__READ_ONCE_SIZE;
  255 }
  256 
  257 #ifdef CONFIG_KASAN
  258 /*
   259  * This function is not 'inline' because __no_sanitize_address conflicts
  260  * with inlining. Attempt to inline it may cause a build failure.
  261  * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
  262  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  263  */
  264 static __no_sanitize_address __maybe_unused
  265 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  266 {
  267 	__READ_ONCE_SIZE;
  268 }
  269 #else
  270 static __always_inline
  271 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  272 {
  273 	__READ_ONCE_SIZE;
  274 }
  275 #endif
  276 
  277 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  278 {
  279 	switch (size) {
  280 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
  281 	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
  282 	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
  283 	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
  284 	default:
  285 		barrier();
  286 		__builtin_memcpy((void *)p, (const void *)res, size);
  287 		barrier();
  288 	}
  289 }
  290 
  291 /*
  292  * Prevent the compiler from merging or refetching reads or writes. The
  293  * compiler is also forbidden from reordering successive instances of
  294  * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  295  * compiler is aware of some particular ordering.  One way to make the
  296  * compiler aware of ordering is to put the two invocations of READ_ONCE,
  297  * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  298  *
  299  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  300  * data types like structs or unions. If the size of the accessed data
  301  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
  302  * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
  303  * least two memcpy()s: one for the __builtin_memcpy() and then one for
  304  * the macro doing the copy of variable - '__u' allocated on the stack.
  305  *
  306  * Their two major use cases are: (1) Mediating communication between
  307  * process-level code and irq/NMI handlers, all running on the same CPU,
  308  * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
  309  * mutilate accesses that either do not require ordering or that interact
  310  * with an explicit memory barrier or atomic instruction that provides the
  311  * required ordering.
  312  */
  313 
  314 #define __READ_ONCE(x, check)						\
  315 ({									\
  316 	union { typeof(x) __val; char __c[1]; } __u;			\
  317 	if (check)							\
  318 		__read_once_size(&(x), __u.__c, sizeof(x));		\
  319 	else								\
  320 		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
  321 	__u.__val;							\
  322 })
  323 #define READ_ONCE(x) __READ_ONCE(x, 1)
  324 
  325 /*
  326  * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
  327  * to hide memory access from KASAN.
  328  */
  329 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
  330 
  331 #define WRITE_ONCE(x, val) \
  332 ({							\
  333 	union { typeof(x) __val; char __c[1]; } __u =	\
  334 		{ .__val = (__force typeof(x)) (val) }; \
  335 	__write_once_size(&(x), __u.__c, sizeof(x));	\
  336 	__u.__val;					\
  337 })
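/* Editorial sketch: the two macros above mediating a flag shared between an
   interrupt handler and process-level code -- use case (1) from the comment.
   All names are hypothetical.

       static int dev_ready;

       static irqreturn_t my_irq(int irq, void *dev_id)
       {
               WRITE_ONCE(dev_ready, 1);          // single, un-torn store
               return IRQ_HANDLED;
       }

       static void my_wait(void)
       {
               while (!READ_ONCE(dev_ready))      // refetched on every iteration
                       cpu_relax();
       }
*/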
  338 
  339 #endif /* __KERNEL__ */
  340 
  341 #endif /* __ASSEMBLY__ */
  342 
  343 #ifdef __KERNEL__
  344 /*
  345  * Allow us to mark functions as 'deprecated' and have gcc emit a nice
  346  * warning for each use, in hopes of speeding the functions removal.
  347  * Usage is:
  348  * 		int __deprecated foo(void)
  349  */
  350 #ifndef __deprecated
  351 # define __deprecated		/* unimplemented */
  352 #endif
  353 
  354 #ifdef MODULE
  355 #define __deprecated_for_modules __deprecated
  356 #else
  357 #define __deprecated_for_modules
  358 #endif
  359 
  360 #ifndef __must_check
  361 #define __must_check
  362 #endif
  363 
  364 #ifndef CONFIG_ENABLE_MUST_CHECK
  365 #undef __must_check
  366 #define __must_check
  367 #endif
  368 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
  369 #undef __deprecated
  370 #undef __deprecated_for_modules
  371 #define __deprecated
  372 #define __deprecated_for_modules
  373 #endif
  374 
  375 #ifndef __malloc
  376 #define __malloc
  377 #endif
  378 
  379 /*
  380  * Allow us to avoid 'defined but not used' warnings on functions and data,
  381  * as well as force them to be emitted to the assembly file.
  382  *
  383  * As of gcc 3.4, static functions that are not marked with attribute((used))
  384  * may be elided from the assembly file.  As of gcc 3.4, static data not so
  385  * marked will not be elided, but this may change in a future gcc version.
  386  *
  387  * NOTE: Because distributions shipped with a backported unit-at-a-time
  388  * compiler in gcc 3.3, we must define __used to be __attribute__((used))
  389  * for gcc >=3.3 instead of 3.4.
  390  *
  391  * In prior versions of gcc, such functions and data would be emitted, but
  392  * would be warned about except with attribute((unused)).
  393  *
  394  * Mark functions that are referenced only in inline assembly as __used so
  395  * the code is emitted even though it appears to be unreferenced.
  396  */
  397 #ifndef __used
  398 # define __used			/* unimplemented */
  399 #endif
  400 
  401 #ifndef __maybe_unused
  402 # define __maybe_unused		/* unimplemented */
  403 #endif
  404 
  405 #ifndef __always_unused
  406 # define __always_unused	/* unimplemented */
  407 #endif
  408 
  409 #ifndef noinline
  410 #define noinline
  411 #endif
  412 
  413 /*
   414  * Rather than using noinline to prevent stack consumption, use
  415  * noinline_for_stack instead.  For documentation reasons.
  416  */
  417 #define noinline_for_stack noinline
  418 
  419 #ifndef __always_inline
  420 #define __always_inline inline
  421 #endif
  422 
  423 #endif /* __KERNEL__ */
  424 
  425 /*
  426  * From the GCC manual:
  427  *
  428  * Many functions do not examine any values except their arguments,
  429  * and have no effects except the return value.  Basically this is
  430  * just slightly more strict class than the `pure' attribute above,
  431  * since function is not allowed to read global memory.
  432  *
  433  * Note that a function that has pointer arguments and examines the
  434  * data pointed to must _not_ be declared `const'.  Likewise, a
  435  * function that calls a non-`const' function usually must not be
  436  * `const'.  It does not make sense for a `const' function to return
  437  * `void'.
  438  */
  439 #ifndef __attribute_const__
  440 # define __attribute_const__	/* unimplemented */
  441 #endif
  442 
  443 #ifndef __latent_entropy
  444 # define __latent_entropy
  445 #endif
  446 
  447 /*
  448  * Tell gcc if a function is cold. The compiler will assume any path
  449  * directly leading to the call is unlikely.
  450  */
  451 
  452 #ifndef __cold
  453 #define __cold
  454 #endif
  455 
  456 /* Simple shorthand for a section definition */
  457 #ifndef __section
  458 # define __section(S) __attribute__ ((__section__(#S)))
  459 #endif
  460 
  461 #ifndef __visible
  462 #define __visible
  463 #endif
  464 
  465 /*
  466  * Assume alignment of return value.
  467  */
  468 #ifndef __assume_aligned
  469 #define __assume_aligned(a, ...)
  470 #endif
  471 
  472 
  473 /* Are two types/vars the same type (ignoring qualifiers)? */
  474 #ifndef __same_type
  475 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
  476 #endif
  477 
  478 /* Is this type a native word size -- useful for atomic operations */
  479 #ifndef __native_word
  480 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
  481 #endif
  482 
  483 /* Compile time object size, -1 for unknown */
  484 #ifndef __compiletime_object_size
  485 # define __compiletime_object_size(obj) -1
  486 #endif
  487 #ifndef __compiletime_warning
  488 # define __compiletime_warning(message)
  489 #endif
  490 #ifndef __compiletime_error
  491 # define __compiletime_error(message)
  492 /*
  493  * Sparse complains of variable sized arrays due to the temporary variable in
  494  * __compiletime_assert. Unfortunately we can't just expand it out to make
  495  * sparse see a constant array size without breaking compiletime_assert on old
  496  * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
  497  */
  498 # ifndef __CHECKER__
  499 #  define __compiletime_error_fallback(condition) \
  500 	do {  } while (0)
  501 # endif
  502 #endif
  503 #ifndef __compiletime_error_fallback
  504 # define __compiletime_error_fallback(condition) do { } while (0)
  505 #endif
  506 
  507 #define __compiletime_assert(condition, msg, prefix, suffix)		\
  508 	do {								\
  509 		bool __cond = !(condition);				\
  510 		extern void prefix ## suffix(void) __compiletime_error(msg); \
  511 		if (__cond)						\
  512 			prefix ## suffix();				\
  513 		__compiletime_error_fallback(__cond);			\
  514 	} while (0)
  515 
  516 #define _compiletime_assert(condition, msg, prefix, suffix) \
  517 	__compiletime_assert(condition, msg, prefix, suffix)
  518 
  519 /**
  520  * compiletime_assert - break build and emit msg if condition is false
  521  * @condition: a compile-time constant condition to check
  522  * @msg:       a message to emit if condition is false
  523  *
  524  * In tradition of POSIX assert, this macro will break the build if the
  525  * supplied condition is *false*, emitting the supplied error message if the
  526  * compiler has support to do so.
  527  */
  528 #define compiletime_assert(condition, msg) \
  529 	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
  530 
  531 #define compiletime_assert_atomic_type(t)				\
  532 	compiletime_assert(__native_word(t),				\
  533 		"Need native word sized stores/loads for atomicity.")
  534 
  535 /*
  536  * Prevent the compiler from merging or refetching accesses.  The compiler
  537  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
  538  * but only when the compiler is aware of some particular ordering.  One way
  539  * to make the compiler aware of ordering is to put the two invocations of
  540  * ACCESS_ONCE() in different C statements.
  541  *
  542  * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
  543  * on a union member will work as long as the size of the member matches the
  544  * size of the union and the size is smaller than word size.
  545  *
  546  * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
  547  * between process-level code and irq/NMI handlers, all running on the same CPU,
  548  * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
  549  * mutilate accesses that either do not require ordering or that interact
  550  * with an explicit memory barrier or atomic instruction that provides the
  551  * required ordering.
  552  *
  553  * If possible use READ_ONCE()/WRITE_ONCE() instead.
  554  */
  555 #define __ACCESS_ONCE(x) ({ \
  556 	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
  557 	(volatile typeof(x) *)&(x); })
  558 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
  559 
  560 /**
  561  * lockless_dereference() - safely load a pointer for later dereference
  562  * @p: The pointer to load
  563  *
  564  * Similar to rcu_dereference(), but for situations where the pointed-to
  565  * object's lifetime is managed by something other than RCU.  That
  566  * "something other" might be reference counting or simple immortality.
  567  *
  568  * The seemingly unused variable ___typecheck_p validates that @p is
  569  * indeed a pointer type by using a pointer to typeof(*p) as the type.
  570  * Taking a pointer to typeof(*p) again is needed in case p is void *.
  571  */
  572 #define lockless_dereference(p) \
  573 ({ \
  574 	typeof(p) _________p1 = READ_ONCE(p); \
  575 	typeof(*(p)) *___typecheck_p __maybe_unused; \
  576 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
  577 	(_________p1); \
  578 })
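/* Editorial sketch: loading a shared pointer whose target is kept alive by
   reference counting rather than RCU; `global_cfg` and its type are
   hypothetical.

       struct my_cfg *cfg = lockless_dereference(global_cfg);
       if (cfg)
               use(cfg->value);   // dependent reads are ordered after the load
*/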
  579 
   580 #endif /* __LINUX_COMPILER_H */
                 1 #ifndef _LINUX_DMA_MAPPING_H
    2 #define _LINUX_DMA_MAPPING_H
    3 
    4 #include <linux/sizes.h>
    5 #include <linux/string.h>
    6 #include <linux/device.h>
    7 #include <linux/err.h>
    8 #include <linux/dma-debug.h>
    9 #include <linux/dma-direction.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/kmemcheck.h>
   12 #include <linux/bug.h>
   13 
   14 /**
   15  * List of possible attributes associated with a DMA mapping. The semantics
   16  * of each attribute should be defined in Documentation/DMA-attributes.txt.
   17  *
   18  * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
   19  * forces all pending DMA writes to complete.
   20  */
   21 #define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
   22 /*
   23  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
   24  * may be weakly ordered, that is that reads and writes may pass each other.
   25  */
   26 #define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
   27 /*
   28  * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
   29  * buffered to improve performance.
   30  */
   31 #define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
   32 /*
    33  * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
   34  * consistent or non-consistent memory as it sees fit.
   35  */
   36 #define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
   37 /*
    38  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
   39  * virtual mapping for the allocated buffer.
   40  */
   41 #define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
   42 /*
   43  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
   44  * the CPU cache for the given buffer assuming that it has been already
   45  * transferred to 'device' domain.
   46  */
   47 #define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
   48 /*
   49  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
   50  * in physical memory.
   51  */
   52 #define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
   53 /*
   54  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
    55  * that it's probably not worth the time to try to allocate memory in a way
   56  * that gives better TLB efficiency.
   57  */
   58 #define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
   59 /*
   60  * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
   61  * allocation failure reports (similarly to __GFP_NOWARN).
   62  */
   63 #define DMA_ATTR_NO_WARN	(1UL << 8)
   64 
   65 /*
   66  * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
   67  * accessible at an elevated privilege level (and ideally inaccessible or
   68  * at least read-only at lesser-privileged levels).
   69  */
   70 #define DMA_ATTR_PRIVILEGED		(1UL << 9)
   71 
   72 /*
   73  * A dma_addr_t can hold any valid DMA or bus address for the platform.
   74  * It can be given to a device to use as a DMA source or target.  A CPU cannot
   75  * reference a dma_addr_t directly because there may be translation between
   76  * its physical address space and the bus address space.
   77  */
   78 struct dma_map_ops {
   79 	void* (*alloc)(struct device *dev, size_t size,
   80 				dma_addr_t *dma_handle, gfp_t gfp,
   81 				unsigned long attrs);
   82 	void (*free)(struct device *dev, size_t size,
   83 			      void *vaddr, dma_addr_t dma_handle,
   84 			      unsigned long attrs);
   85 	int (*mmap)(struct device *, struct vm_area_struct *,
   86 			  void *, dma_addr_t, size_t,
   87 			  unsigned long attrs);
   88 
   89 	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
   90 			   dma_addr_t, size_t, unsigned long attrs);
   91 
   92 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
   93 			       unsigned long offset, size_t size,
   94 			       enum dma_data_direction dir,
   95 			       unsigned long attrs);
   96 	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
   97 			   size_t size, enum dma_data_direction dir,
   98 			   unsigned long attrs);
   99 	/*
  100 	 * map_sg returns 0 on error and a value > 0 on success.
  101 	 * It should never return a value < 0.
  102 	 */
  103 	int (*map_sg)(struct device *dev, struct scatterlist *sg,
  104 		      int nents, enum dma_data_direction dir,
  105 		      unsigned long attrs);
  106 	void (*unmap_sg)(struct device *dev,
  107 			 struct scatterlist *sg, int nents,
  108 			 enum dma_data_direction dir,
  109 			 unsigned long attrs);
  110 	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
  111 			       size_t size, enum dma_data_direction dir,
  112 			       unsigned long attrs);
  113 	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
  114 			   size_t size, enum dma_data_direction dir,
  115 			   unsigned long attrs);
  116 	void (*sync_single_for_cpu)(struct device *dev,
  117 				    dma_addr_t dma_handle, size_t size,
  118 				    enum dma_data_direction dir);
  119 	void (*sync_single_for_device)(struct device *dev,
  120 				       dma_addr_t dma_handle, size_t size,
  121 				       enum dma_data_direction dir);
  122 	void (*sync_sg_for_cpu)(struct device *dev,
  123 				struct scatterlist *sg, int nents,
  124 				enum dma_data_direction dir);
  125 	void (*sync_sg_for_device)(struct device *dev,
  126 				   struct scatterlist *sg, int nents,
  127 				   enum dma_data_direction dir);
  128 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
  129 	int (*dma_supported)(struct device *dev, u64 mask);
  130 	int (*set_dma_mask)(struct device *dev, u64 mask);
  131 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
  132 	u64 (*get_required_mask)(struct device *dev);
  133 #endif
  134 	int is_phys;
  135 };
  136 
  137 extern const struct dma_map_ops dma_noop_ops;
  138 extern const struct dma_map_ops dma_virt_ops;
  139 
  140 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
  141 
  142 #define DMA_MASK_NONE	0x0ULL
  143 
  144 static inline int valid_dma_direction(int dma_direction)
  145 {
  146 	return ((dma_direction == DMA_BIDIRECTIONAL) ||
  147 		(dma_direction == DMA_TO_DEVICE) ||
  148 		(dma_direction == DMA_FROM_DEVICE));
  149 }
  150 
  151 static inline int is_device_dma_capable(struct device *dev)
  152 {
  153 	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
  154 }
  155 
  156 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  157 /*
  158  * These three functions are only for dma allocator.
  159  * Don't use them in device drivers.
  160  */
  161 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
  162 				       dma_addr_t *dma_handle, void **ret);
  163 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
  164 
  165 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
  166 			    void *cpu_addr, size_t size, int *ret);
  167 #else
  168 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
  169 #define dma_release_from_coherent(dev, order, vaddr) (0)
  170 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
  171 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  172 
  173 #ifdef CONFIG_HAS_DMA
  174 #include <asm/dma-mapping.h>
  175 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
  176 {
  177 	if (dev && dev->dma_ops)
  178 		return dev->dma_ops;
  179 	return get_arch_dma_ops(dev ? dev->bus : NULL);
  180 }
  181 
  182 static inline void set_dma_ops(struct device *dev,
  183 			       const struct dma_map_ops *dma_ops)
  184 {
  185 	dev->dma_ops = dma_ops;
  186 }
  187 #else
  188 /*
  189  * Define the dma api to allow compilation but not linking of
  190  * dma dependent code.  Code that depends on the dma-mapping
  191  * API needs to set 'depends on HAS_DMA' in its Kconfig
  192  */
  193 extern const struct dma_map_ops bad_dma_ops;
  194 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
  195 {
  196 	return &bad_dma_ops;
  197 }
  198 #endif
  199 
  200 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
  201 					      size_t size,
  202 					      enum dma_data_direction dir,
  203 					      unsigned long attrs)
  204 {
  205 	const struct dma_map_ops *ops = get_dma_ops(dev);
  206 	dma_addr_t addr;
  207 
  208 	kmemcheck_mark_initialized(ptr, size);
  209 	BUG_ON(!valid_dma_direction(dir));
  210 	addr = ops->map_page(dev, virt_to_page(ptr),
  211 			     offset_in_page(ptr), size,
  212 			     dir, attrs);
  213 	debug_dma_map_page(dev, virt_to_page(ptr),
  214 			   offset_in_page(ptr), size,
  215 			   dir, addr, true);
  216 	return addr;
  217 }
  218 
  219 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
  220 					  size_t size,
  221 					  enum dma_data_direction dir,
  222 					  unsigned long attrs)
  223 {
  224 	const struct dma_map_ops *ops = get_dma_ops(dev);
  225 
  226 	BUG_ON(!valid_dma_direction(dir));
  227 	if (ops->unmap_page)
  228 		ops->unmap_page(dev, addr, size, dir, attrs);
  229 	debug_dma_unmap_page(dev, addr, size, dir, true);
  230 }
  231 
  232 /*
   233  * dma_map_sg_attrs returns 0 on error and > 0 on success.
  234  * It should never return a value < 0.
  235  */
  236 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
  237 				   int nents, enum dma_data_direction dir,
  238 				   unsigned long attrs)
  239 {
  240 	const struct dma_map_ops *ops = get_dma_ops(dev);
  241 	int i, ents;
  242 	struct scatterlist *s;
  243 
  244 	for_each_sg(sg, s, nents, i)
  245 		kmemcheck_mark_initialized(sg_virt(s), s->length);
  246 	BUG_ON(!valid_dma_direction(dir));
  247 	ents = ops->map_sg(dev, sg, nents, dir, attrs);
  248 	BUG_ON(ents < 0);
  249 	debug_dma_map_sg(dev, sg, nents, ents, dir);
  250 
  251 	return ents;
  252 }
  253 
  254 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
  255 				      int nents, enum dma_data_direction dir,
  256 				      unsigned long attrs)
  257 {
  258 	const struct dma_map_ops *ops = get_dma_ops(dev);
  259 
  260 	BUG_ON(!valid_dma_direction(dir));
  261 	debug_dma_unmap_sg(dev, sg, nents, dir);
  262 	if (ops->unmap_sg)
  263 		ops->unmap_sg(dev, sg, nents, dir, attrs);
  264 }
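/* Editorial sketch: scatter-gather mapping with the pair above. Note the
   asymmetry: map_sg may coalesce entries, so the unmap must be given the
   original nents, not the returned count. Names are hypothetical.

       int ents = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
       if (ents == 0)
               return -ENOMEM;
       // ... program the device with ents descriptors ...
       dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
*/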
  265 
  266 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
  267 					    struct page *page,
  268 					    size_t offset, size_t size,
  269 					    enum dma_data_direction dir,
  270 					    unsigned long attrs)
  271 {
  272 	const struct dma_map_ops *ops = get_dma_ops(dev);
  273 	dma_addr_t addr;
  274 
  275 	kmemcheck_mark_initialized(page_address(page) + offset, size);
  276 	BUG_ON(!valid_dma_direction(dir));
  277 	addr = ops->map_page(dev, page, offset, size, dir, attrs);
  278 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
  279 
  280 	return addr;
  281 }
  282 
  283 static inline void dma_unmap_page_attrs(struct device *dev,
  284 					dma_addr_t addr, size_t size,
  285 					enum dma_data_direction dir,
  286 					unsigned long attrs)
  287 {
  288 	const struct dma_map_ops *ops = get_dma_ops(dev);
  289 
  290 	BUG_ON(!valid_dma_direction(dir));
  291 	if (ops->unmap_page)
  292 		ops->unmap_page(dev, addr, size, dir, attrs);
  293 	debug_dma_unmap_page(dev, addr, size, dir, false);
  294 }
  295 
  296 static inline dma_addr_t dma_map_resource(struct device *dev,
  297 					  phys_addr_t phys_addr,
  298 					  size_t size,
  299 					  enum dma_data_direction dir,
  300 					  unsigned long attrs)
  301 {
  302 	const struct dma_map_ops *ops = get_dma_ops(dev);
  303 	dma_addr_t addr;
  304 
  305 	BUG_ON(!valid_dma_direction(dir));
  306 
  307 	/* Don't allow RAM to be mapped */
  308 	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
  309 
  310 	addr = phys_addr;
  311 	if (ops->map_resource)
  312 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
  313 
  314 	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
  315 
  316 	return addr;
  317 }
  318 
  319 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
  320 				      size_t size, enum dma_data_direction dir,
  321 				      unsigned long attrs)
  322 {
  323 	const struct dma_map_ops *ops = get_dma_ops(dev);
  324 
  325 	BUG_ON(!valid_dma_direction(dir));
  326 	if (ops->unmap_resource)
  327 		ops->unmap_resource(dev, addr, size, dir, attrs);
  328 	debug_dma_unmap_resource(dev, addr, size, dir);
  329 }
  330 
  331 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
  332 					   size_t size,
  333 					   enum dma_data_direction dir)
  334 {
  335 	const struct dma_map_ops *ops = get_dma_ops(dev);
  336 
  337 	BUG_ON(!valid_dma_direction(dir));
  338 	if (ops->sync_single_for_cpu)
  339 		ops->sync_single_for_cpu(dev, addr, size, dir);
  340 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
  341 }
  342 
  343 static inline void dma_sync_single_for_device(struct device *dev,
  344 					      dma_addr_t addr, size_t size,
  345 					      enum dma_data_direction dir)
  346 {
  347 	const struct dma_map_ops *ops = get_dma_ops(dev);
  348 
  349 	BUG_ON(!valid_dma_direction(dir));
  350 	if (ops->sync_single_for_device)
  351 		ops->sync_single_for_device(dev, addr, size, dir);
  352 	debug_dma_sync_single_for_device(dev, addr, size, dir);
  353 }
  354 
  355 static inline void dma_sync_single_range_for_cpu(struct device *dev,
  356 						 dma_addr_t addr,
  357 						 unsigned long offset,
  358 						 size_t size,
  359 						 enum dma_data_direction dir)
  360 {
  361 	const struct dma_map_ops *ops = get_dma_ops(dev);
  362 
  363 	BUG_ON(!valid_dma_direction(dir));
  364 	if (ops->sync_single_for_cpu)
  365 		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
  366 	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
  367 }
  368 
  369 static inline void dma_sync_single_range_for_device(struct device *dev,
  370 						    dma_addr_t addr,
  371 						    unsigned long offset,
  372 						    size_t size,
  373 						    enum dma_data_direction dir)
  374 {
  375 	const struct dma_map_ops *ops = get_dma_ops(dev);
  376 
  377 	BUG_ON(!valid_dma_direction(dir));
  378 	if (ops->sync_single_for_device)
  379 		ops->sync_single_for_device(dev, addr + offset, size, dir);
  380 	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
  381 }
  382 
  383 static inline void
  384 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
  385 		    int nelems, enum dma_data_direction dir)
  386 {
  387 	const struct dma_map_ops *ops = get_dma_ops(dev);
  388 
  389 	BUG_ON(!valid_dma_direction(dir));
  390 	if (ops->sync_sg_for_cpu)
  391 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
  392 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
  393 }
  394 
  395 static inline void
  396 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  397 		       int nelems, enum dma_data_direction dir)
  398 {
  399 	const struct dma_map_ops *ops = get_dma_ops(dev);
  400 
  401 	BUG_ON(!valid_dma_direction(dir));
  402 	if (ops->sync_sg_for_device)
  403 		ops->sync_sg_for_device(dev, sg, nelems, dir);
  404 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
  405 
  406 }
  407 
  408 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
  409 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
  410 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
  411 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
  412 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
  413 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
  414 
  415 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
  416 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
  417 
  418 void *dma_common_contiguous_remap(struct page *page, size_t size,
  419 			unsigned long vm_flags,
  420 			pgprot_t prot, const void *caller);
  421 
  422 void *dma_common_pages_remap(struct page **pages, size_t size,
  423 			unsigned long vm_flags, pgprot_t prot,
  424 			const void *caller);
  425 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
  426 
  427 /**
  428  * dma_mmap_attrs - map a coherent DMA allocation into user space
  429  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  430  * @vma: vm_area_struct describing requested user mapping
  431  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
  432  * @handle: device-view address returned from dma_alloc_attrs
  433  * @size: size of memory originally requested in dma_alloc_attrs
  434  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
  435  *
  436  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
  437  * into user space.  The coherent DMA buffer must not be freed by the
  438  * driver until the user space mapping has been released.
  439  */
  440 static inline int
  441 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
  442 	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
  443 {
  444 	const struct dma_map_ops *ops = get_dma_ops(dev);
  445 	BUG_ON(!ops);
  446 	if (ops->mmap)
  447 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
  448 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
  449 }
  450 
  451 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
  452 
  453 int
  454 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
  455 		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
  456 
  457 static inline int
  458 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
  459 		      dma_addr_t dma_addr, size_t size,
  460 		      unsigned long attrs)
  461 {
  462 	const struct dma_map_ops *ops = get_dma_ops(dev);
  463 	BUG_ON(!ops);
  464 	if (ops->get_sgtable)
  465 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
  466 					attrs);
  467 	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
  468 }
  469 
  470 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
  471 
  472 #ifndef arch_dma_alloc_attrs
  473 #define arch_dma_alloc_attrs(dev, flag)	(true)
  474 #endif
  475 
  476 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
  477 				       dma_addr_t *dma_handle, gfp_t flag,
  478 				       unsigned long attrs)
  479 {
  480 	const struct dma_map_ops *ops = get_dma_ops(dev);
  481 	void *cpu_addr;
  482 
  483 	BUG_ON(!ops);
  484 
  485 	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
  486 		return cpu_addr;
  487 
  488 	if (!arch_dma_alloc_attrs(&dev, &flag))
  489 		return NULL;
  490 	if (!ops->alloc)
  491 		return NULL;
  492 
  493 	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
  494 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
  495 	return cpu_addr;
  496 }
  497 
  498 static inline void dma_free_attrs(struct device *dev, size_t size,
  499 				     void *cpu_addr, dma_addr_t dma_handle,
  500 				     unsigned long attrs)
  501 {
  502 	const struct dma_map_ops *ops = get_dma_ops(dev);
  503 
  504 	BUG_ON(!ops);
  505 	WARN_ON(irqs_disabled());
  506 
  507 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
  508 		return;
  509 
  510 	if (!ops->free || !cpu_addr)
  511 		return;
  512 
  513 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
  514 	ops->free(dev, size, cpu_addr, dma_handle, attrs);
  515 }
  516 
  517 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  518 		dma_addr_t *dma_handle, gfp_t flag)
  519 {
  520 	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
  521 }
  522 
  523 static inline void dma_free_coherent(struct device *dev, size_t size,
  524 		void *cpu_addr, dma_addr_t dma_handle)
  525 {
  526 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
  527 }
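/* Editorial sketch: a matched coherent allocation and release built on the
   two wrappers above; the ring-buffer names and size are hypothetical.

       dma_addr_t ring_dma;
       void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
       if (!ring)
               return -ENOMEM;
       // ... the device sees ring_dma, the CPU uses ring ...
       dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
*/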
  528 
  529 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
  530 		dma_addr_t *dma_handle, gfp_t gfp)
  531 {
  532 	return dma_alloc_attrs(dev, size, dma_handle, gfp,
  533 			       DMA_ATTR_NON_CONSISTENT);
  534 }
  535 
  536 static inline void dma_free_noncoherent(struct device *dev, size_t size,
  537 		void *cpu_addr, dma_addr_t dma_handle)
  538 {
  539 	dma_free_attrs(dev, size, cpu_addr, dma_handle,
  540 		       DMA_ATTR_NON_CONSISTENT);
  541 }
  542 
  543 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  544 {
  545 	debug_dma_mapping_error(dev, dma_addr);
  546 
  547 	if (get_dma_ops(dev)->mapping_error)
  548 		return get_dma_ops(dev)->mapping_error(dev, dma_addr);
  549 
  550 #ifdef DMA_ERROR_CODE
  551 	return dma_addr == DMA_ERROR_CODE;
  552 #else
  553 	return 0;
  554 #endif
  555 }
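/* Editorial sketch: the canonical streaming-DMA sequence built from the
   helpers above -- and exactly the map/check pairing the LDV model at the
   top of this file verifies. `dev`, `buf` and `len` are hypothetical.

       dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
       if (dma_mapping_error(dev, handle))
               return -ENOMEM;          // never use an unchecked handle
       // ... run the transfer ...
       dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
*/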
  556 
  557 #ifndef HAVE_ARCH_DMA_SUPPORTED
  558 static inline int dma_supported(struct device *dev, u64 mask)
  559 {
  560 	const struct dma_map_ops *ops = get_dma_ops(dev);
  561 
  562 	if (!ops)
  563 		return 0;
  564 	if (!ops->dma_supported)
  565 		return 1;
  566 	return ops->dma_supported(dev, mask);
  567 }
  568 #endif
  569 
  570 #ifndef HAVE_ARCH_DMA_SET_MASK
  571 static inline int dma_set_mask(struct device *dev, u64 mask)
  572 {
  573 	const struct dma_map_ops *ops = get_dma_ops(dev);
  574 
  575 	if (ops->set_dma_mask)
  576 		return ops->set_dma_mask(dev, mask);
  577 
  578 	if (!dev->dma_mask || !dma_supported(dev, mask))
  579 		return -EIO;
  580 	*dev->dma_mask = mask;
  581 	return 0;
  582 }
  583 #endif
  584 
  585 static inline u64 dma_get_mask(struct device *dev)
  586 {
  587 	if (dev && dev->dma_mask && *dev->dma_mask)
  588 		return *dev->dma_mask;
  589 	return DMA_BIT_MASK(32);
  590 }
  591 
  592 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
  593 int dma_set_coherent_mask(struct device *dev, u64 mask);
  594 #else
  595 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
  596 {
  597 	if (!dma_supported(dev, mask))
  598 		return -EIO;
  599 	dev->coherent_dma_mask = mask;
  600 	return 0;
  601 }
  602 #endif
  603 
  604 /*
  605  * Set both the DMA mask and the coherent DMA mask to the same thing.
  606  * Note that we don't check the return value from dma_set_coherent_mask()
  607  * as the DMA API guarantees that the coherent DMA mask can be set to
  608  * the same or smaller than the streaming DMA mask.
  609  */
  610 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
  611 {
  612 	int rc = dma_set_mask(dev, mask);
  613 	if (rc == 0)
  614 		dma_set_coherent_mask(dev, mask);
  615 	return rc;
  616 }
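
/* A sketch of the usual probe-time negotiation built on the helper above,
 * assuming a hypothetical device that prefers 64-bit DMA but can fall back
 * to 32-bit addressing. */
static int example_set_masks(struct device *dev)
{
	/* try the wide mask first; the helper sets both masks on success */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	/* fall back to 32-bit addressing, or give up with -EIO */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}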
  617 
  618 /*
  619  * Similar to the above, except it deals with the case where the device
  620  * does not have dev->dma_mask appropriately setup.
  621  */
  622 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
  623 {
  624 	dev->dma_mask = &dev->coherent_dma_mask;
  625 	return dma_set_mask_and_coherent(dev, mask);
  626 }
  627 
  628 extern u64 dma_get_required_mask(struct device *dev);
  629 
  630 #ifndef arch_setup_dma_ops
  631 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
  632 				      u64 size, const struct iommu_ops *iommu,
  633 				      bool coherent) { }
  634 #endif
  635 
  636 #ifndef arch_teardown_dma_ops
  637 static inline void arch_teardown_dma_ops(struct device *dev) { }
  638 #endif
  639 
  640 static inline unsigned int dma_get_max_seg_size(struct device *dev)
  641 {
  642 	if (dev->dma_parms && dev->dma_parms->max_segment_size)
  643 		return dev->dma_parms->max_segment_size;
  644 	return SZ_64K;
  645 }
  646 
  647 static inline unsigned int dma_set_max_seg_size(struct device *dev,
  648 						unsigned int size)
  649 {
  650 	if (dev->dma_parms) {
  651 		dev->dma_parms->max_segment_size = size;
  652 		return 0;
  653 	}
  654 	return -EIO;
  655 }
  656 
  657 static inline unsigned long dma_get_seg_boundary(struct device *dev)
  658 {
  659 	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
  660 		return dev->dma_parms->segment_boundary_mask;
  661 	return DMA_BIT_MASK(32);
  662 }
  663 
  664 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
  665 {
  666 	if (dev->dma_parms) {
  667 		dev->dma_parms->segment_boundary_mask = mask;
  668 		return 0;
  669 	}
  670 	return -EIO;
  671 }
  672 
  673 #ifndef dma_max_pfn
  674 static inline unsigned long dma_max_pfn(struct device *dev)
  675 {
  676 	return *dev->dma_mask >> PAGE_SHIFT;
  677 }
  678 #endif
  679 
  680 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
  681 					dma_addr_t *dma_handle, gfp_t flag)
  682 {
  683 	void *ret = dma_alloc_coherent(dev, size, dma_handle,
  684 				       flag | __GFP_ZERO);
  685 	return ret;
  686 }
  687 
  688 #ifdef CONFIG_HAS_DMA
  689 static inline int dma_get_cache_alignment(void)
  690 {
  691 #ifdef ARCH_DMA_MINALIGN
  692 	return ARCH_DMA_MINALIGN;
  693 #endif
  694 	return 1;
  695 }
  696 #endif
  697 
  698 /* flags for the coherent memory api */
  699 #define	DMA_MEMORY_MAP			0x01
  700 #define DMA_MEMORY_IO			0x02
  701 #define DMA_MEMORY_INCLUDES_CHILDREN	0x04
  702 #define DMA_MEMORY_EXCLUSIVE		0x08
  703 
  704 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  705 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  706 				dma_addr_t device_addr, size_t size, int flags);
  707 void dma_release_declared_memory(struct device *dev);
  708 void *dma_mark_declared_memory_occupied(struct device *dev,
  709 					dma_addr_t device_addr, size_t size);
  710 #else
  711 static inline int
  712 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  713 			    dma_addr_t device_addr, size_t size, int flags)
  714 {
  715 	return 0;
  716 }
  717 
  718 static inline void
  719 dma_release_declared_memory(struct device *dev)
  720 {
  721 }
  722 
  723 static inline void *
  724 dma_mark_declared_memory_occupied(struct device *dev,
  725 				  dma_addr_t device_addr, size_t size)
  726 {
  727 	return ERR_PTR(-EBUSY);
  728 }
  729 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  730 
  731 /*
  732  * Managed DMA API
  733  */
  734 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
  735 				 dma_addr_t *dma_handle, gfp_t gfp);
  736 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
  737 			       dma_addr_t dma_handle);
  738 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
  739 				    dma_addr_t *dma_handle, gfp_t gfp);
  740 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
  741 				  dma_addr_t dma_handle);
  742 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  743 extern int dmam_declare_coherent_memory(struct device *dev,
  744 					phys_addr_t phys_addr,
  745 					dma_addr_t device_addr, size_t size,
  746 					int flags);
  747 extern void dmam_release_declared_memory(struct device *dev);
  748 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  749 static inline int dmam_declare_coherent_memory(struct device *dev,
  750 				phys_addr_t phys_addr, dma_addr_t device_addr,
  751 				size_t size, gfp_t gfp)
  752 {
  753 	return 0;
  754 }
  755 
  756 static inline void dmam_release_declared_memory(struct device *dev)
  757 {
  758 }
  759 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  760 
  761 static inline void *dma_alloc_wc(struct device *dev, size_t size,
  762 				 dma_addr_t *dma_addr, gfp_t gfp)
  763 {
  764 	return dma_alloc_attrs(dev, size, dma_addr, gfp,
  765 			       DMA_ATTR_WRITE_COMBINE);
  766 }
  767 #ifndef dma_alloc_writecombine
  768 #define dma_alloc_writecombine dma_alloc_wc
  769 #endif
  770 
  771 static inline void dma_free_wc(struct device *dev, size_t size,
  772 			       void *cpu_addr, dma_addr_t dma_addr)
  773 {
  774 	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
  775 			      DMA_ATTR_WRITE_COMBINE);
  776 }
  777 #ifndef dma_free_writecombine
  778 #define dma_free_writecombine dma_free_wc
  779 #endif
  780 
  781 static inline int dma_mmap_wc(struct device *dev,
  782 			      struct vm_area_struct *vma,
  783 			      void *cpu_addr, dma_addr_t dma_addr,
  784 			      size_t size)
  785 {
  786 	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
  787 			      DMA_ATTR_WRITE_COMBINE);
  788 }
  789 #ifndef dma_mmap_writecombine
  790 #define dma_mmap_writecombine dma_mmap_wc
  791 #endif
  792 
  793 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
  794 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
  795 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
  796 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
  797 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
  798 #define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
  799 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
  800 #else
  801 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
  802 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
  803 #define dma_unmap_addr(PTR, ADDR_NAME)           (0)
  804 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
  805 #define dma_unmap_len(PTR, LEN_NAME)             (0)
  806 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
  807 #endif
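
/* A hedged sketch of how a driver typically uses these macros, so the
 * unmap bookkeeping compiles away when the config options above are unset;
 * struct my_tx_buf and the surrounding calls are hypothetical. */
struct my_tx_buf {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* a dma_addr_t, or nothing */
	DEFINE_DMA_UNMAP_LEN(len);	/* a __u32, or nothing */
};

static void example_unmap(struct device *dev, struct my_tx_buf *tb)
{
	/* fields were filled with dma_unmap_addr_set()/dma_unmap_len_set() */
	dma_unmap_single(dev, dma_unmap_addr(tb, mapping),
			 dma_unmap_len(tb, len), DMA_TO_DEVICE);
}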
  808 
  809 #endif
    1 /* interrupt.h */
    2 #ifndef _LINUX_INTERRUPT_H
    3 #define _LINUX_INTERRUPT_H
    4 
    5 #include <linux/kernel.h>
    6 #include <linux/linkage.h>
    7 #include <linux/bitops.h>
    8 #include <linux/preempt.h>
    9 #include <linux/cpumask.h>
   10 #include <linux/irqreturn.h>
   11 #include <linux/irqnr.h>
   12 #include <linux/hardirq.h>
   13 #include <linux/irqflags.h>
   14 #include <linux/hrtimer.h>
   15 #include <linux/kref.h>
   16 #include <linux/workqueue.h>
   17 
   18 #include <linux/atomic.h>
   19 #include <asm/ptrace.h>
   20 #include <asm/irq.h>
   21 
   22 /*
   23  * These correspond to the IORESOURCE_IRQ_* defines in
   24  * linux/ioport.h to select the interrupt line behaviour.  When
   25  * requesting an interrupt without specifying an IRQF_TRIGGER, the
   26  * setting should be assumed to be "as already configured", which
   27  * may be as per machine or firmware initialisation.
   28  */
   29 #define IRQF_TRIGGER_NONE	0x00000000
   30 #define IRQF_TRIGGER_RISING	0x00000001
   31 #define IRQF_TRIGGER_FALLING	0x00000002
   32 #define IRQF_TRIGGER_HIGH	0x00000004
   33 #define IRQF_TRIGGER_LOW	0x00000008
   34 #define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
   35 				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
   36 #define IRQF_TRIGGER_PROBE	0x00000010
   37 
   38 /*
   39  * These flags are used only by the kernel as part of the
   40  * irq handling routines.
   41  *
   42  * IRQF_SHARED - allow sharing the irq among several devices
   43  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
   44  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
   45  * IRQF_PERCPU - Interrupt is per cpu
   46  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
   47  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
   48  *                registered first in a shared interrupt is considered for
   49  *                performance reasons)
   50  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
   51  *                Used by threaded interrupts which need to keep the
   52  *                irq line disabled until the threaded handler has been run.
   53  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
   54  *                   that this interrupt will wake the system from a suspended
   55  *                   state.  See Documentation/power/suspend-and-interrupts.txt
   56  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
   57  * IRQF_NO_THREAD - Interrupt cannot be threaded
   58  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
   59  *                resume time.
   60  * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
   61  *                interrupt handler after suspending interrupts. For system
   62  *                wakeup devices users need to implement wakeup detection in
   63  *                their interrupt handlers.
   64  */
   65 #define IRQF_SHARED		0x00000080
   66 #define IRQF_PROBE_SHARED	0x00000100
   67 #define __IRQF_TIMER		0x00000200
   68 #define IRQF_PERCPU		0x00000400
   69 #define IRQF_NOBALANCING	0x00000800
   70 #define IRQF_IRQPOLL		0x00001000
   71 #define IRQF_ONESHOT		0x00002000
   72 #define IRQF_NO_SUSPEND		0x00004000
   73 #define IRQF_FORCE_RESUME	0x00008000
   74 #define IRQF_NO_THREAD		0x00010000
   75 #define IRQF_EARLY_RESUME	0x00020000
   76 #define IRQF_COND_SUSPEND	0x00040000
   77 
   78 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
   79 
   80 /*
   81  * These values can be returned by request_any_context_irq() and
   82  * describe the context the interrupt will be run in.
   83  *
   84  * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
   85  * IRQC_IS_NESTED - interrupt runs in a nested threaded context
   86  */
   87 enum {
   88 	IRQC_IS_HARDIRQ	= 0,
   89 	IRQC_IS_NESTED,
   90 };
   91 
   92 typedef irqreturn_t (*irq_handler_t)(int, void *);
   93 
   94 /**
   95  * struct irqaction - per interrupt action descriptor
   96  * @handler:	interrupt handler function
   97  * @name:	name of the device
   98  * @dev_id:	cookie to identify the device
   99  * @percpu_dev_id:	cookie to identify the device
  100  * @next:	pointer to the next irqaction for shared interrupts
  101  * @irq:	interrupt number
  102  * @flags:	flags (see IRQF_* above)
  103  * @thread_fn:	interrupt handler function for threaded interrupts
  104  * @thread:	thread pointer for threaded interrupts
  105  * @secondary:	pointer to secondary irqaction (force threading)
  106  * @thread_flags:	flags related to @thread
  107  * @thread_mask:	bitmask for keeping track of @thread activity
  108  * @dir:	pointer to the proc/irq/NN/name entry
  109  */
  110 struct irqaction {
  111 	irq_handler_t		handler;
  112 	void			*dev_id;
  113 	void __percpu		*percpu_dev_id;
  114 	struct irqaction	*next;
  115 	irq_handler_t		thread_fn;
  116 	struct task_struct	*thread;
  117 	struct irqaction	*secondary;
  118 	unsigned int		irq;
  119 	unsigned int		flags;
  120 	unsigned long		thread_flags;
  121 	unsigned long		thread_mask;
  122 	const char		*name;
  123 	struct proc_dir_entry	*dir;
  124 } ____cacheline_internodealigned_in_smp;
  125 
  126 extern irqreturn_t no_action(int cpl, void *dev_id);
  127 
  128 /*
  129  * If a (PCI) device interrupt is not connected we set dev->irq to
  130  * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
  131  * can distinguish that case from other error returns.
  132  *
  133  * 0x80000000 is guaranteed to be outside the available range of interrupts
  134  * and easy to distinguish from other possible incorrect values.
  135  */
  136 #define IRQ_NOTCONNECTED	(1U << 31)
  137 
  138 extern int __must_check
  139 request_threaded_irq(unsigned int irq, irq_handler_t handler,
  140 		     irq_handler_t thread_fn,
  141 		     unsigned long flags, const char *name, void *dev);
  142 
  143 static inline int __must_check
  144 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
  145 	    const char *name, void *dev)
  146 {
  147 	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
  148 }
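
/* A hedged sketch of registering a shared handler through the wrapper
 * above; struct my_dev, its irq field and the status check are
 * hypothetical. */
struct my_dev {
	int	irq;	/* hypothetical device state */
};

static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_dev *md = dev_id;	/* the cookie passed below */

	if (!my_dev_irq_pending(md))	/* hypothetical status check */
		return IRQ_NONE;	/* not ours: the line is shared */

	/* acknowledge and handle the interrupt here */
	return IRQ_HANDLED;
}

static int example_setup_irq(struct my_dev *md)
{
	/* the last argument comes back as the handler's dev_id cookie */
	return request_irq(md->irq, my_handler, IRQF_SHARED, "my_dev", md);
}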
  149 
  150 extern int __must_check
  151 request_any_context_irq(unsigned int irq, irq_handler_t handler,
  152 			unsigned long flags, const char *name, void *dev_id);
  153 
  154 extern int __must_check
  155 request_percpu_irq(unsigned int irq, irq_handler_t handler,
  156 		   const char *devname, void __percpu *percpu_dev_id);
  157 
  158 extern void free_irq(unsigned int, void *);
  159 extern void free_percpu_irq(unsigned int, void __percpu *);
  160 
  161 struct device;
  162 
  163 extern int __must_check
  164 devm_request_threaded_irq(struct device *dev, unsigned int irq,
  165 			  irq_handler_t handler, irq_handler_t thread_fn,
  166 			  unsigned long irqflags, const char *devname,
  167 			  void *dev_id);
  168 
  169 static inline int __must_check
  170 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
  171 		 unsigned long irqflags, const char *devname, void *dev_id)
  172 {
  173 	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
  174 					 devname, dev_id);
  175 }
  176 
  177 extern int __must_check
  178 devm_request_any_context_irq(struct device *dev, unsigned int irq,
  179 		 irq_handler_t handler, unsigned long irqflags,
  180 		 const char *devname, void *dev_id);
  181 
  182 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
  183 
  184 /*
  185  * On lockdep we don't want to enable hardirqs in hardirq
  186  * context. Use local_irq_enable_in_hardirq() to annotate
  187  * kernel code that has to do this nevertheless (pretty much
  188  * the only valid case is for old/broken hardware that is
  189  * insanely slow).
  190  *
  191  * NOTE: in theory this might break fragile code that relies
  192  * on hardirq delivery - in practice we don't seem to have such
  193  * places left. So the only effect should be slightly increased
  194  * irqs-off latencies.
  195  */
  196 #ifdef CONFIG_LOCKDEP
  197 # define local_irq_enable_in_hardirq()	do { } while (0)
  198 #else
  199 # define local_irq_enable_in_hardirq()	local_irq_enable()
  200 #endif
  201 
  202 extern void disable_irq_nosync(unsigned int irq);
  203 extern bool disable_hardirq(unsigned int irq);
  204 extern void disable_irq(unsigned int irq);
  205 extern void disable_percpu_irq(unsigned int irq);
  206 extern void enable_irq(unsigned int irq);
  207 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
  208 extern bool irq_percpu_is_enabled(unsigned int irq);
  209 extern void irq_wake_thread(unsigned int irq, void *dev_id);
  210 
  211 /* The following three functions are for the core kernel use only. */
  212 extern void suspend_device_irqs(void);
  213 extern void resume_device_irqs(void);
  214 
  215 /**
  216  * struct irq_affinity_notify - context for notification of IRQ affinity changes
  217  * @irq:		Interrupt to which notification applies
  218  * @kref:		Reference count, for internal use
  219  * @work:		Work item, for internal use
  220  * @notify:		Function to be called on change.  This will be
  221  *			called in process context.
  222  * @release:		Function to be called on release.  This will be
  223  *			called in process context.  Once registered, the
  224  *			structure must only be freed when this function is
  225  *			called or later.
  226  */
  227 struct irq_affinity_notify {
  228 	unsigned int irq;
  229 	struct kref kref;
  230 	struct work_struct work;
  231 	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
  232 	void (*release)(struct kref *ref);
  233 };
  234 
  235 /**
  236  * struct irq_affinity - Description for automatic irq affinity assignments
  237  * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
  238  *			the MSI(-X) vector space
  239  * @post_vectors:	Don't apply affinity to @post_vectors at end of
  240  *			the MSI(-X) vector space
  241  */
  242 struct irq_affinity {
  243 	int	pre_vectors;
  244 	int	post_vectors;
  245 };
  246 
  247 #if defined(CONFIG_SMP)
  248 
  249 extern cpumask_var_t irq_default_affinity;
  250 
  251 /* Internal implementation. Use the helpers below */
  252 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
  253 			      bool force);
  254 
  255 /**
  256  * irq_set_affinity - Set the irq affinity of a given irq
  257  * @irq:	Interrupt to set affinity
  258  * @cpumask:	cpumask
  259  *
  260  * Fails if cpumask does not contain an online CPU
  261  */
  262 static inline int
  263 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  264 {
  265 	return __irq_set_affinity(irq, cpumask, false);
  266 }
  267 
  268 /**
  269  * irq_force_affinity - Force the irq affinity of a given irq
  270  * @irq:	Interrupt to set affinity
  271  * @cpumask:	cpumask
  272  *
  273  * Same as irq_set_affinity, but without checking the mask against
  274  * online cpus.
  275  *
  276  * Solely for low level cpu hotplug code, where we need to make per
  277  * cpu interrupts affine before the cpu becomes online.
  278  */
  279 static inline int
  280 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
  281 {
  282 	return __irq_set_affinity(irq, cpumask, true);
  283 }
  284 
  285 extern int irq_can_set_affinity(unsigned int irq);
  286 extern int irq_select_affinity(unsigned int irq);
  287 
  288 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
  289 
  290 extern int
  291 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
  292 
  293 struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
  294 int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
  295 
  296 #else /* CONFIG_SMP */
  297 
  298 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
  299 {
  300 	return -EINVAL;
  301 }
  302 
  303 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
  304 {
  305 	return 0;
  306 }
  307 
  308 static inline int irq_can_set_affinity(unsigned int irq)
  309 {
  310 	return 0;
  311 }
  312 
  313 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
  314 
  315 static inline int irq_set_affinity_hint(unsigned int irq,
  316 					const struct cpumask *m)
  317 {
  318 	return -EINVAL;
  319 }
  320 
  321 static inline int
  322 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
  323 {
  324 	return 0;
  325 }
  326 
  327 static inline struct cpumask *
  328 irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
  329 {
  330 	return NULL;
  331 }
  332 
  333 static inline int
  334 irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
  335 {
  336 	return maxvec;
  337 }
  338 
  339 #endif /* CONFIG_SMP */
  340 
  341 /*
  342  * Special lockdep variants of irq disabling/enabling.
  343  * These should be used for locking constructs that
  344  * know that a particular irq context is disabled, and
  345  * which are the only irq-context users of a lock, so
  346  * that it is safe to take the lock in the irq-disabled
  347  * section without disabling hardirqs.
  348  *
  349  * On !CONFIG_LOCKDEP they are equivalent to the normal
  350  * irq disable/enable methods.
  351  */
  352 static inline void disable_irq_nosync_lockdep(unsigned int irq)
  353 {
  354 	disable_irq_nosync(irq);
  355 #ifdef CONFIG_LOCKDEP
  356 	local_irq_disable();
  357 #endif
  358 }
  359 
  360 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
  361 {
  362 	disable_irq_nosync(irq);
  363 #ifdef CONFIG_LOCKDEP
  364 	local_irq_save(*flags);
  365 #endif
  366 }
  367 
  368 static inline void disable_irq_lockdep(unsigned int irq)
  369 {
  370 	disable_irq(irq);
  371 #ifdef CONFIG_LOCKDEP
  372 	local_irq_disable();
  373 #endif
  374 }
  375 
  376 static inline void enable_irq_lockdep(unsigned int irq)
  377 {
  378 #ifdef CONFIG_LOCKDEP
  379 	local_irq_enable();
  380 #endif
  381 	enable_irq(irq);
  382 }
  383 
  384 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
  385 {
  386 #ifdef CONFIG_LOCKDEP
  387 	local_irq_restore(*flags);
  388 #endif
  389 	enable_irq(irq);
  390 }
  391 
  392 /* IRQ wakeup (PM) control: */
  393 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
  394 
  395 static inline int enable_irq_wake(unsigned int irq)
  396 {
  397 	return irq_set_irq_wake(irq, 1);
  398 }
  399 
  400 static inline int disable_irq_wake(unsigned int irq)
  401 {
  402 	return irq_set_irq_wake(irq, 0);
  403 }
  404 
  405 /*
  406  * irq_get_irqchip_state/irq_set_irqchip_state specific flags
  407  */
  408 enum irqchip_irq_state {
  409 	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
  410 	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
  411 	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
  412 	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
  413 };
  414 
  415 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  416 				 bool *state);
  417 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  418 				 bool state);
  419 
  420 #ifdef CONFIG_IRQ_FORCED_THREADING
  421 extern bool force_irqthreads;
  422 #else
  423 #define force_irqthreads	(0)
  424 #endif
  425 
  426 #ifndef __ARCH_SET_SOFTIRQ_PENDING
  427 #define set_softirq_pending(x) (local_softirq_pending() = (x))
  428 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
  429 #endif
  430 
  431 /* Some architectures might implement lazy enabling/disabling of
  432  * interrupts. In some cases, such as stop_machine, we might want
  433  * to ensure that after a local_irq_disable(), interrupts have
  434  * really been disabled in hardware. Such architectures need to
  435  * implement the following hook.
  436  */
  437 #ifndef hard_irq_disable
  438 #define hard_irq_disable()	do { } while(0)
  439 #endif
  440 
  441 /* PLEASE avoid allocating new softirqs unless you really need
  442    high-frequency threaded job scheduling. For almost all purposes
  443    tasklets are more than enough; e.g. all serial device BHs et
  444    al. should be converted to tasklets, not to softirqs.
  445  */
  446 
  447 enum
  448 {
  449 	HI_SOFTIRQ=0,
  450 	TIMER_SOFTIRQ,
  451 	NET_TX_SOFTIRQ,
  452 	NET_RX_SOFTIRQ,
  453 	BLOCK_SOFTIRQ,
  454 	IRQ_POLL_SOFTIRQ,
  455 	TASKLET_SOFTIRQ,
  456 	SCHED_SOFTIRQ,
  457 	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
  458 			    numbering. Sigh! */
  459 	RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */
  460 
  461 	NR_SOFTIRQS
  462 };
  463 
  464 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
  465 
  466 /* map softirq index to softirq name. update 'softirq_to_name' in
  467  * kernel/softirq.c when adding a new softirq.
  468  */
  469 extern const char * const softirq_to_name[NR_SOFTIRQS];
  470 
  471 /* softirq mask and active fields moved to irq_cpustat_t in
  472  * asm/hardirq.h to get better cache usage.  KAO
  473  */
  474 
  475 struct softirq_action
  476 {
  477 	void	(*action)(struct softirq_action *);
  478 };
  479 
  480 asmlinkage void do_softirq(void);
  481 asmlinkage void __do_softirq(void);
  482 
  483 #ifdef __ARCH_HAS_DO_SOFTIRQ
  484 void do_softirq_own_stack(void);
  485 #else
  486 static inline void do_softirq_own_stack(void)
  487 {
  488 	__do_softirq();
  489 }
  490 #endif
  491 
  492 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  493 extern void softirq_init(void);
  494 extern void __raise_softirq_irqoff(unsigned int nr);
  495 
  496 extern void raise_softirq_irqoff(unsigned int nr);
  497 extern void raise_softirq(unsigned int nr);
  498 
  499 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
  500 
  501 static inline struct task_struct *this_cpu_ksoftirqd(void)
  502 {
  503 	return this_cpu_read(ksoftirqd);
  504 }
  505 
  506 /* Tasklets --- multithreaded analogue of BHs.
  507 
  508    Main feature distinguishing them from generic softirqs: a tasklet
  509    runs on only one CPU at a time.
  510 
  511    Main feature distinguishing them from BHs: different tasklets
  512    may run simultaneously on different CPUs.
  513 
  514    Properties:
  515    * If tasklet_schedule() is called, the tasklet is guaranteed
  516      to be executed on some CPU at least once afterwards.
  517    * If the tasklet is already scheduled but its execution has not
  518      yet started, it will be executed only once.
  519    * If the tasklet is already running on another CPU (or schedule is
  520      called from the tasklet itself), it is rescheduled for later.
  521    * A tasklet is strictly serialized with respect to itself, but not
  522      with respect to other tasklets; if a client needs inter-tasklet
  523      synchronization, it must use spinlocks.
  524  */
  525 
  526 struct tasklet_struct
  527 {
  528 	struct tasklet_struct *next;
  529 	unsigned long state;
  530 	atomic_t count;
  531 	void (*func)(unsigned long);
  532 	unsigned long data;
  533 };
  534 
  535 #define DECLARE_TASKLET(name, func, data) \
  536 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
  537 
  538 #define DECLARE_TASKLET_DISABLED(name, func, data) \
  539 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
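
/* A hedged sketch of declaring and scheduling a tasklet with the macro
 * above; the function names and empty payload are hypothetical. */
static void my_tasklet_fn(unsigned long data)
{
	/* deferred work; runs in softirq context on one CPU at a time */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_irq(int irq, void *dev_id)
{
	tasklet_schedule(&my_tasklet);	/* defer the heavy lifting */
	return IRQ_HANDLED;
}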
  540 
  541 
  542 enum
  543 {
  544 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
  545 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
  546 };
  547 
  548 #ifdef CONFIG_SMP
  549 static inline int tasklet_trylock(struct tasklet_struct *t)
  550 {
  551 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
  552 }
  553 
  554 static inline void tasklet_unlock(struct tasklet_struct *t)
  555 {
  556 	smp_mb__before_atomic();
  557 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
  558 }
  559 
  560 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
  561 {
  562 	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
  563 }
  564 #else
  565 #define tasklet_trylock(t) 1
  566 #define tasklet_unlock_wait(t) do { } while (0)
  567 #define tasklet_unlock(t) do { } while (0)
  568 #endif
  569 
  570 extern void __tasklet_schedule(struct tasklet_struct *t);
  571 
  572 static inline void tasklet_schedule(struct tasklet_struct *t)
  573 {
  574 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  575 		__tasklet_schedule(t);
  576 }
  577 
  578 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
  579 
  580 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
  581 {
  582 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  583 		__tasklet_hi_schedule(t);
  584 }
  585 
  586 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
  587 
  588 /*
  589  * This version avoids touching any other tasklets. Needed for kmemcheck
  590  * in order not to take any page faults while enqueueing this tasklet;
  591  * consider VERY carefully whether you really need this or
  592  * tasklet_hi_schedule()...
  593  */
  594 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
  595 {
  596 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  597 		__tasklet_hi_schedule_first(t);
  598 }
  599 
  600 
  601 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
  602 {
  603 	atomic_inc(&t->count);
  604 	smp_mb__after_atomic();
  605 }
  606 
  607 static inline void tasklet_disable(struct tasklet_struct *t)
  608 {
  609 	tasklet_disable_nosync(t);
  610 	tasklet_unlock_wait(t);
  611 	smp_mb();
  612 }
  613 
  614 static inline void tasklet_enable(struct tasklet_struct *t)
  615 {
  616 	smp_mb__before_atomic();
  617 	atomic_dec(&t->count);
  618 }
  619 
  620 extern void tasklet_kill(struct tasklet_struct *t);
  621 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
  622 extern void tasklet_init(struct tasklet_struct *t,
  623 			 void (*func)(unsigned long), unsigned long data);
  624 
  625 struct tasklet_hrtimer {
  626 	struct hrtimer		timer;
  627 	struct tasklet_struct	tasklet;
  628 	enum hrtimer_restart	(*function)(struct hrtimer *);
  629 };
  630 
  631 extern void
  632 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
  633 		     enum hrtimer_restart (*function)(struct hrtimer *),
  634 		     clockid_t which_clock, enum hrtimer_mode mode);
  635 
  636 static inline
  637 void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
  638 			   const enum hrtimer_mode mode)
  639 {
  640 	hrtimer_start(&ttimer->timer, time, mode);
  641 }
  642 
  643 static inline
  644 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
  645 {
  646 	hrtimer_cancel(&ttimer->timer);
  647 	tasklet_kill(&ttimer->tasklet);
  648 }
  649 
  650 /*
  651  * Autoprobing for irqs:
  652  *
  653  * probe_irq_on() and probe_irq_off() provide robust primitives
  654  * for accurate IRQ probing during kernel initialization.  They are
  655  * reasonably simple to use, are not "fooled" by spurious interrupts,
  656  * and, unlike other attempts at IRQ probing, they do not get hung on
  657  * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
  658  *
  659  * For reasonably foolproof probing, use them as follows:
  660  *
  661  * 1. clear and/or mask the device's internal interrupt.
  662  * 2. sti();
  663  * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
  664  * 4. enable the device and cause it to trigger an interrupt.
  665  * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
  666  * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
  667  * 7. service the device to clear its pending interrupt.
  668  * 8. loop again if paranoia is required.
  669  *
  670  * probe_irq_on() returns a mask of allocated irq's.
  671  *
  672  * probe_irq_off() takes the mask as a parameter,
  673  * and returns the irq number which occurred,
  674  * or zero if none occurred, or a negative irq number
  675  * if more than one irq occurred.
  676  */
  677 
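/* The numbered recipe above, condensed into a hedged sketch; struct my_dev
 * and the device enable/ack hooks are hypothetical. */
static int example_probe_irq(struct my_dev *md)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* take over unassigned idle IRQs */
	my_dev_enable_irq(md);		/* step 4: let the device fire */
	mdelay(20);			/* step 5: give it time to interrupt */
	irq = probe_irq_off(mask);	/* step 6: 0 = none, <0 = multiple */
	my_dev_ack_irq(md);		/* step 7: clear the pending irq */

	return irq;
}
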
  678 #if !defined(CONFIG_GENERIC_IRQ_PROBE) 
  679 static inline unsigned long probe_irq_on(void)
  680 {
  681 	return 0;
  682 }
  683 static inline int probe_irq_off(unsigned long val)
  684 {
  685 	return 0;
  686 }
  687 static inline unsigned int probe_irq_mask(unsigned long val)
  688 {
  689 	return 0;
  690 }
  691 #else
  692 extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
  693 extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
  694 extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
  695 #endif
  696 
  697 #ifdef CONFIG_PROC_FS
  698 /* Initialize /proc/irq/ */
  699 extern void init_irq_proc(void);
  700 #else
  701 static inline void init_irq_proc(void)
  702 {
  703 }
  704 #endif
  705 
  706 struct seq_file;
  707 int show_interrupts(struct seq_file *p, void *v);
  708 int arch_show_interrupts(struct seq_file *p, int prec);
  709 
  710 extern int early_irq_init(void);
  711 extern int arch_probe_nr_irqs(void);
  712 extern int arch_early_irq_init(void);
  713 
  714 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
  715 /*
  716  * We want to know which function is an entrypoint of a hardirq or a softirq.
  717  */
  718 #define __irq_entry		 __attribute__((__section__(".irqentry.text")))
  719 #define __softirq_entry  \
  720 	__attribute__((__section__(".softirqentry.text")))
  721 
  722 /* Limits of hardirq entrypoints */
  723 extern char __irqentry_text_start[];
  724 extern char __irqentry_text_end[];
  725 /* Limits of softirq entrypoints */
  726 extern char __softirqentry_text_start[];
  727 extern char __softirqentry_text_end[];
  728 
  729 #else
  730 #define __irq_entry
  731 #define __softirq_entry
  732 #endif
  733 
  734 #endif
    1 #ifndef LINUX_KMEMCHECK_H
    2 #define LINUX_KMEMCHECK_H
    3 
    4 #include <linux/mm_types.h>
    5 #include <linux/types.h>
    6 
    7 #ifdef CONFIG_KMEMCHECK
    8 extern int kmemcheck_enabled;
    9 
   10 /* The slab-related functions. */
   11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
   12 void kmemcheck_free_shadow(struct page *page, int order);
   13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
   14 			  size_t size);
   15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
   16 
   17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
   18 			       gfp_t gfpflags);
   19 
   20 void kmemcheck_show_pages(struct page *p, unsigned int n);
   21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
   22 
   23 bool kmemcheck_page_is_tracked(struct page *p);
   24 
   25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
   26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
   27 void kmemcheck_mark_initialized(void *address, unsigned int n);
   28 void kmemcheck_mark_freed(void *address, unsigned int n);
   29 
   30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
   31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
   32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
   33 
   34 int kmemcheck_show_addr(unsigned long address);
   35 int kmemcheck_hide_addr(unsigned long address);
   36 
   37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
   38 
   39 /*
   40  * Bitfield annotations
   41  *
   42  * How to use: If you have a struct using bitfields, for example
   43  *
   44  *     struct a {
   45  *             int x:8, y:8;
   46  *     };
   47  *
   48  * then this should be rewritten as
   49  *
   50  *     struct a {
   51  *             kmemcheck_bitfield_begin(flags);
   52  *             int x:8, y:8;
   53  *             kmemcheck_bitfield_end(flags);
   54  *     };
   55  *
   56  * Now the "flags_begin" and "flags_end" members may be used to refer to the
   57  * beginning and end, respectively, of the bitfield (and things like
   58  * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
   59  * fields should be annotated:
   60  *
   61  *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
   62  *     kmemcheck_annotate_bitfield(a, flags);
   63  */
   64 #define kmemcheck_bitfield_begin(name)	\
   65 	int name##_begin[0];
   66 
   67 #define kmemcheck_bitfield_end(name)	\
   68 	int name##_end[0];
   69 
   70 #define kmemcheck_annotate_bitfield(ptr, name)				\
   71 	do {								\
   72 		int _n;							\
   73 									\
   74 		if (!ptr)						\
   75 			break;						\
   76 									\
   77 		_n = (long) &((ptr)->name##_end)			\
   78 			- (long) &((ptr)->name##_begin);		\
   79 		BUILD_BUG_ON(_n < 0);					\
   80 									\
   81 		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
   82 	} while (0)
   83 
   84 #define kmemcheck_annotate_variable(var)				\
   85 	do {								\
   86 		kmemcheck_mark_initialized(&(var), sizeof(var));	\
   87 	} while (0)							\
   88 
   89 #else
   90 #define kmemcheck_enabled 0
   91 
   92 static inline void
   93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
   94 {
   95 }
   96 
   97 static inline void
   98 kmemcheck_free_shadow(struct page *page, int order)
   99 {
  100 }
  101 
  102 static inline void
  103 kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
  104 		     size_t size)
  105 {
  106 }
  107 
  108 static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
  109 				       size_t size)
  110 {
  111 }
  112 
  113 static inline void kmemcheck_pagealloc_alloc(struct page *p,
  114 	unsigned int order, gfp_t gfpflags)
  115 {
  116 }
  117 
  118 static inline bool kmemcheck_page_is_tracked(struct page *p)
  119 {
  120 	return false;
  121 }
  122 
  123 static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
  124 {
  125 }
  126 
  127 static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
  128 {
  129 }
  130 
  131 static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
  132 {
  133 }
  134 
  135 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
  136 {
  137 }
  138 
  139 static inline void kmemcheck_mark_unallocated_pages(struct page *p,
  140 						    unsigned int n)
  141 {
  142 }
  143 
  144 static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
  145 						      unsigned int n)
  146 {
  147 }
  148 
  149 static inline void kmemcheck_mark_initialized_pages(struct page *p,
  150 						    unsigned int n)
  151 {
  152 }
  153 
  154 static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
  155 {
  156 	return true;
  157 }
  158 
  159 #define kmemcheck_bitfield_begin(name)
  160 #define kmemcheck_bitfield_end(name)
  161 #define kmemcheck_annotate_bitfield(ptr, name)	\
  162 	do {					\
  163 	} while (0)
  164 
  165 #define kmemcheck_annotate_variable(var)	\
  166 	do {					\
  167 	} while (0)
  168 
  169 #endif /* CONFIG_KMEMCHECK */
  170 
  171 #endif /* LINUX_KMEMCHECK_H */
    1 /*
    2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
    3  *		operating system.  INET is implemented using the  BSD Socket
    4  *		interface as the means of communication with the user level.
    5  *
    6  *		Definitions for the Interfaces handler.
    7  *
    8  * Version:	@(#)dev.h	1.0.10	08/12/93
    9  *
   10  * Authors:	Ross Biro
   11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   12  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
   13  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
   14  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
   15  *		Bjorn Ekwall. <bj0rn@blox.se>
   16  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
   17  *
   18  *		This program is free software; you can redistribute it and/or
   19  *		modify it under the terms of the GNU General Public License
   20  *		as published by the Free Software Foundation; either version
   21  *		2 of the License, or (at your option) any later version.
   22  *
   23  *		Moved to /usr/include/linux for NET3
   24  */
   25 #ifndef _LINUX_NETDEVICE_H
   26 #define _LINUX_NETDEVICE_H
   27 
   28 #include <linux/timer.h>
   29 #include <linux/bug.h>
   30 #include <linux/delay.h>
   31 #include <linux/atomic.h>
   32 #include <linux/prefetch.h>
   33 #include <asm/cache.h>
   34 #include <asm/byteorder.h>
   35 
   36 #include <linux/percpu.h>
   37 #include <linux/rculist.h>
   38 #include <linux/dmaengine.h>
   39 #include <linux/workqueue.h>
   40 #include <linux/dynamic_queue_limits.h>
   41 
   42 #include <linux/ethtool.h>
   43 #include <net/net_namespace.h>
   44 #include <net/dsa.h>
   45 #ifdef CONFIG_DCB
   46 #include <net/dcbnl.h>
   47 #endif
   48 #include <net/netprio_cgroup.h>
   49 
   50 #include <linux/netdev_features.h>
   51 #include <linux/neighbour.h>
   52 #include <uapi/linux/netdevice.h>
   53 #include <uapi/linux/if_bonding.h>
   54 #include <uapi/linux/pkt_cls.h>
   55 #include <linux/hashtable.h>
   56 
   57 struct netpoll_info;
   58 struct device;
   59 struct phy_device;
   60 /* 802.11 specific */
   61 struct wireless_dev;
   62 /* 802.15.4 specific */
   63 struct wpan_dev;
   64 struct mpls_dev;
   65 /* UDP Tunnel offloads */
   66 struct udp_tunnel_info;
   67 struct bpf_prog;
   68 
   69 void netdev_set_default_ethtool_ops(struct net_device *dev,
   70 				    const struct ethtool_ops *ops);
   71 
   72 /* Backlog congestion levels */
   73 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
   74 #define NET_RX_DROP		1	/* packet dropped */
   75 
   76 /*
   77  * Transmit return codes: transmit return codes originate from three different
   78  * namespaces:
   79  *
   80  * - qdisc return codes
   81  * - driver transmit return codes
   82  * - errno values
   83  *
   84  * Drivers are allowed to return any one of those in their hard_start_xmit()
   85  * function. Real network devices commonly used with qdiscs should only return
   86  * the driver transmit return codes though - when qdiscs are used, the actual
   87  * transmission happens asynchronously, so the value is not propagated to
   88  * higher layers. Virtual network devices transmit synchronously; in this case
   89  * the driver transmit return codes are consumed by dev_queue_xmit(), and all
   90  * others are propagated to higher layers.
   91  */
   92 
   93 /* qdisc ->enqueue() return codes. */
   94 #define NET_XMIT_SUCCESS	0x00
   95 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
   96 #define NET_XMIT_CN		0x02	/* congestion notification	*/
   97 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
   98 
   99 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  100  * indicates that the device will soon be dropping packets, or already drops
  101  * some packets of the same priority; prompting us to send less aggressively. */
  102 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
  103 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
  104 
  105 /* Driver transmit return codes */
  106 #define NETDEV_TX_MASK		0xf0
  107 
  108 enum netdev_tx {
  109 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
  110 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
  111 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
  112 };
  113 typedef enum netdev_tx netdev_tx_t;
  114 
  115 /*
  116  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
  117  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
  118  */
  119 static inline bool dev_xmit_complete(int rc)
  120 {
  121 	/*
  122 	 * Positive cases with an skb consumed by a driver:
  123 	 * - successful transmission (rc == NETDEV_TX_OK)
  124 	 * - error while transmitting (rc < 0)
  125 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
  126 	 */
  127 	if (likely(rc < NET_XMIT_MASK))
  128 		return true;
  129 
  130 	return false;
  131 }
  132 
  133 /*
  134  *	Compute the worst-case header length according to the protocols
  135  *	used.
  136  */
  137 
  138 #if defined(CONFIG_HYPERV_NET)
  139 # define LL_MAX_HEADER 128
  140 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
  141 # if defined(CONFIG_MAC80211_MESH)
  142 #  define LL_MAX_HEADER 128
  143 # else
  144 #  define LL_MAX_HEADER 96
  145 # endif
  146 #else
  147 # define LL_MAX_HEADER 32
  148 #endif
  149 
  150 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
  151     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
  152 #define MAX_HEADER LL_MAX_HEADER
  153 #else
  154 #define MAX_HEADER (LL_MAX_HEADER + 48)
  155 #endif
  156 
  157 /*
  158  *	Old network device statistics. Fields are native words
  159  *	(unsigned long) so they can be read and written atomically.
  160  */
  161 
  162 struct net_device_stats {
  163 	unsigned long	rx_packets;
  164 	unsigned long	tx_packets;
  165 	unsigned long	rx_bytes;
  166 	unsigned long	tx_bytes;
  167 	unsigned long	rx_errors;
  168 	unsigned long	tx_errors;
  169 	unsigned long	rx_dropped;
  170 	unsigned long	tx_dropped;
  171 	unsigned long	multicast;
  172 	unsigned long	collisions;
  173 	unsigned long	rx_length_errors;
  174 	unsigned long	rx_over_errors;
  175 	unsigned long	rx_crc_errors;
  176 	unsigned long	rx_frame_errors;
  177 	unsigned long	rx_fifo_errors;
  178 	unsigned long	rx_missed_errors;
  179 	unsigned long	tx_aborted_errors;
  180 	unsigned long	tx_carrier_errors;
  181 	unsigned long	tx_fifo_errors;
  182 	unsigned long	tx_heartbeat_errors;
  183 	unsigned long	tx_window_errors;
  184 	unsigned long	rx_compressed;
  185 	unsigned long	tx_compressed;
  186 };
  187 
  188 
  189 #include <linux/cache.h>
  190 #include <linux/skbuff.h>
  191 
  192 #ifdef CONFIG_RPS
  193 #include <linux/static_key.h>
  194 extern struct static_key rps_needed;
  195 extern struct static_key rfs_needed;
  196 #endif
  197 
  198 struct neighbour;
  199 struct neigh_parms;
  200 struct sk_buff;
  201 
  202 struct netdev_hw_addr {
  203 	struct list_head	list;
  204 	unsigned char		addr[MAX_ADDR_LEN];
  205 	unsigned char		type;
  206 #define NETDEV_HW_ADDR_T_LAN		1
  207 #define NETDEV_HW_ADDR_T_SAN		2
  208 #define NETDEV_HW_ADDR_T_SLAVE		3
  209 #define NETDEV_HW_ADDR_T_UNICAST	4
  210 #define NETDEV_HW_ADDR_T_MULTICAST	5
  211 	bool			global_use;
  212 	int			sync_cnt;
  213 	int			refcount;
  214 	int			synced;
  215 	struct rcu_head		rcu_head;
  216 };
  217 
  218 struct netdev_hw_addr_list {
  219 	struct list_head	list;
  220 	int			count;
  221 };
  222 
  223 #define netdev_hw_addr_list_count(l) ((l)->count)
  224 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
  225 #define netdev_hw_addr_list_for_each(ha, l) \
  226 	list_for_each_entry(ha, &(l)->list, list)
  227 
  228 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
  229 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
  230 #define netdev_for_each_uc_addr(ha, dev) \
  231 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
  232 
  233 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
  234 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
  235 #define netdev_for_each_mc_addr(ha, dev) \
  236 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
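
/* A hedged sketch of walking the multicast list with the iterator above to
 * program a hardware filter; struct my_priv and the write helper are
 * hypothetical. */
static void example_sync_mc_filter(struct net_device *dev,
				   struct my_priv *priv)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		my_hw_filter_add(priv, ha->addr);	/* hypothetical hook */
}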
  237 
  238 struct hh_cache {
  239 	u16		hh_len;
  240 	u16		__pad;
  241 	seqlock_t	hh_lock;
  242 
  243 	/* cached hardware header; allow for machine alignment needs.        */
  244 #define HH_DATA_MOD	16
  245 #define HH_DATA_OFF(__len) \
  246 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  247 #define HH_DATA_ALIGN(__len) \
  248 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  249 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  250 };
  251 
  252 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
  253  * Alternative is:
  254  *   dev->hard_header_len ? (dev->hard_header_len +
  255  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  256  *
  257  * We could use other alignment values, but we must maintain the
  258  * relationship HH alignment <= LL alignment.
  259  */
  260 #define LL_RESERVED_SPACE(dev) \
  261 	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  262 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  263 	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  264 
  265 struct header_ops {
  266 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
  267 			   unsigned short type, const void *daddr,
  268 			   const void *saddr, unsigned int len);
  269 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
  270 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
  271 	void	(*cache_update)(struct hh_cache *hh,
  272 				const struct net_device *dev,
  273 				const unsigned char *haddr);
  274 	bool	(*validate)(const char *ll_header, unsigned int len);
  275 };
  276 
  277 /* These flag bits are private to the generic network queueing
  278  * layer; they may not be explicitly referenced by any other
  279  * code.
  280  */
  281 
  282 enum netdev_state_t {
  283 	__LINK_STATE_START,
  284 	__LINK_STATE_PRESENT,
  285 	__LINK_STATE_NOCARRIER,
  286 	__LINK_STATE_LINKWATCH_PENDING,
  287 	__LINK_STATE_DORMANT,
  288 };
  289 
  290 
  291 /*
  292  * This structure holds boot-time configured netdevice settings. They
  293  * are then used in the device probing.
  294  */
  295 struct netdev_boot_setup {
  296 	char name[IFNAMSIZ];
  297 	struct ifmap map;
  298 };
  299 #define NETDEV_BOOT_SETUP_MAX 8
  300 
  301 int __init netdev_boot_setup(char *str);
  302 
  303 /*
  304  * Structure for NAPI scheduling similar to tasklet but with weighting
  305  */
  306 struct napi_struct {
  307 	/* The poll_list must only be managed by the entity which
  308 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
  309 	 * whoever atomically sets that bit can add this napi_struct
  310 	 * to the per-CPU poll_list, and whoever clears that bit
  311 	 * can remove from the list right before clearing the bit.
  312 	 */
  313 	struct list_head	poll_list;
  314 
  315 	unsigned long		state;
  316 	int			weight;
  317 	unsigned int		gro_count;
  318 	int			(*poll)(struct napi_struct *, int);
  319 #ifdef CONFIG_NETPOLL
  320 	int			poll_owner;
  321 #endif
  322 	struct net_device	*dev;
  323 	struct sk_buff		*gro_list;
  324 	struct sk_buff		*skb;
  325 	struct hrtimer		timer;
  326 	struct list_head	dev_list;
  327 	struct hlist_node	napi_hash_node;
  328 	unsigned int		napi_id;
  329 };
  330 
  331 enum {
  332 	NAPI_STATE_SCHED,	/* Poll is scheduled */
  333 	NAPI_STATE_MISSED,	/* reschedule a napi */
  334 	NAPI_STATE_DISABLE,	/* Disable pending */
  335 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
  336 	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
  337 	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
  338 	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
  339 };
  340 
  341 enum {
  342 	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
  343 	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
  344 	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
  345 	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
  346 	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
  347 	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
  348 	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
  349 };
  350 
  351 enum gro_result {
  352 	GRO_MERGED,
  353 	GRO_MERGED_FREE,
  354 	GRO_HELD,
  355 	GRO_NORMAL,
  356 	GRO_DROP,
  357 	GRO_CONSUMED,
  358 };
  359 typedef enum gro_result gro_result_t;
  360 
  361 /*
  362  * enum rx_handler_result - Possible return values for rx_handlers.
  363  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
  364  * further.
  365  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
  366  * case skb->dev was changed by rx_handler.
  367  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
  368  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
  369  *
  370  * rx_handlers are functions called from inside __netif_receive_skb(), to do
  371  * special processing of the skb, prior to delivery to protocol handlers.
  372  *
  373  * Currently, a net_device can only have a single rx_handler registered. Trying
  374  * to register a second rx_handler will return -EBUSY.
  375  *
  376  * To register a rx_handler on a net_device, use netdev_rx_handler_register().
  377  * To unregister a rx_handler on a net_device, use
  378  * netdev_rx_handler_unregister().
  379  *
  380  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
  381  * do with the skb.
  382  *
  383  * If the rx_handler consumed the skb in some way, it should return
  384  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
  385  * the skb to be delivered in some other way.
  386  *
  387  * If the rx_handler changed skb->dev, to divert the skb to another
  388  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
  389  * new device will be called if it exists.
  390  *
  391  * If the rx_handler decides the skb should be ignored, it should return
  392  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
  393  * are registered on exact device (ptype->dev == skb->dev).
  394  *
  395  * If the rx_handler didn't change skb->dev, but wants the skb to be normally
  396  * delivered, it should return RX_HANDLER_PASS.
  397  *
  398  * A device without a registered rx_handler will behave as if rx_handler
  399  * returned RX_HANDLER_PASS.
  400  */
  401 
  402 enum rx_handler_result {
  403 	RX_HANDLER_CONSUMED,
  404 	RX_HANDLER_ANOTHER,
  405 	RX_HANDLER_EXACT,
  406 	RX_HANDLER_PASS,
  407 };
  408 typedef enum rx_handler_result rx_handler_result_t;
  409 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
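
/* A hedged sketch of an rx_handler following the contract documented
 * above, to be registered with netdev_rx_handler_register(); the diversion
 * policy and target device are hypothetical. */
static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!my_should_divert(skb))		/* hypothetical policy */
		return RX_HANDLER_PASS;		/* deliver normally */

	skb->dev = my_target_dev;		/* hypothetical net_device */
	*pskb = skb;
	return RX_HANDLER_ANOTHER;		/* reprocess on the new dev */
}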
  410 
  411 void __napi_schedule(struct napi_struct *n);
  412 void __napi_schedule_irqoff(struct napi_struct *n);
  413 
  414 static inline bool napi_disable_pending(struct napi_struct *n)
  415 {
  416 	return test_bit(NAPI_STATE_DISABLE, &n->state);
  417 }
  418 
  419 bool napi_schedule_prep(struct napi_struct *n);
  420 
  421 /**
  422  *	napi_schedule - schedule NAPI poll
  423  *	@n: NAPI context
  424  *
  425  * Schedule NAPI poll routine to be called if it is not already
  426  * running.
  427  */
  428 static inline void napi_schedule(struct napi_struct *n)
  429 {
  430 	if (napi_schedule_prep(n))
  431 		__napi_schedule(n);
  432 }
  433 
  434 /**
  435  *	napi_schedule_irqoff - schedule NAPI poll
  436  *	@n: NAPI context
  437  *
  438  * Variant of napi_schedule(), assuming hard irqs are masked.
  439  */
  440 static inline void napi_schedule_irqoff(struct napi_struct *n)
  441 {
  442 	if (napi_schedule_prep(n))
  443 		__napi_schedule_irqoff(n);
  444 }
  445 
  446 /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
  447 static inline bool napi_reschedule(struct napi_struct *napi)
  448 {
  449 	if (napi_schedule_prep(napi)) {
  450 		__napi_schedule(napi);
  451 		return true;
  452 	}
  453 	return false;
  454 }
  455 
  456 bool napi_complete_done(struct napi_struct *n, int work_done);
  457 /**
  458  *	napi_complete - NAPI processing complete
  459  *	@n: NAPI context
  460  *
  461  * Mark NAPI processing as complete.
  462  * Consider using napi_complete_done() instead.
  463  * Return false if device should avoid rearming interrupts.
  464  */
  465 static inline bool napi_complete(struct napi_struct *n)
  466 {
  467 	return napi_complete_done(n, 0);
  468 }
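
/* A hedged sketch of the canonical poll callback built from the helpers
 * above; my_rx_clean() is a hypothetical routine returning the number of
 * packets processed. */
static int my_poll(struct napi_struct *napi, int budget)
{
	int done = my_rx_clean(napi->dev, budget);

	if (done < budget)
		napi_complete_done(napi, done);	/* safe to re-arm IRQs now */

	return done;	/* never more than budget */
}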
  469 
  470 /**
  471  *	napi_hash_del - remove a NAPI from global table
  472  *	@napi: NAPI context
  473  *
  474  * Warning: caller must observe RCU grace period
  475  * before freeing memory containing @napi, if
  476  * this function returns true.
  477  * Note: core networking stack automatically calls it
  478  * from netif_napi_del().
  479  * Drivers might want to call this helper to combine all
  480  * the needed RCU grace periods into a single one.
  481  */
  482 bool napi_hash_del(struct napi_struct *napi);
  483 
  484 /**
  485  *	napi_disable - prevent NAPI from scheduling
  486  *	@n: NAPI context
  487  *
  488  * Stop NAPI from being scheduled on this context.
  489  * Waits till any outstanding processing completes.
  490  */
  491 void napi_disable(struct napi_struct *n);
  492 
  493 /**
  494  *	napi_enable - enable NAPI scheduling
  495  *	@n: NAPI context
  496  *
  497  * Allow NAPI to be scheduled on this context again.
  498  * Must be paired with napi_disable.
  499  */
  500 static inline void napi_enable(struct napi_struct *n)
  501 {
  502 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  503 	smp_mb__before_atomic();
  504 	clear_bit(NAPI_STATE_SCHED, &n->state);
  505 	clear_bit(NAPI_STATE_NPSVC, &n->state);
  506 }
  507 
  508 /**
  509  *	napi_synchronize - wait until NAPI is not running
  510  *	@n: NAPI context
  511  *
  512  * Wait until NAPI is done being scheduled on this context.
  513  * Waits till any outstanding processing completes but
  514  * does not disable future activations.
  515  */
  516 static inline void napi_synchronize(const struct napi_struct *n)
  517 {
  518 	if (IS_ENABLED(CONFIG_SMP))
  519 		while (test_bit(NAPI_STATE_SCHED, &n->state))
  520 			msleep(1);
  521 	else
  522 		barrier();
  523 }
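For reconfiguration paths, napi_disable()/napi_enable() bracket the critical section, while napi_synchronize() is the non-disabling wait. A sketch, assuming the same hypothetical adapter and a my_reprogram_rings() helper:

	static void my_reconfigure(struct my_adapter *ad)
	{
		napi_disable(&ad->napi);	/* waits until poll is not running */
		my_reprogram_rings(ad);		/* device-specific work */
		napi_enable(&ad->napi);		/* pairs with napi_disable() */
	}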
  524 
  525 enum netdev_queue_state_t {
  526 	__QUEUE_STATE_DRV_XOFF,
  527 	__QUEUE_STATE_STACK_XOFF,
  528 	__QUEUE_STATE_FROZEN,
  529 };
  530 
  531 #define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
  532 #define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
  533 #define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)
  534 
  535 #define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
  536 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
  537 					QUEUE_STATE_FROZEN)
  538 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
  539 					QUEUE_STATE_FROZEN)
  540 
  541 /*
  542  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
  543  * netif_tx_* functions below are used to manipulate this flag.  The
  544  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
  545  * queue independently.  The netif_xmit_*stopped functions below are called
  546  * to check if the queue has been stopped by the driver or stack (either
  547  * of the XOFF bits are set in the state).  Drivers should not need to call
  548  * netif_xmit*stopped functions, they should only be using netif_tx_*.
  549  */
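In driver terms, the flow described above looks roughly like the sketch below; my_post_descriptor(), my_ring_full(), my_reclaim_descriptors() and my_ring_has_room() are hypothetical ring helpers, and only the netif_tx_* API is used, as the comment advises:

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		struct netdev_queue *txq = skb_get_tx_queue(dev, skb);

		my_post_descriptor(dev, skb);		/* queue to hardware */
		if (my_ring_full(dev))
			netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */
		return NETDEV_TX_OK;
	}

	static void my_tx_complete(struct net_device *dev,
				   struct netdev_queue *txq)
	{
		my_reclaim_descriptors(dev);
		if (my_ring_has_room(dev))
			netif_tx_wake_queue(txq);	/* clears the XOFF bit */
	}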
  550 
  551 struct netdev_queue {
  552 /*
  553  * read-mostly part
  554  */
  555 	struct net_device	*dev;
  556 	struct Qdisc __rcu	*qdisc;
  557 	struct Qdisc		*qdisc_sleeping;
  558 #ifdef CONFIG_SYSFS
  559 	struct kobject		kobj;
  560 #endif
  561 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  562 	int			numa_node;
  563 #endif
  564 	unsigned long		tx_maxrate;
  565 	/*
  566 	 * Number of TX timeouts for this queue
  567 	 * (/sys/class/net/DEV/Q/trans_timeout)
  568 	 */
  569 	unsigned long		trans_timeout;
  570 /*
  571  * write-mostly part
  572  */
  573 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  574 	int			xmit_lock_owner;
  575 	/*
  576 	 * Time (in jiffies) of last Tx
  577 	 */
  578 	unsigned long		trans_start;
  579 
  580 	unsigned long		state;
  581 
  582 #ifdef CONFIG_BQL
  583 	struct dql		dql;
  584 #endif
  585 } ____cacheline_aligned_in_smp;
  586 
  587 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
  588 {
  589 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  590 	return q->numa_node;
  591 #else
  592 	return NUMA_NO_NODE;
  593 #endif
  594 }
  595 
  596 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
  597 {
  598 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  599 	q->numa_node = node;
  600 #endif
  601 }
  602 
  603 #ifdef CONFIG_RPS
  604 /*
  605  * This structure holds an RPS map which can be of variable length.  The
  606  * map is an array of CPUs.
  607  */
  608 struct rps_map {
  609 	unsigned int len;
  610 	struct rcu_head rcu;
  611 	u16 cpus[0];
  612 };
  613 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
  614 
  615 /*
  616  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
  617  * tail pointer for that CPU's input queue at the time of last enqueue, and
  618  * a hardware filter index.
  619  */
  620 struct rps_dev_flow {
  621 	u16 cpu;
  622 	u16 filter;
  623 	unsigned int last_qtail;
  624 };
  625 #define RPS_NO_FILTER 0xffff
  626 
  627 /*
  628  * The rps_dev_flow_table structure contains a table of flow mappings.
  629  */
  630 struct rps_dev_flow_table {
  631 	unsigned int mask;
  632 	struct rcu_head rcu;
  633 	struct rps_dev_flow flows[0];
  634 };
  635 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
  636     ((_num) * sizeof(struct rps_dev_flow)))
  637 
  638 /*
  639  * The rps_sock_flow_table contains mappings of flows to the last CPU
  640  * on which they were processed by the application (set in recvmsg).
  641  * Each entry is a 32bit value. Upper part is the high-order bits
  642  * of flow hash, lower part is CPU number.
  643  * rps_cpu_mask is used to partition the space, depending on number of
  644  * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
  645  * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
  646  * meaning we use 32-6=26 bits for the hash.
  647  */
  648 struct rps_sock_flow_table {
  649 	u32	mask;
  650 
  651 	u32	ents[0] ____cacheline_aligned_in_smp;
  652 };
  653 #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
  654 
  655 #define RPS_NO_CPU 0xffff
  656 
  657 extern u32 rps_cpu_mask;
  658 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
  659 
  660 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
  661 					u32 hash)
  662 {
  663 	if (table && hash) {
  664 		unsigned int index = hash & table->mask;
  665 		u32 val = hash & ~rps_cpu_mask;
  666 
  667 		/* We only give a hint, preemption can change CPU under us */
  668 		val |= raw_smp_processor_id();
  669 
  670 		if (table->ents[index] != val)
  671 			table->ents[index] = val;
  672 	}
  673 }
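To make the partitioning concrete, assume 64 possible CPUs, so rps_cpu_mask == 0x3f; each table entry then splits into 6 CPU bits and 26 hash bits (the values below are illustrative only):

	static void rps_entry_layout_example(void)
	{
		u32 hash  = 0xdeadbeef;			/* example flow hash */
		u32 entry = (hash & ~0x3fU) | raw_smp_processor_id();
		u32 cpu   = entry & 0x3fU;		/* recorded CPU */
		u32 sig   = entry & ~0x3fU;		/* 26 high hash bits */

		(void)cpu; (void)sig;			/* illustration only */
	}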
  674 
  675 #ifdef CONFIG_RFS_ACCEL
  676 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
  677 			 u16 filter_id);
  678 #endif
  679 #endif /* CONFIG_RPS */
  680 
  681 /* This structure contains an instance of an RX queue. */
  682 struct netdev_rx_queue {
  683 #ifdef CONFIG_RPS
  684 	struct rps_map __rcu		*rps_map;
  685 	struct rps_dev_flow_table __rcu	*rps_flow_table;
  686 #endif
  687 	struct kobject			kobj;
  688 	struct net_device		*dev;
  689 } ____cacheline_aligned_in_smp;
  690 
  691 /*
  692  * RX queue sysfs structures and functions.
  693  */
  694 struct rx_queue_attribute {
  695 	struct attribute attr;
  696 	ssize_t (*show)(struct netdev_rx_queue *queue,
  697 	    struct rx_queue_attribute *attr, char *buf);
  698 	ssize_t (*store)(struct netdev_rx_queue *queue,
  699 	    struct rx_queue_attribute *attr, const char *buf, size_t len);
  700 };
  701 
  702 #ifdef CONFIG_XPS
  703 /*
  704  * This structure holds an XPS map which can be of variable length.  The
  705  * map is an array of queues.
  706  */
  707 struct xps_map {
  708 	unsigned int len;
  709 	unsigned int alloc_len;
  710 	struct rcu_head rcu;
  711 	u16 queues[0];
  712 };
  713 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
  714 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
  715        - sizeof(struct xps_map)) / sizeof(u16))
  716 
  717 /*
  718  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
  719  */
  720 struct xps_dev_maps {
  721 	struct rcu_head rcu;
  722 	struct xps_map __rcu *cpu_map[0];
  723 };
  724 #define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +		\
  725 	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
  726 #endif /* CONFIG_XPS */
  727 
  728 #define TC_MAX_QUEUE	16
  729 #define TC_BITMASK	15
  730 /* HW offloaded queuing disciplines txq count and offset maps */
  731 struct netdev_tc_txq {
  732 	u16 count;
  733 	u16 offset;
  734 };
  735 
  736 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
  737 /*
  738  * This structure is to hold information about the device
  739  * configured to run FCoE protocol stack.
  740  */
  741 struct netdev_fcoe_hbainfo {
  742 	char	manufacturer[64];
  743 	char	serial_number[64];
  744 	char	hardware_version[64];
  745 	char	driver_version[64];
  746 	char	optionrom_version[64];
  747 	char	firmware_version[64];
  748 	char	model[256];
  749 	char	model_description[256];
  750 };
  751 #endif
  752 
  753 #define MAX_PHYS_ITEM_ID_LEN 32
  754 
  755 /* This structure holds a unique identifier to identify some
  756  * physical item (port for example) used by a netdevice.
  757  */
  758 struct netdev_phys_item_id {
  759 	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
  760 	unsigned char id_len;
  761 };
  762 
  763 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
  764 					    struct netdev_phys_item_id *b)
  765 {
  766 	return a->id_len == b->id_len &&
  767 	       memcmp(a->id, b->id, a->id_len) == 0;
  768 }
  769 
  770 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  771 				       struct sk_buff *skb);
  772 
  773 /* These structures hold the attributes of qdisc and classifiers
  774  * that are being passed to the netdevice through the setup_tc op.
  775  */
  776 enum {
  777 	TC_SETUP_MQPRIO,
  778 	TC_SETUP_CLSU32,
  779 	TC_SETUP_CLSFLOWER,
  780 	TC_SETUP_MATCHALL,
  781 	TC_SETUP_CLSBPF,
  782 };
  783 
  784 struct tc_cls_u32_offload;
  785 
  786 struct tc_to_netdev {
  787 	unsigned int type;
  788 	union {
  789 		u8 tc;
  790 		struct tc_cls_u32_offload *cls_u32;
  791 		struct tc_cls_flower_offload *cls_flower;
  792 		struct tc_cls_matchall_offload *cls_mall;
  793 		struct tc_cls_bpf_offload *cls_bpf;
  794 	};
  795 	bool egress_dev;
  796 };
  797 
  798 /* These structures hold the attributes of xdp state that are being passed
  799  * to the netdevice through the xdp op.
  800  */
  801 enum xdp_netdev_command {
  802 	/* Set or clear a bpf program used in the earliest stages of packet
  803 	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
  804 	 * is responsible for calling bpf_prog_put on any old progs that are
  805 	 * stored. In case of error, the callee need not release the new prog
  806 	 * reference, but on success it takes ownership and must bpf_prog_put
  807 	 * when it is no longer used.
  808 	 */
  809 	XDP_SETUP_PROG,
  810 	/* Check if a bpf program is set on the device.  The callee should
  811 	 * return true if a program is currently attached and running.
  812 	 */
  813 	XDP_QUERY_PROG,
  814 };
  815 
  816 struct netdev_xdp {
  817 	enum xdp_netdev_command command;
  818 	union {
  819 		/* XDP_SETUP_PROG */
  820 		struct bpf_prog *prog;
  821 		/* XDP_QUERY_PROG */
  822 		bool prog_attached;
  823 	};
  824 };
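A driver's ndo_xdp implementation following the command contract above might be sketched as follows; struct my_priv and its xdp_prog field are assumptions:

	static int my_xdp(struct net_device *dev, struct netdev_xdp *xdp)
	{
		struct my_priv *priv = netdev_priv(dev);

		switch (xdp->command) {
		case XDP_SETUP_PROG:
			/* On success we own the new prog and must release
			 * any old one, per the enum comment above.
			 */
			if (priv->xdp_prog)
				bpf_prog_put(priv->xdp_prog);
			priv->xdp_prog = xdp->prog;
			return 0;
		case XDP_QUERY_PROG:
			xdp->prog_attached = !!priv->xdp_prog;
			return 0;
		default:
			return -EINVAL;
		}
	}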
  825 
  826 /*
  827  * This structure defines the management hooks for network devices.
  828  * The following hooks can be defined; unless noted otherwise, they are
  829  * optional and can be filled with a null pointer.
  830  *
  831  * int (*ndo_init)(struct net_device *dev);
  832  *     This function is called once when a network device is registered.
  833  *     The network device can use this for any late stage initialization
  834  *     or semantic validation. It can fail with an error code which will
  835  *     be propagated back to register_netdev.
  836  *
  837  * void (*ndo_uninit)(struct net_device *dev);
  838  *     This function is called when device is unregistered or when registration
  839  *     fails. It is not called if init fails.
  840  *
  841  * int (*ndo_open)(struct net_device *dev);
  842  *     This function is called when a network device transitions to the up
  843  *     state.
  844  *
  845  * int (*ndo_stop)(struct net_device *dev);
  846  *     This function is called when a network device transitions to the down
  847  *     state.
  848  *
  849  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  850  *                               struct net_device *dev);
  851  *	Called when a packet needs to be transmitted.
  852  *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
  853  *	the queue before that can happen; it's for obsolete devices and weird
  854  *	corner cases, but the stack really does a non-trivial amount
  855  *	of useless work if you return NETDEV_TX_BUSY.
  856  *	Required; cannot be NULL.
  857  *
  858  * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
  859  *					   struct net_device *dev,
  860  *					   netdev_features_t features);
  861  *	Called by core transmit path to determine if device is capable of
  862  *	performing offload operations on a given packet. This is to give
  863  *	the device an opportunity to implement any restrictions that cannot
  864  *	be otherwise expressed by feature flags. The check is called with
  865  *	the set of features that the stack has calculated and it returns
  866  *	those the driver believes to be appropriate.
  867  *
  868  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  869  *                         void *accel_priv, select_queue_fallback_t fallback);
  870  *	Called to decide which queue to use when device supports multiple
  871  *	transmit queues.
  872  *
  873  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  874  *	This function is called to allow device receiver to make
  875  *	changes to configuration when multicast or promiscuous is enabled.
  876  *
  877  * void (*ndo_set_rx_mode)(struct net_device *dev);
  878  *	This function is called when the device's address lists or RX mode change.
  879  *	If driver handles unicast address filtering, it should set
  880  *	IFF_UNICAST_FLT in its priv_flags.
  881  *
  882  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
  883  *	This function is called when the Media Access Control address
  884  *	needs to be changed. If this interface is not defined, the
  885  *	MAC address cannot be changed.
  886  *
  887  * int (*ndo_validate_addr)(struct net_device *dev);
  888  *	Test if Media Access Control address is valid for the device.
  889  *
  890  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
  891  *	Called when a user requests an ioctl which can't be handled by
  892  *	the generic interface code. If not defined, ioctls return a
  893  *	"not supported" error code.
  894  *
  895  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
  896  *	Used to set a network device's bus interface parameters. This interface
  897  *	is retained for legacy reasons; new devices should use the bus
  898  *	interface (PCI) for low level management.
  899  *
  900  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  901  *	Called when a user wants to change the Maximum Transfer Unit
  902  *	of a device. If not defined, any request to change the MTU
  903  *	will return an error.
  904  *
  905  * void (*ndo_tx_timeout)(struct net_device *dev);
  906  *	Callback used when the transmitter has not made any progress
  907  *	for dev->watchdog ticks.
  908  *
  909  * void (*ndo_get_stats64)(struct net_device *dev,
  910  *                         struct rtnl_link_stats64 *storage);
  911  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  912  *	Called when a user wants to get the network device usage
  913  *	statistics. Drivers must do one of the following:
  914  *	1. Define @ndo_get_stats64 to fill in a zero-initialised
  915  *	   rtnl_link_stats64 structure passed by the caller.
  916  *	2. Define @ndo_get_stats to update a net_device_stats structure
  917  *	   (which should normally be dev->stats) and return a pointer to
  918  *	   it. The structure may be changed asynchronously only if each
  919  *	   field is written atomically.
  920  *	3. Update dev->stats asynchronously and atomically, and define
  921  *	   neither operation.
  922  *
  923  * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
  924  *	Return true if this device supports offload stats of this attr_id.
  925  *
  926  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
  927  *	void *attr_data)
  928  *	Get statistics for offload operations by attr_id. Write them into
  929  *	the attr_data buffer.
  930  *
  931  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
  932  *	If device supports VLAN filtering this function is called when a
  933  *	VLAN id is registered.
  934  *
  935  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
  936  *	If device supports VLAN filtering this function is called when a
  937  *	VLAN id is unregistered.
  938  *
  939  * void (*ndo_poll_controller)(struct net_device *dev);
  940  *
  941  *	SR-IOV management functions.
  942  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  943  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
  944  *			  u8 qos, __be16 proto);
  945  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
  946  *			  int max_tx_rate);
  947  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  948  * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
  949  * int (*ndo_get_vf_config)(struct net_device *dev,
  950  *			    int vf, struct ifla_vf_info *ivf);
  951  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
  952  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  953  *			  struct nlattr *port[]);
  954  *
  955  *      Enable or disable a VF's ability to query its RSS Redirection Table and
  956  *      Hash Key. This is needed since on some devices VFs share this information
  957  *      with the PF and querying it may introduce a theoretical security risk.
  958  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
  959  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  960  * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
  961  *		       __be16 protocol, struct tc_to_netdev *tc);
  962  *	Called to setup any 'tc' scheduler, classifier or action on @dev.
  963  *	This is always called from the stack with the rtnl lock held and netif
  964  *	tx queues stopped. This allows the netdevice to perform queue
  965  *	management safely.
  966  *
  967  *	Fiber Channel over Ethernet (FCoE) offload functions.
  968  * int (*ndo_fcoe_enable)(struct net_device *dev);
  969  *	Called when the FCoE protocol stack wants to start using LLD for FCoE
  970  *	so the underlying device can perform whatever needed configuration or
  971  *	initialization to support acceleration of FCoE traffic.
  972  *
  973  * int (*ndo_fcoe_disable)(struct net_device *dev);
  974  *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
  975  *	so the underlying device can perform whatever needed clean-ups to
  976  *	stop supporting acceleration of FCoE traffic.
  977  *
  978  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
  979  *			     struct scatterlist *sgl, unsigned int sgc);
  980  *	Called when the FCoE Initiator wants to initialize an I/O that
  981  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  982  *	perform necessary setup and returns 1 to indicate the device is set up
  983  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  984  *
  985  * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
  986  *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
  987  *	indicated by the FC exchange id 'xid', so the underlying device can
  988  *	clean up and reuse resources for later DDP requests.
  989  *
  990  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
  991  *			      struct scatterlist *sgl, unsigned int sgc);
  992  *	Called when the FCoE Target wants to initialize an I/O that
  993  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  994  *	perform necessary setup and returns 1 to indicate the device is set up
  995  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  996  *
  997  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
  998  *			       struct netdev_fcoe_hbainfo *hbainfo);
  999  *	Called when the FCoE Protocol stack wants information on the underlying
 1000  *	device. This information is utilized by the FCoE protocol stack to
 1001  *	register attributes with Fiber Channel management service as per the
 1002  *	FC-GS Fabric Device Management Information (FDMI) specification.
 1003  *
 1004  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 1005  *	Called when the underlying device wants to override default World Wide
 1006  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 1007  *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 1008  *	protocol stack to use.
 1009  *
 1010  *	RFS acceleration.
 1011  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 1012  *			    u16 rxq_index, u32 flow_id);
 1013  *	Set hardware filter for RFS.  rxq_index is the target queue index;
 1014  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 1015  *	Return the filter ID on success, or a negative error code.
 1016  *
 1017  *	Slave management functions (for bridge, bonding, etc).
 1018  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 1019  *	Called to make another netdev an underling.
 1020  *
 1021  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 1022  *	Called to release previously enslaved netdev.
 1023  *
 1024  *      Feature/offload setting functions.
 1025  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 1026  *		netdev_features_t features);
 1027  *	Adjusts the requested feature flags according to device-specific
 1028  *	constraints, and returns the resulting flags. Must not modify
 1029  *	the device state.
 1030  *
 1031  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 1032  *	Called to update device configuration to new features. Passed
 1033  *	feature set might be less than what was returned by ndo_fix_features().
 1034  *	Must return >0 or -errno if it changed dev->features itself.
 1035  *
 1036  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 1037  *		      struct net_device *dev,
 1038  *		      const unsigned char *addr, u16 vid, u16 flags)
 1039  *	Adds an FDB entry to dev for addr.
 1040  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 1041  *		      struct net_device *dev,
 1042  *		      const unsigned char *addr, u16 vid)
 1043  *	Deletes the FDB entry from dev corresponding to addr.
 1044  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 1045  *		       struct net_device *dev, struct net_device *filter_dev,
 1046  *		       int *idx)
 1047  *	Used to add FDB entries to dump requests. Implementers should add
 1048  *	entries to skb and update idx with the number of entries.
 1049  *
 1050  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 1051  *			     u16 flags)
 1052  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 1053  *			     struct net_device *dev, u32 filter_mask,
 1054  *			     int nlflags)
 1055  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 1056  *			     u16 flags);
 1057  *
 1058  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 1059  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 1060  *	which do not represent real hardware may define this to allow their
 1061  *	userspace components to manage their virtual carrier state. Devices
 1062  *	that determine carrier state from physical hardware properties (eg
 1063  *	network cables) or protocol-dependent mechanisms (eg
 1064  *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 1065  *
 1066  * int (*ndo_get_phys_port_id)(struct net_device *dev,
 1067  *			       struct netdev_phys_item_id *ppid);
 1068  *	Called to get the ID of the physical port of this device. If the
 1069  *	driver does not implement this, it is assumed that the hardware is
 1070  *	not able to have multiple net devices on a single physical port.
 1071  *
 1072  * void (*ndo_udp_tunnel_add)(struct net_device *dev,
 1073  *			      struct udp_tunnel_info *ti);
 1074  *	Called by UDP tunnel to notify a driver about the UDP port and socket
 1075  *	address family that a UDP tunnel is listening to. It is called only
 1076  *	when a new port starts listening. The operation is protected by the
 1077  *	RTNL.
 1078  *
 1079  * void (*ndo_udp_tunnel_del)(struct net_device *dev,
 1080  *			      struct udp_tunnel_info *ti);
 1081  *	Called by UDP tunnel to notify the driver about a UDP port and socket
 1082  *	address family that the UDP tunnel is not listening to anymore. The
 1083  *	operation is protected by the RTNL.
 1084  *
 1085  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 1086  *				 struct net_device *dev)
 1087  *	Called by upper layer devices to accelerate switching or other
 1088  *	station functionality into hardware. 'pdev' is the lowerdev
 1089  *	to use for the offload and 'dev' is the net device that will
 1090  *	back the offload. Returns a pointer to the private structure
 1091  *	the upper layer will maintain.
 1092  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 1093  *	Called by upper layer device to delete the station created
 1094  *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 1095  *	the station and priv is the structure returned by the add
 1096  *	operation.
 1097  * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 1098  *				      struct net_device *dev,
 1099  *				      void *priv);
 1100  *	Callback to use for xmit over the accelerated station. This
 1101  *	is used in place of ndo_start_xmit on accelerated net
 1102  *	devices.
 1103  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 1104  *			     int queue_index, u32 maxrate);
 1105  *	Called when a user wants to set a maximum rate limit for a
 1106  *	specific TX queue.
 1107  * int (*ndo_get_iflink)(const struct net_device *dev);
 1108  *	Called to get the iflink value of this device.
 1109  * int (*ndo_change_proto_down)(struct net_device *dev,
 1110  *				 bool proto_down);
 1111  *	This function is used to pass protocol port error state information
 1112  *	to the switch driver. The switch driver can react to the proto_down
 1113  *	by doing a phys down on the associated switch port.
 1114  * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 1115  *	This function is used to get egress tunnel information for a given skb.
 1116  *	This is useful for retrieving outer tunnel header parameters while
 1117  *	sampling packets.
 1118  * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 1119  *	This function is used to specify the headroom that the skb must
 1120  *	consider when allocating an skb during packet reception. Setting an
 1121  *	appropriate rx headroom value avoids copying the skb head on
 1122  *	forward. Setting a negative value resets the rx headroom to the
 1123  *	default value.
 1124  * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
 1125  *	This function is used to set or query state related to XDP on the
 1126  *	netdevice. See definition of enum xdp_netdev_command for details.
 1127  *
 1128  */
 1129 struct net_device_ops {
 1130 	int			(*ndo_init)(struct net_device *dev);
 1131 	void			(*ndo_uninit)(struct net_device *dev);
 1132 	int			(*ndo_open)(struct net_device *dev);
 1133 	int			(*ndo_stop)(struct net_device *dev);
 1134 	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
 1135 						  struct net_device *dev);
 1136 	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
 1137 						      struct net_device *dev,
 1138 						      netdev_features_t features);
 1139 	u16			(*ndo_select_queue)(struct net_device *dev,
 1140 						    struct sk_buff *skb,
 1141 						    void *accel_priv,
 1142 						    select_queue_fallback_t fallback);
 1143 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 1144 						       int flags);
 1145 	void			(*ndo_set_rx_mode)(struct net_device *dev);
 1146 	int			(*ndo_set_mac_address)(struct net_device *dev,
 1147 						       void *addr);
 1148 	int			(*ndo_validate_addr)(struct net_device *dev);
 1149 	int			(*ndo_do_ioctl)(struct net_device *dev,
 1150 					        struct ifreq *ifr, int cmd);
 1151 	int			(*ndo_set_config)(struct net_device *dev,
 1152 					          struct ifmap *map);
 1153 	int			(*ndo_change_mtu)(struct net_device *dev,
 1154 						  int new_mtu);
 1155 	int			(*ndo_neigh_setup)(struct net_device *dev,
 1156 						   struct neigh_parms *);
 1157 	void			(*ndo_tx_timeout) (struct net_device *dev);
 1158 
 1159 	void			(*ndo_get_stats64)(struct net_device *dev,
 1160 						   struct rtnl_link_stats64 *storage);
 1161 	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
 1162 	int			(*ndo_get_offload_stats)(int attr_id,
 1163 							 const struct net_device *dev,
 1164 							 void *attr_data);
 1165 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 1166 
 1167 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
 1168 						       __be16 proto, u16 vid);
 1169 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
 1170 						        __be16 proto, u16 vid);
 1171 #ifdef CONFIG_NET_POLL_CONTROLLER
 1172 	void                    (*ndo_poll_controller)(struct net_device *dev);
 1173 	int			(*ndo_netpoll_setup)(struct net_device *dev,
 1174 						     struct netpoll_info *info);
 1175 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 1176 #endif
 1177 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 1178 						  int queue, u8 *mac);
 1179 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
 1180 						   int queue, u16 vlan,
 1181 						   u8 qos, __be16 proto);
 1182 	int			(*ndo_set_vf_rate)(struct net_device *dev,
 1183 						   int vf, int min_tx_rate,
 1184 						   int max_tx_rate);
 1185 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
 1186 						       int vf, bool setting);
 1187 	int			(*ndo_set_vf_trust)(struct net_device *dev,
 1188 						    int vf, bool setting);
 1189 	int			(*ndo_get_vf_config)(struct net_device *dev,
 1190 						     int vf,
 1191 						     struct ifla_vf_info *ivf);
 1192 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
 1193 							 int vf, int link_state);
 1194 	int			(*ndo_get_vf_stats)(struct net_device *dev,
 1195 						    int vf,
 1196 						    struct ifla_vf_stats
 1197 						    *vf_stats);
 1198 	int			(*ndo_set_vf_port)(struct net_device *dev,
 1199 						   int vf,
 1200 						   struct nlattr *port[]);
 1201 	int			(*ndo_get_vf_port)(struct net_device *dev,
 1202 						   int vf, struct sk_buff *skb);
 1203 	int			(*ndo_set_vf_guid)(struct net_device *dev,
 1204 						   int vf, u64 guid,
 1205 						   int guid_type);
 1206 	int			(*ndo_set_vf_rss_query_en)(
 1207 						   struct net_device *dev,
 1208 						   int vf, bool setting);
 1209 	int			(*ndo_setup_tc)(struct net_device *dev,
 1210 						u32 handle,
 1211 						__be16 protocol,
 1212 						struct tc_to_netdev *tc);
 1213 #if IS_ENABLED(CONFIG_FCOE)
 1214 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 1215 	int			(*ndo_fcoe_disable)(struct net_device *dev);
 1216 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
 1217 						      u16 xid,
 1218 						      struct scatterlist *sgl,
 1219 						      unsigned int sgc);
 1220 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
 1221 						     u16 xid);
 1222 	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
 1223 						       u16 xid,
 1224 						       struct scatterlist *sgl,
 1225 						       unsigned int sgc);
 1226 	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 1227 							struct netdev_fcoe_hbainfo *hbainfo);
 1228 #endif
 1229 
 1230 #if IS_ENABLED(CONFIG_LIBFCOE)
 1231 #define NETDEV_FCOE_WWNN 0
 1232 #define NETDEV_FCOE_WWPN 1
 1233 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
 1234 						    u64 *wwn, int type);
 1235 #endif
 1236 
 1237 #ifdef CONFIG_RFS_ACCEL
 1238 	int			(*ndo_rx_flow_steer)(struct net_device *dev,
 1239 						     const struct sk_buff *skb,
 1240 						     u16 rxq_index,
 1241 						     u32 flow_id);
 1242 #endif
 1243 	int			(*ndo_add_slave)(struct net_device *dev,
 1244 						 struct net_device *slave_dev);
 1245 	int			(*ndo_del_slave)(struct net_device *dev,
 1246 						 struct net_device *slave_dev);
 1247 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
 1248 						    netdev_features_t features);
 1249 	int			(*ndo_set_features)(struct net_device *dev,
 1250 						    netdev_features_t features);
 1251 	int			(*ndo_neigh_construct)(struct net_device *dev,
 1252 						       struct neighbour *n);
 1253 	void			(*ndo_neigh_destroy)(struct net_device *dev,
 1254 						     struct neighbour *n);
 1255 
 1256 	int			(*ndo_fdb_add)(struct ndmsg *ndm,
 1257 					       struct nlattr *tb[],
 1258 					       struct net_device *dev,
 1259 					       const unsigned char *addr,
 1260 					       u16 vid,
 1261 					       u16 flags);
 1262 	int			(*ndo_fdb_del)(struct ndmsg *ndm,
 1263 					       struct nlattr *tb[],
 1264 					       struct net_device *dev,
 1265 					       const unsigned char *addr,
 1266 					       u16 vid);
 1267 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
 1268 						struct netlink_callback *cb,
 1269 						struct net_device *dev,
 1270 						struct net_device *filter_dev,
 1271 						int *idx);
 1272 
 1273 	int			(*ndo_bridge_setlink)(struct net_device *dev,
 1274 						      struct nlmsghdr *nlh,
 1275 						      u16 flags);
 1276 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
 1277 						      u32 pid, u32 seq,
 1278 						      struct net_device *dev,
 1279 						      u32 filter_mask,
 1280 						      int nlflags);
 1281 	int			(*ndo_bridge_dellink)(struct net_device *dev,
 1282 						      struct nlmsghdr *nlh,
 1283 						      u16 flags);
 1284 	int			(*ndo_change_carrier)(struct net_device *dev,
 1285 						      bool new_carrier);
 1286 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
 1287 							struct netdev_phys_item_id *ppid);
 1288 	int			(*ndo_get_phys_port_name)(struct net_device *dev,
 1289 							  char *name, size_t len);
 1290 	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
 1291 						      struct udp_tunnel_info *ti);
 1292 	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
 1293 						      struct udp_tunnel_info *ti);
 1294 	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
 1295 							struct net_device *dev);
 1296 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
 1297 							void *priv);
 1298 
 1299 	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
 1300 							struct net_device *dev,
 1301 							void *priv);
 1302 	int			(*ndo_get_lock_subclass)(struct net_device *dev);
 1303 	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
 1304 						      int queue_index,
 1305 						      u32 maxrate);
 1306 	int			(*ndo_get_iflink)(const struct net_device *dev);
 1307 	int			(*ndo_change_proto_down)(struct net_device *dev,
 1308 							 bool proto_down);
 1309 	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
 1310 						       struct sk_buff *skb);
 1311 	void			(*ndo_set_rx_headroom)(struct net_device *dev,
 1312 						       int needed_headroom);
 1313 	int			(*ndo_xdp)(struct net_device *dev,
 1314 					   struct netdev_xdp *xdp);
 1315 };
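Per the comment block above, only ndo_start_xmit is mandatory. A minimal ops table for a hypothetical Ethernet driver (my_open/my_stop/my_start_xmit are assumed names; eth_validate_addr and eth_mac_addr are the generic kernel helpers) could be:

	static const struct net_device_ops my_netdev_ops = {
		.ndo_open		= my_open,
		.ndo_stop		= my_stop,
		.ndo_start_xmit		= my_start_xmit,	/* required */
		.ndo_validate_addr	= eth_validate_addr,
		.ndo_set_mac_address	= eth_mac_addr,
	};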
 1316 
 1317 /**
 1318  * enum net_device_priv_flags - &struct net_device priv_flags
 1319  *
 1320  * These are the &struct net_device priv_flags; they are only set internally
 1321  * by drivers and used in the kernel. These flags are invisible to
 1322  * userspace; this means that the order of these flags can change
 1323  * during any kernel release.
 1324  *
 1325  * You should have a pretty good reason to be extending these flags.
 1326  *
 1327  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 1328  * @IFF_EBRIDGE: Ethernet bridging device
 1329  * @IFF_BONDING: bonding master or slave
 1330  * @IFF_ISATAP: ISATAP interface (RFC4214)
 1331  * @IFF_WAN_HDLC: WAN HDLC device
 1332  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 1333  *	release skb->dst
 1334  * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 1335  * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 1336  * @IFF_MACVLAN_PORT: device used as macvlan port
 1337  * @IFF_BRIDGE_PORT: device used as bridge port
 1338  * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 1339  * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 1340  * @IFF_UNICAST_FLT: Supports unicast filtering
 1341  * @IFF_TEAM_PORT: device used as team port
 1342  * @IFF_SUPP_NOFCS: device supports sending custom FCS
 1343  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 1344  *	change when it's running
 1345  * @IFF_MACVLAN: Macvlan device
 1346  * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 1347  *	underlying stacked devices
 1348  * @IFF_IPVLAN_MASTER: IPvlan master device
 1349  * @IFF_IPVLAN_SLAVE: IPvlan slave device
 1350  * @IFF_L3MDEV_MASTER: device is an L3 master device
 1351  * @IFF_NO_QUEUE: device can run without qdisc attached
 1352  * @IFF_OPENVSWITCH: device is a Open vSwitch master
 1353  * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 1354  * @IFF_TEAM: device is a team device
 1355  * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 1356  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 1357  *	entity (i.e. the master device for bridged veth)
 1358  * @IFF_MACSEC: device is a MACsec device
 1359  */
 1360 enum netdev_priv_flags {
 1361 	IFF_802_1Q_VLAN			= 1<<0,
 1362 	IFF_EBRIDGE			= 1<<1,
 1363 	IFF_BONDING			= 1<<2,
 1364 	IFF_ISATAP			= 1<<3,
 1365 	IFF_WAN_HDLC			= 1<<4,
 1366 	IFF_XMIT_DST_RELEASE		= 1<<5,
 1367 	IFF_DONT_BRIDGE			= 1<<6,
 1368 	IFF_DISABLE_NETPOLL		= 1<<7,
 1369 	IFF_MACVLAN_PORT		= 1<<8,
 1370 	IFF_BRIDGE_PORT			= 1<<9,
 1371 	IFF_OVS_DATAPATH		= 1<<10,
 1372 	IFF_TX_SKB_SHARING		= 1<<11,
 1373 	IFF_UNICAST_FLT			= 1<<12,
 1374 	IFF_TEAM_PORT			= 1<<13,
 1375 	IFF_SUPP_NOFCS			= 1<<14,
 1376 	IFF_LIVE_ADDR_CHANGE		= 1<<15,
 1377 	IFF_MACVLAN			= 1<<16,
 1378 	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
 1379 	IFF_IPVLAN_MASTER		= 1<<18,
 1380 	IFF_IPVLAN_SLAVE		= 1<<19,
 1381 	IFF_L3MDEV_MASTER		= 1<<20,
 1382 	IFF_NO_QUEUE			= 1<<21,
 1383 	IFF_OPENVSWITCH			= 1<<22,
 1384 	IFF_L3MDEV_SLAVE		= 1<<23,
 1385 	IFF_TEAM			= 1<<24,
 1386 	IFF_RXFH_CONFIGURED		= 1<<25,
 1387 	IFF_PHONY_HEADROOM		= 1<<26,
 1388 	IFF_MACSEC			= 1<<27,
 1389 };
 1390 
 1391 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
 1392 #define IFF_EBRIDGE			IFF_EBRIDGE
 1393 #define IFF_BONDING			IFF_BONDING
 1394 #define IFF_ISATAP			IFF_ISATAP
 1395 #define IFF_WAN_HDLC			IFF_WAN_HDLC
 1396 #define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
 1397 #define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
 1398 #define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
 1399 #define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
 1400 #define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
 1401 #define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
 1402 #define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
 1403 #define IFF_UNICAST_FLT			IFF_UNICAST_FLT
 1404 #define IFF_TEAM_PORT			IFF_TEAM_PORT
 1405 #define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
 1406 #define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
 1407 #define IFF_MACVLAN			IFF_MACVLAN
 1408 #define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
 1409 #define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
 1410 #define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
 1411 #define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
 1412 #define IFF_NO_QUEUE			IFF_NO_QUEUE
 1413 #define IFF_OPENVSWITCH			IFF_OPENVSWITCH
 1414 #define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
 1415 #define IFF_TEAM			IFF_TEAM
 1416 #define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
 1417 #define IFF_MACSEC			IFF_MACSEC
 1418 
 1419 /**
 1420  *	struct net_device - The DEVICE structure.
 1421  *		Actually, this whole structure is a big mistake.  It mixes I/O
 1422  *		data with strictly "high-level" data, and it has to know about
 1423  *		almost every data structure used in the INET module.
 1424  *
 1425  *	@name:	This is the first field of the "visible" part of this structure
 1426  *		(i.e. as seen by users in the "Space.c" file).  It is the name
 1427  *	 	of the interface.
 1428  *
 1429  *	@name_hlist: 	Device name hash chain, please keep it close to name[]
 1430  *	@ifalias:	SNMP alias
 1431  *	@mem_end:	Shared memory end
 1432  *	@mem_start:	Shared memory start
 1433  *	@base_addr:	Device I/O address
 1434  *	@irq:		Device IRQ number
 1435  *
 1436  *	@carrier_changes:	Stats to monitor carrier on<->off transitions
 1437  *
 1438  *	@state:		Generic network queuing layer state, see netdev_state_t
 1439  *	@dev_list:	The global list of network devices
 1440  *	@napi_list:	List entry used for polling NAPI devices
 1441  *	@unreg_list:	List entry used when we are unregistering the
 1442  *			device; see the function unregister_netdev
 1443  *	@close_list:	List entry used when we are closing the device
 1444  *	@ptype_all:     Device-specific packet handlers for all protocols
 1445  *	@ptype_specific: Device-specific, protocol-specific packet handlers
 1446  *
 1447  *	@adj_list:	Directly linked devices, like slaves for bonding
 1448  *	@features:	Currently active device features
 1449  *	@hw_features:	User-changeable features
 1450  *
 1451  *	@wanted_features:	User-requested features
 1452  *	@vlan_features:		Mask of features inheritable by VLAN devices
 1453  *
 1454  *	@hw_enc_features:	Mask of features inherited by encapsulating devices
 1455  *				This field indicates what encapsulation
 1456  *				offloads the hardware is capable of doing,
 1457  *				and drivers will need to set them appropriately.
 1458  *
 1459  *	@mpls_features:	Mask of features inheritable by MPLS
 1460  *
 1461  *	@ifindex:	interface index
 1462  *	@group:		The group the device belongs to
 1463  *
 1464  *	@stats:		Statistics struct, which was left as a legacy, use
 1465  *			rtnl_link_stats64 instead
 1466  *
 1467  *	@rx_dropped:	Dropped packets by core network,
 1468  *			do not use this in drivers
 1469  *	@tx_dropped:	Dropped packets by core network,
 1470  *			do not use this in drivers
 1471  *	@rx_nohandler:	nohandler dropped packets by core network on
 1472  *			inactive devices, do not use this in drivers
 1473  *
 1474  *	@wireless_handlers:	List of functions to handle Wireless Extensions,
 1475  *				instead of ioctl,
 1476  *				see <net/iw_handler.h> for details.
 1477  *	@wireless_data:	Instance data managed by the core of wireless extensions
 1478  *
 1479  *	@netdev_ops:	Includes several pointers to callbacks,
 1480  *			if one wants to override the ndo_*() functions
 1481  *	@ethtool_ops:	Management operations
 1482  *	@ndisc_ops:	Includes callbacks for different IPv6 neighbour
 1483  *			discovery handling. Necessary for e.g. 6LoWPAN.
 1484  *	@header_ops:	Includes callbacks for creating,parsing,caching,etc
 1485  *			of Layer 2 headers.
 1486  *
 1487  *	@flags:		Interface flags (a la BSD)
 1488  *	@priv_flags:	Like 'flags' but invisible to userspace,
 1489  *			see if.h for the definitions
 1490  *	@gflags:	Global flags (kept as legacy)
 1491  *	@padded:	How much padding added by alloc_netdev()
 1492  *	@operstate:	RFC2863 operstate
 1493  *	@link_mode:	Mapping policy to operstate
 1494  *	@if_port:	Selectable AUI, TP, ...
 1495  *	@dma:		DMA channel
 1496  *	@mtu:		Interface MTU value
 1497  *	@min_mtu:	Interface Minimum MTU value
 1498  *	@max_mtu:	Interface Maximum MTU value
 1499  *	@type:		Interface hardware type
 1500  *	@hard_header_len: Maximum hardware header length.
 1501  *	@min_header_len:  Minimum hardware header length
 1502  *
 1503  *	@needed_headroom: Extra headroom the hardware may need, but not in all
 1504  *			  cases can this be guaranteed
 1505  *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
 1506  *			  cases can this be guaranteed. Some cases also use
 1507  *			  LL_MAX_HEADER instead to allocate the skb
 1508  *
 1509  *	interface address info:
 1510  *
 1511  * 	@perm_addr:		Permanent hw address
 1512  * 	@addr_assign_type:	Hw address assignment type
 1513  * 	@addr_len:		Hardware address length
 1514  *	@neigh_priv_len:	Used in neigh_alloc()
 1515  * 	@dev_id:		Used to differentiate devices that share
 1516  * 				the same link layer address
 1517  * 	@dev_port:		Used to differentiate devices that share
 1518  * 				the same function
 1519  *	@addr_list_lock:	XXX: need comments on this one
 1520  *	@uc_promisc:		Counter that indicates promiscuous mode
 1521  *				has been enabled due to the need to listen to
 1522  *				additional unicast addresses in a device that
 1523  *				does not implement ndo_set_rx_mode()
 1524  *	@uc:			unicast mac addresses
 1525  *	@mc:			multicast mac addresses
 1526  *	@dev_addrs:		list of device hw addresses
 1527  *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
 1528  *	@promiscuity:		Number of times the NIC is told to work in
 1529  *				promiscuous mode; if it becomes 0 the NIC will
 1530  *				exit promiscuous mode
 1531  *	@allmulti:		Counter, enables or disables allmulticast mode
 1532  *
 1533  *	@vlan_info:	VLAN info
 1534  *	@dsa_ptr:	dsa specific data
 1535  *	@tipc_ptr:	TIPC specific data
 1536  *	@atalk_ptr:	AppleTalk link
 1537  *	@ip_ptr:	IPv4 specific data
 1538  *	@dn_ptr:	DECnet specific data
 1539  *	@ip6_ptr:	IPv6 specific data
 1540  *	@ax25_ptr:	AX.25 specific data
 1541  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
 1542  *
 1543  *	@dev_addr:	Hw address (before bcast,
 1544  *			because most packets are unicast)
 1545  *
 1546  *	@_rx:			Array of RX queues
 1547  *	@num_rx_queues:		Number of RX queues
 1548  *				allocated at register_netdev() time
 1549  *	@real_num_rx_queues: 	Number of RX queues currently active in device
 1550  *
 1551  *	@rx_handler:		handler for received packets
 1552  *	@rx_handler_data: 	XXX: need comments on this one
 1553  *	@ingress_queue:		XXX: need comments on this one
 1554  *	@broadcast:		hw bcast address
 1555  *
 1556  *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
 1557  *			indexed by RX queue number. Assigned by driver.
 1558  *			This must only be set if the ndo_rx_flow_steer
 1559  *			operation is defined
 1560  *	@index_hlist:		Device index hash chain
 1561  *
 1562  *	@_tx:			Array of TX queues
 1563  *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
 1564  *	@real_num_tx_queues: 	Number of TX queues currently active in device
 1565  *	@qdisc:			Root qdisc from userspace point of view
 1566  *	@tx_queue_len:		Max frames per queue allowed
 1567  *	@tx_global_lock: 	XXX: need comments on this one
 1568  *
 1569  *	@xps_maps:	XXX: need comments on this one
 1570  *
 1571  *	@watchdog_timeo:	Represents the timeout that is used by
 1572  *				the watchdog (see dev_watchdog())
 1573  *	@watchdog_timer:	List of timers
 1574  *
 1575  *	@pcpu_refcnt:		Number of references to this device
 1576  *	@todo_list:		Delayed register/unregister
 1577  *	@link_watch_list:	XXX: need comments on this one
 1578  *
 1579  *	@reg_state:		Register/unregister state machine
 1580  *	@dismantle:		Device is going to be freed
 1581  *	@rtnl_link_state:	This enum represents the phases of creating
 1582  *				a new link
 1583  *
 1584  *	@destructor:		Called from unregister,
 1585  *				can be used to call free_netdev
 1586  *	@npinfo:		XXX: need comments on this one
 1587  * 	@nd_net:		Network namespace this network device is inside
 1588  *
 1589  * 	@ml_priv:	Mid-layer private
 1590  * 	@lstats:	Loopback statistics
 1591  * 	@tstats:	Tunnel statistics
 1592  * 	@dstats:	Dummy statistics
 1593  * 	@vstats:	Virtual ethernet statistics
 1594  *
 1595  *	@garp_port:	GARP
 1596  *	@mrp_port:	MRP
 1597  *
 1598  *	@dev:		Class/net/name entry
 1599  *	@sysfs_groups:	Space for optional device, statistics and wireless
 1600  *			sysfs groups
 1601  *
 1602  *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
 1603  *	@rtnl_link_ops:	Rtnl_link_ops
 1604  *
 1605  *	@gso_max_size:	Maximum size of generic segmentation offload
 1606  *	@gso_max_segs:	Maximum number of segments that can be passed to the
 1607  *			NIC for GSO
 1608  *
 1609  *	@dcbnl_ops:	Data Center Bridging netlink ops
 1610  *	@num_tc:	Number of traffic classes in the net device
 1611  *	@tc_to_txq:	XXX: need comments on this one
 1612  *	@prio_tc_map:	XXX: need comments on this one
 1613  *
 1614  *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
 1615  *
 1616  *	@priomap:	XXX: need comments on this one
 1617  *	@phydev:	Physical device may attach itself
 1618  *			for hardware timestamping
 1619  *
 1620  *	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
 1621  *	@qdisc_running_key: lockdep class annotating Qdisc->running seqcount
 1622  *
 1623  *	@proto_down:	protocol port state information can be sent to the
 1624  *			switch driver and used to set the phys state of the
 1625  *			switch port.
 1626  *
 1627  *	FIXME: cleanup struct net_device such that network protocol info
 1628  *	moves out.
 1629  */
 1630 
 1631 struct net_device {
 1632 	char			name[IFNAMSIZ];
 1633 	struct hlist_node	name_hlist;
 1634 	char 			*ifalias;
 1635 	/*
 1636 	 *	I/O specific fields
 1637 	 *	FIXME: Merge these and struct ifmap into one
 1638 	 */
 1639 	unsigned long		mem_end;
 1640 	unsigned long		mem_start;
 1641 	unsigned long		base_addr;
 1642 	int			irq;
 1643 
 1644 	atomic_t		carrier_changes;
 1645 
 1646 	/*
 1647 	 *	Some hardware also needs these fields (state,dev_list,
 1648 	 *	napi_list,unreg_list,close_list) but they are not
 1649 	 *	part of the usual set specified in Space.c.
 1650 	 */
 1651 
 1652 	unsigned long		state;
 1653 
 1654 	struct list_head	dev_list;
 1655 	struct list_head	napi_list;
 1656 	struct list_head	unreg_list;
 1657 	struct list_head	close_list;
 1658 	struct list_head	ptype_all;
 1659 	struct list_head	ptype_specific;
 1660 
 1661 	struct {
 1662 		struct list_head upper;
 1663 		struct list_head lower;
 1664 	} adj_list;
 1665 
 1666 	netdev_features_t	features;
 1667 	netdev_features_t	hw_features;
 1668 	netdev_features_t	wanted_features;
 1669 	netdev_features_t	vlan_features;
 1670 	netdev_features_t	hw_enc_features;
 1671 	netdev_features_t	mpls_features;
 1672 	netdev_features_t	gso_partial_features;
 1673 
 1674 	int			ifindex;
 1675 	int			group;
 1676 
 1677 	struct net_device_stats	stats;
 1678 
 1679 	atomic_long_t		rx_dropped;
 1680 	atomic_long_t		tx_dropped;
 1681 	atomic_long_t		rx_nohandler;
 1682 
 1683 #ifdef CONFIG_WIRELESS_EXT
 1684 	const struct iw_handler_def *wireless_handlers;
 1685 	struct iw_public_data	*wireless_data;
 1686 #endif
 1687 	const struct net_device_ops *netdev_ops;
 1688 	const struct ethtool_ops *ethtool_ops;
 1689 #ifdef CONFIG_NET_SWITCHDEV
 1690 	const struct switchdev_ops *switchdev_ops;
 1691 #endif
 1692 #ifdef CONFIG_NET_L3_MASTER_DEV
 1693 	const struct l3mdev_ops	*l3mdev_ops;
 1694 #endif
 1695 #if IS_ENABLED(CONFIG_IPV6)
 1696 	const struct ndisc_ops *ndisc_ops;
 1697 #endif
 1698 
 1699 	const struct header_ops *header_ops;
 1700 
 1701 	unsigned int		flags;
 1702 	unsigned int		priv_flags;
 1703 
 1704 	unsigned short		gflags;
 1705 	unsigned short		padded;
 1706 
 1707 	unsigned char		operstate;
 1708 	unsigned char		link_mode;
 1709 
 1710 	unsigned char		if_port;
 1711 	unsigned char		dma;
 1712 
 1713 	unsigned int		mtu;
 1714 	unsigned int		min_mtu;
 1715 	unsigned int		max_mtu;
 1716 	unsigned short		type;
 1717 	unsigned short		hard_header_len;
 1718 	unsigned short		min_header_len;
 1719 
 1720 	unsigned short		needed_headroom;
 1721 	unsigned short		needed_tailroom;
 1722 
 1723 	/* Interface address info. */
 1724 	unsigned char		perm_addr[MAX_ADDR_LEN];
 1725 	unsigned char		addr_assign_type;
 1726 	unsigned char		addr_len;
 1727 	unsigned short		neigh_priv_len;
 1728 	unsigned short          dev_id;
 1729 	unsigned short          dev_port;
 1730 	spinlock_t		addr_list_lock;
 1731 	unsigned char		name_assign_type;
 1732 	bool			uc_promisc;
 1733 	struct netdev_hw_addr_list	uc;
 1734 	struct netdev_hw_addr_list	mc;
 1735 	struct netdev_hw_addr_list	dev_addrs;
 1736 
 1737 #ifdef CONFIG_SYSFS
 1738 	struct kset		*queues_kset;
 1739 #endif
 1740 	unsigned int		promiscuity;
 1741 	unsigned int		allmulti;
 1742 
 1743 
 1744 	/* Protocol-specific pointers */
 1745 
 1746 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 1747 	struct vlan_info __rcu	*vlan_info;
 1748 #endif
 1749 #if IS_ENABLED(CONFIG_NET_DSA)
 1750 	struct dsa_switch_tree	*dsa_ptr;
 1751 #endif
 1752 #if IS_ENABLED(CONFIG_TIPC)
 1753 	struct tipc_bearer __rcu *tipc_ptr;
 1754 #endif
 1755 	void 			*atalk_ptr;
 1756 	struct in_device __rcu	*ip_ptr;
 1757 	struct dn_dev __rcu     *dn_ptr;
 1758 	struct inet6_dev __rcu	*ip6_ptr;
 1759 	void			*ax25_ptr;
 1760 	struct wireless_dev	*ieee80211_ptr;
 1761 	struct wpan_dev		*ieee802154_ptr;
 1762 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
 1763 	struct mpls_dev __rcu	*mpls_ptr;
 1764 #endif
 1765 
 1766 /*
 1767  * Cache lines mostly used on receive path (including eth_type_trans())
 1768  */
 1769 	/* Interface address info used in eth_type_trans() */
 1770 	unsigned char		*dev_addr;
 1771 
 1772 #ifdef CONFIG_SYSFS
 1773 	struct netdev_rx_queue	*_rx;
 1774 
 1775 	unsigned int		num_rx_queues;
 1776 	unsigned int		real_num_rx_queues;
 1777 #endif
 1778 
 1779 	unsigned long		gro_flush_timeout;
 1780 	rx_handler_func_t __rcu	*rx_handler;
 1781 	void __rcu		*rx_handler_data;
 1782 
 1783 #ifdef CONFIG_NET_CLS_ACT
 1784 	struct tcf_proto __rcu  *ingress_cl_list;
 1785 #endif
 1786 	struct netdev_queue __rcu *ingress_queue;
 1787 #ifdef CONFIG_NETFILTER_INGRESS
 1788 	struct nf_hook_entry __rcu *nf_hooks_ingress;
 1789 #endif
 1790 
 1791 	unsigned char		broadcast[MAX_ADDR_LEN];
 1792 #ifdef CONFIG_RFS_ACCEL
 1793 	struct cpu_rmap		*rx_cpu_rmap;
 1794 #endif
 1795 	struct hlist_node	index_hlist;
 1796 
 1797 /*
 1798  * Cache lines mostly used on transmit path
 1799  */
 1800 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 1801 	unsigned int		num_tx_queues;
 1802 	unsigned int		real_num_tx_queues;
 1803 	struct Qdisc		*qdisc;
 1804 #ifdef CONFIG_NET_SCHED
 1805 	DECLARE_HASHTABLE	(qdisc_hash, 4);
 1806 #endif
 1807 	unsigned long		tx_queue_len;
 1808 	spinlock_t		tx_global_lock;
 1809 	int			watchdog_timeo;
 1810 
 1811 #ifdef CONFIG_XPS
 1812 	struct xps_dev_maps __rcu *xps_maps;
 1813 #endif
 1814 #ifdef CONFIG_NET_CLS_ACT
 1815 	struct tcf_proto __rcu  *egress_cl_list;
 1816 #endif
 1817 
 1818 	/* These may be needed for future network-power-down code. */
 1819 	struct timer_list	watchdog_timer;
 1820 
 1821 	int __percpu		*pcpu_refcnt;
 1822 	struct list_head	todo_list;
 1823 
 1824 	struct list_head	link_watch_list;
 1825 
 1826 	enum { NETREG_UNINITIALIZED=0,
 1827 	       NETREG_REGISTERED,	/* completed register_netdevice */
 1828 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 1829 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 1830 	       NETREG_RELEASED,		/* called free_netdev */
 1831 	       NETREG_DUMMY,		/* dummy device for NAPI poll */
 1832 	} reg_state:8;
 1833 
 1834 	bool dismantle;
 1835 
 1836 	enum {
 1837 		RTNL_LINK_INITIALIZED,
 1838 		RTNL_LINK_INITIALIZING,
 1839 	} rtnl_link_state:16;
 1840 
 1841 	void (*destructor)(struct net_device *dev);
 1842 
 1843 #ifdef CONFIG_NETPOLL
 1844 	struct netpoll_info __rcu	*npinfo;
 1845 #endif
 1846 
 1847 	possible_net_t			nd_net;
 1848 
 1849 	/* mid-layer private */
 1850 	union {
 1851 		void					*ml_priv;
 1852 		struct pcpu_lstats __percpu		*lstats;
 1853 		struct pcpu_sw_netstats __percpu	*tstats;
 1854 		struct pcpu_dstats __percpu		*dstats;
 1855 		struct pcpu_vstats __percpu		*vstats;
 1856 	};
 1857 
 1858 #if IS_ENABLED(CONFIG_GARP)
 1859 	struct garp_port __rcu	*garp_port;
 1860 #endif
 1861 #if IS_ENABLED(CONFIG_MRP)
 1862 	struct mrp_port __rcu	*mrp_port;
 1863 #endif
 1864 
 1865 	struct device		dev;
 1866 	const struct attribute_group *sysfs_groups[4];
 1867 	const struct attribute_group *sysfs_rx_queue_group;
 1868 
 1869 	const struct rtnl_link_ops *rtnl_link_ops;
 1870 
 1871 	/* for setting kernel sock attribute on TCP connection setup */
 1872 #define GSO_MAX_SIZE		65536
 1873 	unsigned int		gso_max_size;
 1874 #define GSO_MAX_SEGS		65535
 1875 	u16			gso_max_segs;
 1876 
 1877 #ifdef CONFIG_DCB
 1878 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 1879 #endif
 1880 	u8			num_tc;
 1881 	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
 1882 	u8			prio_tc_map[TC_BITMASK + 1];
 1883 
 1884 #if IS_ENABLED(CONFIG_FCOE)
 1885 	unsigned int		fcoe_ddp_xid;
 1886 #endif
 1887 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 1888 	struct netprio_map __rcu *priomap;
 1889 #endif
 1890 	struct phy_device	*phydev;
 1891 	struct lock_class_key	*qdisc_tx_busylock;
 1892 	struct lock_class_key	*qdisc_running_key;
 1893 	bool			proto_down;
 1894 };
 1895 #define to_net_dev(d) container_of(d, struct net_device, dev)
 1896 
 1897 #define	NETDEV_ALIGN		32
 1898 
 1899 static inline
 1900 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
 1901 {
 1902 	return dev->prio_tc_map[prio & TC_BITMASK];
 1903 }
 1904 
 1905 static inline
 1906 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
 1907 {
 1908 	if (tc >= dev->num_tc)
 1909 		return -EINVAL;
 1910 
 1911 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
 1912 	return 0;
 1913 }
 1914 
 1915 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
 1916 void netdev_reset_tc(struct net_device *dev);
 1917 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
 1918 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
 1919 
 1920 static inline
 1921 int netdev_get_num_tc(struct net_device *dev)
 1922 {
 1923 	return dev->num_tc;
 1924 }
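
A hedged sketch of how a driver might wire up the traffic-class helpers above; the two-class split, the queue counts, and the example_setup_tc name are assumptions for illustration, not taken from this header.

/* Hypothetical setup: 8 TX queues split into two traffic classes. */
static int example_setup_tc(struct net_device *dev)
{
	int err, prio;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0..3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4..7 */

	/* priorities 0..3 -> TC0, 4..7 -> TC1; tc < num_tc, so this cannot fail */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);

	return 0;
}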
 1925 
 1926 static inline
 1927 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 1928 					 unsigned int index)
 1929 {
 1930 	return &dev->_tx[index];
 1931 }
 1932 
 1933 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
 1934 						    const struct sk_buff *skb)
 1935 {
 1936 	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 1937 }
 1938 
 1939 static inline void netdev_for_each_tx_queue(struct net_device *dev,
 1940 					    void (*f)(struct net_device *,
 1941 						      struct netdev_queue *,
 1942 						      void *),
 1943 					    void *arg)
 1944 {
 1945 	unsigned int i;
 1946 
 1947 	for (i = 0; i < dev->num_tx_queues; i++)
 1948 		f(dev, &dev->_tx[i], arg);
 1949 }
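
The iterator above takes a per-queue callback. A minimal sketch (names assumed) that resets BQL accounting on every queue could look like this; netdev_tx_reset_queue() is defined further down in this header.

static void example_reset_one(struct net_device *dev,
			      struct netdev_queue *txq, void *arg)
{
	/* 'arg' carries caller context; unused in this sketch */
	netdev_tx_reset_queue(txq);
}

static void example_reset_all(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, example_reset_one, NULL);
}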
 1950 
 1951 #define netdev_lockdep_set_classes(dev)				\
 1952 {								\
 1953 	static struct lock_class_key qdisc_tx_busylock_key;	\
 1954 	static struct lock_class_key qdisc_running_key;		\
 1955 	static struct lock_class_key qdisc_xmit_lock_key;	\
 1956 	static struct lock_class_key dev_addr_list_lock_key;	\
 1957 	unsigned int i;						\
 1958 								\
 1959 	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
 1960 	(dev)->qdisc_running_key = &qdisc_running_key;		\
 1961 	lockdep_set_class(&(dev)->addr_list_lock,		\
 1962 			  &dev_addr_list_lock_key); 		\
 1963 	for (i = 0; i < (dev)->num_tx_queues; i++)		\
 1964 		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
 1965 				  &qdisc_xmit_lock_key);	\
 1966 }
 1967 
 1968 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 1969 				    struct sk_buff *skb,
 1970 				    void *accel_priv);
 1971 
 1972 /* returns the headroom that the master device needs to take into account
 1973  * when forwarding to this dev
 1974  */
 1975 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
 1976 {
 1977 	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
 1978 }
 1979 
 1980 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
 1981 {
 1982 	if (dev->netdev_ops->ndo_set_rx_headroom)
 1983 		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
 1984 }
 1985 
 1986 /* set the device rx headroom to the dev's default */
 1987 static inline void netdev_reset_rx_headroom(struct net_device *dev)
 1988 {
 1989 	netdev_set_rx_headroom(dev, -1);
 1990 }
 1991 
 1992 /*
 1993  * Net namespace inlines
 1994  */
 1995 static inline
 1996 struct net *dev_net(const struct net_device *dev)
 1997 {
 1998 	return read_pnet(&dev->nd_net);
 1999 }
 2000 
 2001 static inline
 2002 void dev_net_set(struct net_device *dev, struct net *net)
 2003 {
 2004 	write_pnet(&dev->nd_net, net);
 2005 }
 2006 
 2007 static inline bool netdev_uses_dsa(struct net_device *dev)
 2008 {
 2009 #if IS_ENABLED(CONFIG_NET_DSA)
 2010 	if (dev->dsa_ptr != NULL)
 2011 		return dsa_uses_tagged_protocol(dev->dsa_ptr);
 2012 #endif
 2013 	return false;
 2014 }
 2015 
 2016 /**
 2017  *	netdev_priv - access network device private data
 2018  *	@dev: network device
 2019  *
 2020  * Get network device private data
 2021  */
 2022 static inline void *netdev_priv(const struct net_device *dev)
 2023 {
 2024 	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
 2025 }
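
A minimal sketch of the intended usage, assuming alloc_etherdev() from <linux/etherdevice.h>; the private struct and names are illustrative only. The private area is carved out right behind struct net_device, which is what the ALIGN() arithmetic above computes.

struct example_priv {
	u32 msg_enable;
	/* further driver state */
};

static struct net_device *example_create(void)
{
	struct net_device *netdev;
	struct example_priv *priv;

	/* allocates struct net_device plus the private area in one block */
	netdev = alloc_etherdev(sizeof(struct example_priv));
	if (!netdev)
		return NULL;

	priv = netdev_priv(netdev);
	priv->msg_enable = 0;
	return netdev;
}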
 2026 
 2027 /* Set the sysfs physical device reference for the network logical device;
 2028  * if set prior to registration, a symlink is created during initialization.
 2029  */
 2030 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 2031 
 2032 /* Set the sysfs device type for the network logical device to allow
 2033  * fine-grained identification of different network device types. For
 2034  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 2035  */
 2036 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
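
For example, a PCI driver (assuming <linux/pci.h>) typically binds the parent in its probe routine, before register_netdevice(), so the sysfs symlink appears:

static void example_bind_parent(struct net_device *netdev,
				struct pci_dev *pdev)
{
	/* must run before registration for the 'device' symlink to show up */
	SET_NETDEV_DEV(netdev, &pdev->dev);
}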
 2037 
 2038 /* Default NAPI poll() weight
 2039  * Device drivers are strongly advised not to use a bigger value
 2040  */
 2041 #define NAPI_POLL_WEIGHT 64
 2042 
 2043 /**
 2044  *	netif_napi_add - initialize a NAPI context
 2045  *	@dev:  network device
 2046  *	@napi: NAPI context
 2047  *	@poll: polling function
 2048  *	@weight: default weight
 2049  *
 2050  * netif_napi_add() must be used to initialize a NAPI context prior to calling
 2051  * *any* of the other NAPI-related functions.
 2052  */
 2053 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 2054 		    int (*poll)(struct napi_struct *, int), int weight);
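
A hedged sketch of a poll callback and its registration; the ring-processing step is elided and all example_* names are assumptions. napi_complete_done() is assumed to be available, as it is in contemporary kernels.

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to 'budget' RX packets, incrementing work_done ... */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable device interrupts here */
	}
	return work_done;
}

static void example_init_napi(struct net_device *netdev,
			      struct napi_struct *napi)
{
	netif_napi_add(netdev, napi, example_poll, NAPI_POLL_WEIGHT);
}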
 2055 
 2056 /**
 2057  *	netif_tx_napi_add - initialize a NAPI context
 2058  *	@dev:  network device
 2059  *	@napi: NAPI context
 2060  *	@poll: polling function
 2061  *	@weight: default weight
 2062  *
 2063  * This variant of netif_napi_add() should be used from drivers using NAPI
 2064  * to exclusively poll a TX queue.
 2065  * This avoids adding it to napi_hash[] and thus polluting that hash table.
 2066  */
 2067 static inline void netif_tx_napi_add(struct net_device *dev,
 2068 				     struct napi_struct *napi,
 2069 				     int (*poll)(struct napi_struct *, int),
 2070 				     int weight)
 2071 {
 2072 	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
 2073 	netif_napi_add(dev, napi, poll, weight);
 2074 }
 2075 
 2076 /**
 2077  *  netif_napi_del - remove a NAPI context
 2078  *  @napi: NAPI context
 2079  *
 2080  *  netif_napi_del() removes a NAPI context from the network device NAPI list
 2081  */
 2082 void netif_napi_del(struct napi_struct *napi);
 2083 
 2084 struct napi_gro_cb {
 2085 	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
 2086 	void	*frag0;
 2087 
 2088 	/* Length of frag0. */
 2089 	unsigned int frag0_len;
 2090 
 2091 	/* This indicates where we are processing relative to skb->data. */
 2092 	int	data_offset;
 2093 
 2094 	/* This is non-zero if the packet cannot be merged with the new skb. */
 2095 	u16	flush;
 2096 
 2097 	/* Save the IP ID here and check when we get to the transport layer */
 2098 	u16	flush_id;
 2099 
 2100 	/* Number of segments aggregated. */
 2101 	u16	count;
 2102 
 2103 	/* Start offset for remote checksum offload */
 2104 	u16	gro_remcsum_start;
 2105 
 2106 	/* jiffies when first packet was created/queued */
 2107 	unsigned long age;
 2108 
 2109 	/* Used in ipv6_gro_receive() and foo-over-udp */
 2110 	u16	proto;
 2111 
 2112 	/* This is non-zero if the packet may be of the same flow. */
 2113 	u8	same_flow:1;
 2114 
 2115 	/* Used in tunnel GRO receive */
 2116 	u8	encap_mark:1;
 2117 
 2118 	/* GRO checksum is valid */
 2119 	u8	csum_valid:1;
 2120 
 2121 	/* Number of checksums via CHECKSUM_UNNECESSARY */
 2122 	u8	csum_cnt:3;
 2123 
 2124 	/* Free the skb? */
 2125 	u8	free:2;
 2126 #define NAPI_GRO_FREE		  1
 2127 #define NAPI_GRO_FREE_STOLEN_HEAD 2
 2128 
 2129 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 2130 	u8	is_ipv6:1;
 2131 
 2132 	/* Used in GRE, set in fou/gue_gro_receive */
 2133 	u8	is_fou:1;
 2134 
 2135 	/* Used to determine if flush_id can be ignored */
 2136 	u8	is_atomic:1;
 2137 
 2138 	/* Number of gro_receive callbacks this packet already went through */
 2139 	u8 recursion_counter:4;
 2140 
 2141 	/* 1 bit hole */
 2142 
 2143 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 2144 	__wsum	csum;
 2145 
 2146 	/* used in skb_gro_receive() slow path */
 2147 	struct sk_buff *last;
 2148 };
 2149 
 2150 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 2151 
 2152 #define GRO_RECURSION_LIMIT 15
 2153 static inline int gro_recursion_inc_test(struct sk_buff *skb)
 2154 {
 2155 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 2156 }
 2157 
 2158 typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
 2159 static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
 2160 						struct sk_buff **head,
 2161 						struct sk_buff *skb)
 2162 {
 2163 	if (unlikely(gro_recursion_inc_test(skb))) {
 2164 		NAPI_GRO_CB(skb)->flush |= 1;
 2165 		return NULL;
 2166 	}
 2167 
 2168 	return cb(head, skb);
 2169 }
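
As a sketch of how an encapsulation handler might use the recursion guard above (flush and error handling elided; the inner protocol is an assumption), using gro_find_receive_by_type(), which is declared later in this header:

static struct sk_buff **example_encap_gro_receive(struct sk_buff **head,
						  struct sk_buff *skb)
{
	struct packet_offload *ptype;

	/* assume the inner payload is IPv4 for this sketch */
	ptype = gro_find_receive_by_type(htons(ETH_P_IP));
	if (!ptype)
		return NULL;

	/* dispatch through the guard so deep nesting sets the flush bit */
	return call_gro_receive(ptype->callbacks.gro_receive, head, skb);
}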
 2170 
 2171 typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
 2172 					     struct sk_buff *);
 2173 static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
 2174 						   struct sock *sk,
 2175 						   struct sk_buff **head,
 2176 						   struct sk_buff *skb)
 2177 {
 2178 	if (unlikely(gro_recursion_inc_test(skb))) {
 2179 		NAPI_GRO_CB(skb)->flush |= 1;
 2180 		return NULL;
 2181 	}
 2182 
 2183 	return cb(sk, head, skb);
 2184 }
 2185 
 2186 struct packet_type {
 2187 	__be16			type;	/* This is really htons(ether_type). */
 2188 	struct net_device	*dev;	/* NULL is wildcarded here	     */
 2189 	int			(*func) (struct sk_buff *,
 2190 					 struct net_device *,
 2191 					 struct packet_type *,
 2192 					 struct net_device *);
 2193 	bool			(*id_match)(struct packet_type *ptype,
 2194 					    struct sock *sk);
 2195 	void			*af_packet_priv;
 2196 	struct list_head	list;
 2197 };
 2198 
 2199 struct offload_callbacks {
 2200 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 2201 						netdev_features_t features);
 2202 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 2203 						 struct sk_buff *skb);
 2204 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 2205 };
 2206 
 2207 struct packet_offload {
 2208 	__be16			 type;	/* This is really htons(ether_type). */
 2209 	u16			 priority;
 2210 	struct offload_callbacks callbacks;
 2211 	struct list_head	 list;
 2212 };
 2213 
 2214 /* often modified stats are per-CPU, other are shared (netdev->stats) */
 2215 struct pcpu_sw_netstats {
 2216 	u64     rx_packets;
 2217 	u64     rx_bytes;
 2218 	u64     tx_packets;
 2219 	u64     tx_bytes;
 2220 	struct u64_stats_sync   syncp;
 2221 };
 2222 
 2223 #define __netdev_alloc_pcpu_stats(type, gfp)				\
 2224 ({									\
 2225 	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
 2226 	if (pcpu_stats)	{						\
 2227 		int __cpu;						\
 2228 		for_each_possible_cpu(__cpu) {				\
 2229 			typeof(type) *stat;				\
 2230 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
 2231 			u64_stats_init(&stat->syncp);			\
 2232 		}							\
 2233 	}								\
 2234 	pcpu_stats;							\
 2235 })
 2236 
 2237 #define netdev_alloc_pcpu_stats(type)					\
 2238 	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
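
A sketch of the usual pairing, assuming the driver uses the tstats member of the union in struct net_device above; u64_stats_update_begin()/end() come from <linux/u64_stats_sync.h>, and the example_* names are illustrative.

static int example_alloc_stats(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void example_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	/* lock-free on 64-bit; seqcount-protected on 32-bit */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}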
 2239 
 2240 enum netdev_lag_tx_type {
 2241 	NETDEV_LAG_TX_TYPE_UNKNOWN,
 2242 	NETDEV_LAG_TX_TYPE_RANDOM,
 2243 	NETDEV_LAG_TX_TYPE_BROADCAST,
 2244 	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
 2245 	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
 2246 	NETDEV_LAG_TX_TYPE_HASH,
 2247 };
 2248 
 2249 struct netdev_lag_upper_info {
 2250 	enum netdev_lag_tx_type tx_type;
 2251 };
 2252 
 2253 struct netdev_lag_lower_state_info {
 2254 	u8 link_up : 1,
 2255 	   tx_enabled : 1;
 2256 };
 2257 
 2258 #include <linux/notifier.h>
 2259 
 2260 /* netdevice notifier chain. Please remember to update the rtnetlink
 2261  * notification exclusion list in rtnetlink_event() when adding new
 2262  * types.
 2263  */
 2264 #define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
 2265 #define NETDEV_DOWN	0x0002
 2266 #define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
 2267 				   detected a hardware crash and restarted
 2268 				   - we can use this e.g. to kick TCP sessions
 2269 				   once done */
 2270 #define NETDEV_CHANGE	0x0004	/* Notify device state change */
 2271 #define NETDEV_REGISTER 0x0005
 2272 #define NETDEV_UNREGISTER	0x0006
 2273 #define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
 2274 #define NETDEV_CHANGEADDR	0x0008
 2275 #define NETDEV_GOING_DOWN	0x0009
 2276 #define NETDEV_CHANGENAME	0x000A
 2277 #define NETDEV_FEAT_CHANGE	0x000B
 2278 #define NETDEV_BONDING_FAILOVER 0x000C
 2279 #define NETDEV_PRE_UP		0x000D
 2280 #define NETDEV_PRE_TYPE_CHANGE	0x000E
 2281 #define NETDEV_POST_TYPE_CHANGE	0x000F
 2282 #define NETDEV_POST_INIT	0x0010
 2283 #define NETDEV_UNREGISTER_FINAL 0x0011
 2284 #define NETDEV_RELEASE		0x0012
 2285 #define NETDEV_NOTIFY_PEERS	0x0013
 2286 #define NETDEV_JOIN		0x0014
 2287 #define NETDEV_CHANGEUPPER	0x0015
 2288 #define NETDEV_RESEND_IGMP	0x0016
 2289 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
 2290 #define NETDEV_CHANGEINFODATA	0x0018
 2291 #define NETDEV_BONDING_INFO	0x0019
 2292 #define NETDEV_PRECHANGEUPPER	0x001A
 2293 #define NETDEV_CHANGELOWERSTATE	0x001B
 2294 #define NETDEV_UDP_TUNNEL_PUSH_INFO	0x001C
 2295 #define NETDEV_CHANGE_TX_QUEUE_LEN	0x001E
 2296 
 2297 int register_netdevice_notifier(struct notifier_block *nb);
 2298 int unregister_netdevice_notifier(struct notifier_block *nb);
 2299 
 2300 struct netdev_notifier_info {
 2301 	struct net_device *dev;
 2302 };
 2303 
 2304 struct netdev_notifier_change_info {
 2305 	struct netdev_notifier_info info; /* must be first */
 2306 	unsigned int flags_changed;
 2307 };
 2308 
 2309 struct netdev_notifier_changeupper_info {
 2310 	struct netdev_notifier_info info; /* must be first */
 2311 	struct net_device *upper_dev; /* new upper dev */
 2312 	bool master; /* is upper dev master */
 2313 	bool linking; /* is the notification for link or unlink */
 2314 	void *upper_info; /* upper dev info */
 2315 };
 2316 
 2317 struct netdev_notifier_changelowerstate_info {
 2318 	struct netdev_notifier_info info; /* must be first */
 2319 	void *lower_state_info; /* is lower dev state */
 2320 };
 2321 
 2322 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
 2323 					     struct net_device *dev)
 2324 {
 2325 	info->dev = dev;
 2326 }
 2327 
 2328 static inline struct net_device *
 2329 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
 2330 {
 2331 	return info->dev;
 2332 }
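
A minimal notifier sketch (names assumed); NOTIFY_DONE comes from <linux/notifier.h>, included just above, and the void pointer handed to the callback is the netdev_notifier_info described here.

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		/* react to 'dev' coming up */
		break;
	case NETDEV_GOING_DOWN:
		/* 'dev' is about to go down */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_nb) at module init,
 * unregister_netdevice_notifier(&example_nb) at exit.
 */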
 2333 
 2334 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 2335 
 2336 
 2337 extern rwlock_t				dev_base_lock;		/* Device list lock */
 2338 
 2339 #define for_each_netdev(net, d)		\
 2340 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
 2341 #define for_each_netdev_reverse(net, d)	\
 2342 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
 2343 #define for_each_netdev_rcu(net, d)		\
 2344 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
 2345 #define for_each_netdev_safe(net, d, n)	\
 2346 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
 2347 #define for_each_netdev_continue(net, d)		\
 2348 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
 2349 #define for_each_netdev_continue_rcu(net, d)		\
 2350 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 2351 #define for_each_netdev_in_bond_rcu(bond, slave)	\
 2352 		for_each_netdev_rcu(&init_net, slave)	\
 2353 			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 2354 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
 2355 
 2356 static inline struct net_device *next_net_device(struct net_device *dev)
 2357 {
 2358 	struct list_head *lh;
 2359 	struct net *net;
 2360 
 2361 	net = dev_net(dev);
 2362 	lh = dev->dev_list.next;
 2363 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 2364 }
 2365 
 2366 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
 2367 {
 2368 	struct list_head *lh;
 2369 	struct net *net;
 2370 
 2371 	net = dev_net(dev);
 2372 	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
 2373 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 2374 }
 2375 
 2376 static inline struct net_device *first_net_device(struct net *net)
 2377 {
 2378 	return list_empty(&net->dev_base_head) ? NULL :
 2379 		net_device_entry(net->dev_base_head.next);
 2380 }
 2381 
 2382 static inline struct net_device *first_net_device_rcu(struct net *net)
 2383 {
 2384 	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
 2385 
 2386 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 2387 }
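
The _rcu variants of the iterators above must run under rcu_read_lock(); a small sketch:

static void example_dump_names(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s\n", dev->name);
	rcu_read_unlock();
}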
 2388 
 2389 int netdev_boot_setup_check(struct net_device *dev);
 2390 unsigned long netdev_boot_base(const char *prefix, int unit);
 2391 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 2392 				       const char *hwaddr);
 2393 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 2394 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 2395 void dev_add_pack(struct packet_type *pt);
 2396 void dev_remove_pack(struct packet_type *pt);
 2397 void __dev_remove_pack(struct packet_type *pt);
 2398 void dev_add_offload(struct packet_offload *po);
 2399 void dev_remove_offload(struct packet_offload *po);
 2400 
 2401 int dev_get_iflink(const struct net_device *dev);
 2402 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
 2403 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
 2404 				      unsigned short mask);
 2405 struct net_device *dev_get_by_name(struct net *net, const char *name);
 2406 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
 2407 struct net_device *__dev_get_by_name(struct net *net, const char *name);
 2408 int dev_alloc_name(struct net_device *dev, const char *name);
 2409 int dev_open(struct net_device *dev);
 2410 int dev_close(struct net_device *dev);
 2411 int dev_close_many(struct list_head *head, bool unlink);
 2412 void dev_disable_lro(struct net_device *dev);
 2413 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 2414 int dev_queue_xmit(struct sk_buff *skb);
 2415 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 2416 int register_netdevice(struct net_device *dev);
 2417 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 2418 void unregister_netdevice_many(struct list_head *head);
 2419 static inline void unregister_netdevice(struct net_device *dev)
 2420 {
 2421 	unregister_netdevice_queue(dev, NULL);
 2422 }
 2423 
 2424 int netdev_refcnt_read(const struct net_device *dev);
 2425 void free_netdev(struct net_device *dev);
 2426 void netdev_freemem(struct net_device *dev);
 2427 void synchronize_net(void);
 2428 int init_dummy_netdev(struct net_device *dev);
 2429 
 2430 DECLARE_PER_CPU(int, xmit_recursion);
 2431 #define XMIT_RECURSION_LIMIT	10
 2432 
 2433 static inline int dev_recursion_level(void)
 2434 {
 2435 	return this_cpu_read(xmit_recursion);
 2436 }
 2437 
 2438 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 2439 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 2440 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 2441 int netdev_get_name(struct net *net, char *name, int ifindex);
 2442 int dev_restart(struct net_device *dev);
 2443 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 2444 
 2445 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 2446 {
 2447 	return NAPI_GRO_CB(skb)->data_offset;
 2448 }
 2449 
 2450 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
 2451 {
 2452 	return skb->len - NAPI_GRO_CB(skb)->data_offset;
 2453 }
 2454 
 2455 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
 2456 {
 2457 	NAPI_GRO_CB(skb)->data_offset += len;
 2458 }
 2459 
 2460 static inline void *skb_gro_header_fast(struct sk_buff *skb,
 2461 					unsigned int offset)
 2462 {
 2463 	return NAPI_GRO_CB(skb)->frag0 + offset;
 2464 }
 2465 
 2466 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 2467 {
 2468 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
 2469 }
 2470 
 2471 static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
 2472 {
 2473 	NAPI_GRO_CB(skb)->frag0 = NULL;
 2474 	NAPI_GRO_CB(skb)->frag0_len = 0;
 2475 }
 2476 
 2477 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 2478 					unsigned int offset)
 2479 {
 2480 	if (!pskb_may_pull(skb, hlen))
 2481 		return NULL;
 2482 
 2483 	skb_gro_frag0_invalidate(skb);
 2484 	return skb->data + offset;
 2485 }
 2486 
 2487 static inline void *skb_gro_network_header(struct sk_buff *skb)
 2488 {
 2489 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
 2490 	       skb_network_offset(skb);
 2491 }
 2492 
 2493 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
 2494 					const void *start, unsigned int len)
 2495 {
 2496 	if (NAPI_GRO_CB(skb)->csum_valid)
 2497 		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
 2498 						  csum_partial(start, len, 0));
 2499 }
 2500 
 2501 /* GRO checksum functions. These are logical equivalents of the normal
 2502  * checksum functions (in skbuff.h) except that they operate on the GRO
 2503  * offsets and fields in sk_buff.
 2504  */
 2505 
 2506 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
 2507 
 2508 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
 2509 {
 2510 	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
 2511 }
 2512 
 2513 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
 2514 						      bool zero_okay,
 2515 						      __sum16 check)
 2516 {
 2517 	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
 2518 		skb_checksum_start_offset(skb) <
 2519 		 skb_gro_offset(skb)) &&
 2520 		!skb_at_gro_remcsum_start(skb) &&
 2521 		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 2522 		(!zero_okay || check));
 2523 }
 2524 
 2525 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
 2526 							   __wsum psum)
 2527 {
 2528 	if (NAPI_GRO_CB(skb)->csum_valid &&
 2529 	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
 2530 		return 0;
 2531 
 2532 	NAPI_GRO_CB(skb)->csum = psum;
 2533 
 2534 	return __skb_gro_checksum_complete(skb);
 2535 }
 2536 
 2537 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
 2538 {
 2539 	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
 2540 		/* Consume a checksum from CHECKSUM_UNNECESSARY */
 2541 		NAPI_GRO_CB(skb)->csum_cnt--;
 2542 	} else {
 2543 		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
 2544 		 * verified a new top level checksum or an encapsulated one
 2545 		 * during GRO. This saves work if we fall back to the normal path.
 2546 		 */
 2547 		__skb_incr_checksum_unnecessary(skb);
 2548 	}
 2549 }
 2550 
 2551 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
 2552 				    compute_pseudo)			\
 2553 ({									\
 2554 	__sum16 __ret = 0;						\
 2555 	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
 2556 		__ret = __skb_gro_checksum_validate_complete(skb,	\
 2557 				compute_pseudo(skb, proto));		\
 2558 	if (__ret)							\
 2559 		__skb_mark_checksum_bad(skb);				\
 2560 	else								\
 2561 		skb_gro_incr_csum_unnecessary(skb);			\
 2562 	__ret;								\
 2563 })
 2564 
 2565 #define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
 2566 	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
 2567 
 2568 #define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
 2569 					     compute_pseudo)		\
 2570 	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
 2571 
 2572 #define skb_gro_checksum_simple_validate(skb)				\
 2573 	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
 2574 
 2575 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
 2576 {
 2577 	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 2578 		!NAPI_GRO_CB(skb)->csum_valid);
 2579 }
 2580 
 2581 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
 2582 					      __sum16 check, __wsum pseudo)
 2583 {
 2584 	NAPI_GRO_CB(skb)->csum = ~pseudo;
 2585 	NAPI_GRO_CB(skb)->csum_valid = 1;
 2586 }
 2587 
 2588 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
 2589 do {									\
 2590 	if (__skb_gro_checksum_convert_check(skb))			\
 2591 		__skb_gro_checksum_convert(skb, check,			\
 2592 					   compute_pseudo(skb, proto));	\
 2593 } while (0)
 2594 
 2595 struct gro_remcsum {
 2596 	int offset;
 2597 	__wsum delta;
 2598 };
 2599 
 2600 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
 2601 {
 2602 	grc->offset = 0;
 2603 	grc->delta = 0;
 2604 }
 2605 
 2606 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
 2607 					    unsigned int off, size_t hdrlen,
 2608 					    int start, int offset,
 2609 					    struct gro_remcsum *grc,
 2610 					    bool nopartial)
 2611 {
 2612 	__wsum delta;
 2613 	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 2614 
 2615 	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
 2616 
 2617 	if (!nopartial) {
 2618 		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
 2619 		return ptr;
 2620 	}
 2621 
 2622 	ptr = skb_gro_header_fast(skb, off);
 2623 	if (skb_gro_header_hard(skb, off + plen)) {
 2624 		ptr = skb_gro_header_slow(skb, off + plen, off);
 2625 		if (!ptr)
 2626 			return NULL;
 2627 	}
 2628 
 2629 	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
 2630 			       start, offset);
 2631 
 2632 	/* Adjust skb->csum since we changed the packet */
 2633 	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
 2634 
 2635 	grc->offset = off + hdrlen + offset;
 2636 	grc->delta = delta;
 2637 
 2638 	return ptr;
 2639 }
 2640 
 2641 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 2642 					   struct gro_remcsum *grc)
 2643 {
 2644 	void *ptr;
 2645 	size_t plen = grc->offset + sizeof(u16);
 2646 
 2647 	if (!grc->delta)
 2648 		return;
 2649 
 2650 	ptr = skb_gro_header_fast(skb, grc->offset);
 2651 	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
 2652 		ptr = skb_gro_header_slow(skb, plen, grc->offset);
 2653 		if (!ptr)
 2654 			return;
 2655 	}
 2656 
 2657 	remcsum_unadjust((__sum16 *)ptr, grc->delta);
 2658 }
 2659 
 2660 #ifdef CONFIG_XFRM_OFFLOAD
 2661 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
 2662 {
 2663 	if (PTR_ERR(pp) != -EINPROGRESS)
 2664 		NAPI_GRO_CB(skb)->flush |= flush;
 2665 }
 2666 #else
 2667 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
 2668 {
 2669 	NAPI_GRO_CB(skb)->flush |= flush;
 2670 }
 2671 #endif
 2672 
 2673 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 2674 				  unsigned short type,
 2675 				  const void *daddr, const void *saddr,
 2676 				  unsigned int len)
 2677 {
 2678 	if (!dev->header_ops || !dev->header_ops->create)
 2679 		return 0;
 2680 
 2681 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
 2682 }
 2683 
 2684 static inline int dev_parse_header(const struct sk_buff *skb,
 2685 				   unsigned char *haddr)
 2686 {
 2687 	const struct net_device *dev = skb->dev;
 2688 
 2689 	if (!dev->header_ops || !dev->header_ops->parse)
 2690 		return 0;
 2691 	return dev->header_ops->parse(skb, haddr);
 2692 }
 2693 
 2694 /* ll_header must have at least hard_header_len allocated */
 2695 static inline bool dev_validate_header(const struct net_device *dev,
 2696 				       char *ll_header, int len)
 2697 {
 2698 	if (likely(len >= dev->hard_header_len))
 2699 		return true;
 2700 	if (len < dev->min_header_len)
 2701 		return false;
 2702 
 2703 	if (capable(CAP_SYS_RAWIO)) {
 2704 		memset(ll_header + len, 0, dev->hard_header_len - len);
 2705 		return true;
 2706 	}
 2707 
 2708 	if (dev->header_ops && dev->header_ops->validate)
 2709 		return dev->header_ops->validate(ll_header, len);
 2710 
 2711 	return false;
 2712 }
 2713 
 2714 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 2715 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 2716 static inline int unregister_gifconf(unsigned int family)
 2717 {
 2718 	return register_gifconf(family, NULL);
 2719 }
 2720 
 2721 #ifdef CONFIG_NET_FLOW_LIMIT
 2722 #define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be a power of 2; must not overflow the u8 buckets */
 2723 struct sd_flow_limit {
 2724 	u64			count;
 2725 	unsigned int		num_buckets;
 2726 	unsigned int		history_head;
 2727 	u16			history[FLOW_LIMIT_HISTORY];
 2728 	u8			buckets[];
 2729 };
 2730 
 2731 extern int netdev_flow_limit_table_len;
 2732 #endif /* CONFIG_NET_FLOW_LIMIT */
 2733 
 2734 /*
 2735  * Incoming packets are placed on per-CPU queues
 2736  */
 2737 struct softnet_data {
 2738 	struct list_head	poll_list;
 2739 	struct sk_buff_head	process_queue;
 2740 
 2741 	/* stats */
 2742 	unsigned int		processed;
 2743 	unsigned int		time_squeeze;
 2744 	unsigned int		received_rps;
 2745 #ifdef CONFIG_RPS
 2746 	struct softnet_data	*rps_ipi_list;
 2747 #endif
 2748 #ifdef CONFIG_NET_FLOW_LIMIT
 2749 	struct sd_flow_limit __rcu *flow_limit;
 2750 #endif
 2751 	struct Qdisc		*output_queue;
 2752 	struct Qdisc		**output_queue_tailp;
 2753 	struct sk_buff		*completion_queue;
 2754 
 2755 #ifdef CONFIG_RPS
 2756 	/* input_queue_head should be written by the CPU owning this struct,
 2757 	 * and only read by other CPUs. Worth using a cache line.
 2758 	 */
 2759 	unsigned int		input_queue_head ____cacheline_aligned_in_smp;
 2760 
 2761 	/* Elements below can be accessed between CPUs for RPS/RFS */
 2762 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 2763 	struct softnet_data	*rps_ipi_next;
 2764 	unsigned int		cpu;
 2765 	unsigned int		input_queue_tail;
 2766 #endif
 2767 	unsigned int		dropped;
 2768 	struct sk_buff_head	input_pkt_queue;
 2769 	struct napi_struct	backlog;
 2770 
 2771 };
 2772 
 2773 static inline void input_queue_head_incr(struct softnet_data *sd)
 2774 {
 2775 #ifdef CONFIG_RPS
 2776 	sd->input_queue_head++;
 2777 #endif
 2778 }
 2779 
 2780 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 2781 					      unsigned int *qtail)
 2782 {
 2783 #ifdef CONFIG_RPS
 2784 	*qtail = ++sd->input_queue_tail;
 2785 #endif
 2786 }
 2787 
 2788 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 2789 
 2790 void __netif_schedule(struct Qdisc *q);
 2791 void netif_schedule_queue(struct netdev_queue *txq);
 2792 
 2793 static inline void netif_tx_schedule_all(struct net_device *dev)
 2794 {
 2795 	unsigned int i;
 2796 
 2797 	for (i = 0; i < dev->num_tx_queues; i++)
 2798 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 2799 }
 2800 
 2801 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 2802 {
 2803 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2804 }
 2805 
 2806 /**
 2807  *	netif_start_queue - allow transmit
 2808  *	@dev: network device
 2809  *
 2810  *	Allow upper layers to call the device hard_start_xmit routine.
 2811  */
 2812 static inline void netif_start_queue(struct net_device *dev)
 2813 {
 2814 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 2815 }
 2816 
 2817 static inline void netif_tx_start_all_queues(struct net_device *dev)
 2818 {
 2819 	unsigned int i;
 2820 
 2821 	for (i = 0; i < dev->num_tx_queues; i++) {
 2822 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2823 		netif_tx_start_queue(txq);
 2824 	}
 2825 }
 2826 
 2827 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
 2828 
 2829 /**
 2830  *	netif_wake_queue - restart transmit
 2831  *	@dev: network device
 2832  *
 2833  *	Allow upper layers to call the device hard_start_xmit routine.
 2834  *	Used for flow control when transmit resources are available.
 2835  */
 2836 static inline void netif_wake_queue(struct net_device *dev)
 2837 {
 2838 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 2839 }
 2840 
 2841 static inline void netif_tx_wake_all_queues(struct net_device *dev)
 2842 {
 2843 	unsigned int i;
 2844 
 2845 	for (i = 0; i < dev->num_tx_queues; i++) {
 2846 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2847 		netif_tx_wake_queue(txq);
 2848 	}
 2849 }
 2850 
 2851 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 2852 {
 2853 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2854 }
 2855 
 2856 /**
 2857  *	netif_stop_queue - stop the transmit queue
 2858  *	@dev: network device
 2859  *
 2860  *	Stop upper layers calling the device hard_start_xmit routine.
 2861  *	Used for flow control when transmit resources are unavailable.
 2862  */
 2863 static inline void netif_stop_queue(struct net_device *dev)
 2864 {
 2865 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 2866 }
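
A hedged sketch of the usual flow-control pattern built on these helpers; example_ring_full() and example_ring_has_room() are assumed driver-specific predicates, and netif_queue_stopped() is defined just below.

static bool example_ring_full(struct net_device *dev);	/* assumed predicate */
static bool example_ring_has_room(struct net_device *dev);	/* assumed predicate */

static netdev_tx_t example_flowctl_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	/* ... post skb to the hardware ring (elided) ... */

	if (example_ring_full(dev))
		netif_stop_queue(dev);		/* backpressure the stack */
	return NETDEV_TX_OK;
}

static void example_tx_irq(struct net_device *dev)
{
	/* ... reclaim completed descriptors (elided) ... */

	if (netif_queue_stopped(dev) && example_ring_has_room(dev))
		netif_wake_queue(dev);		/* resume transmission */
}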
 2867 
 2868 void netif_tx_stop_all_queues(struct net_device *dev);
 2869 
 2870 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 2871 {
 2872 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2873 }
 2874 
 2875 /**
 2876  *	netif_queue_stopped - test if the transmit queue is flow-blocked
 2877  *	@dev: network device
 2878  *
 2879  *	Test if transmit queue on device is currently unable to send.
 2880  */
 2881 static inline bool netif_queue_stopped(const struct net_device *dev)
 2882 {
 2883 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 2884 }
 2885 
 2886 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
 2887 {
 2888 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
 2889 }
 2890 
 2891 static inline bool
 2892 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
 2893 {
 2894 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 2895 }
 2896 
 2897 static inline bool
 2898 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
 2899 {
 2900 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
 2901 }
 2902 
 2903 /**
 2904  *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 2905  *	@dev_queue: pointer to transmit queue
 2906  *
 2907  * BQL-enabled drivers might use this helper in their ndo_start_xmit(),
 2908  * to give an appropriate hint to the CPU.
 2909  */
 2910 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
 2911 {
 2912 #ifdef CONFIG_BQL
 2913 	prefetchw(&dev_queue->dql.num_queued);
 2914 #endif
 2915 }
 2916 
 2917 /**
 2918  *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 2919  *	@dev_queue: pointer to transmit queue
 2920  *
 2921  * BQL-enabled drivers might use this helper in their TX completion path,
 2922  * to give an appropriate hint to the CPU.
 2923  */
 2924 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
 2925 {
 2926 #ifdef CONFIG_BQL
 2927 	prefetchw(&dev_queue->dql.limit);
 2928 #endif
 2929 }
 2930 
 2931 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 2932 					unsigned int bytes)
 2933 {
 2934 #ifdef CONFIG_BQL
 2935 	dql_queued(&dev_queue->dql, bytes);
 2936 
 2937 	if (likely(dql_avail(&dev_queue->dql) >= 0))
 2938 		return;
 2939 
 2940 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2941 
 2942 	/*
 2943 	 * The XOFF flag must be set before checking the dql_avail below,
 2944 	 * because in netdev_tx_completed_queue we update the dql_completed
 2945 	 * before checking the XOFF flag.
 2946 	 */
 2947 	smp_mb();
 2948 
 2949 	/* check again in case another CPU has just made room available */
 2950 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
 2951 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2952 #endif
 2953 }
 2954 
 2955 /**
 2956  * 	netdev_sent_queue - report the number of bytes queued to hardware
 2957  * 	@dev: network device
 2958  * 	@bytes: number of bytes queued to the hardware device queue
 2959  *
 2960  * 	Report the number of bytes queued for sending/completion to the network
 2961  * 	device hardware queue. @bytes should be a good approximation, and its
 2962  * 	running total must exactly match the @bytes passed to netdev_completed_queue()
 2963  */
 2964 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 2965 {
 2966 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
 2967 }
 2968 
 2969 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 2970 					     unsigned int pkts, unsigned int bytes)
 2971 {
 2972 #ifdef CONFIG_BQL
 2973 	if (unlikely(!bytes))
 2974 		return;
 2975 
 2976 	dql_completed(&dev_queue->dql, bytes);
 2977 
 2978 	/*
 2979 	 * Without the memory barrier there is a small possibility that
 2980 	 * netdev_tx_sent_queue will miss the update and cause the queue to
 2981 	 * be stopped forever
 2982 	 */
 2983 	smp_mb();
 2984 
 2985 	if (dql_avail(&dev_queue->dql) < 0)
 2986 		return;
 2987 
 2988 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
 2989 		netif_schedule_queue(dev_queue);
 2990 #endif
 2991 }
 2992 
 2993 /**
 2994  * 	netdev_completed_queue - report bytes and packets completed by device
 2995  * 	@dev: network device
 2996  * 	@pkts: actual number of packets sent over the medium
 2997  * 	@bytes: actual number of bytes sent over the medium
 2998  *
 2999  * 	Report the number of bytes and packets transmitted by the network device
 3000  * 	hardware queue over the physical medium; @bytes must exactly match the
 3001  * 	@bytes amount passed to netdev_sent_queue()
 3002  */
 3003 static inline void netdev_completed_queue(struct net_device *dev,
 3004 					  unsigned int pkts, unsigned int bytes)
 3005 {
 3006 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
 3007 }
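
A hedged sketch of the BQL pairing for a single-queue driver; ring manipulation is elided and the example_* names are assumptions. The length is captured before the skb is handed to hardware, since the skb may be freed by completion before we account for it.

static netdev_tx_t example_bql_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... post skb to the hardware ring and ring the doorbell ... */

	netdev_sent_queue(dev, len);	/* enqueue-side BQL accounting */
	return NETDEV_TX_OK;
}

static void example_bql_tx_clean(struct net_device *dev,
				 unsigned int pkts, unsigned int bytes)
{
	/* totals gathered while walking completed descriptors */
	netdev_completed_queue(dev, pkts, bytes);
}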
 3008 
 3009 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
 3010 {
 3011 #ifdef CONFIG_BQL
 3012 	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
 3013 	dql_reset(&q->dql);
 3014 #endif
 3015 }
 3016 
 3017 /**
 3018  * 	netdev_reset_queue - reset the packet and byte counts of a network device
 3019  * 	@dev_queue: network device
 3020  *
 3021  * 	Reset the bytes and packet count of a network device and clear the
 3022  * 	software flow control OFF bit for this network device
 3023  */
 3024 static inline void netdev_reset_queue(struct net_device *dev_queue)
 3025 {
 3026 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 3027 }
 3028 
 3029 /**
 3030  * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 3031  * 	@dev: network device
 3032  * 	@queue_index: given tx queue index
 3033  *
 3034  * 	Returns 0 if given tx queue index >= number of device tx queues,
 3035  * 	otherwise returns the originally passed tx queue index.
 3036  */
 3037 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
 3038 {
 3039 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 3040 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
 3041 				     dev->name, queue_index,
 3042 				     dev->real_num_tx_queues);
 3043 		return 0;
 3044 	}
 3045 
 3046 	return queue_index;
 3047 }
 3048 
 3049 /**
 3050  *	netif_running - test if up
 3051  *	@dev: network device
 3052  *
 3053  *	Test if the device has been brought up.
 3054  */
 3055 static inline bool netif_running(const struct net_device *dev)
 3056 {
 3057 	return test_bit(__LINK_STATE_START, &dev->state);
 3058 }
 3059 
 3060 /*
 3061  * Routines to manage the subqueues on a device.  We only need start,
 3062  * stop, and a check if it's stopped.  All other device management is
 3063  * done at the overall netdevice level.
 3064  * There is also a test for whether the device is multiqueue.
 3065  */
 3066 
 3067 /**
 3068  *	netif_start_subqueue - allow sending packets on subqueue
 3069  *	@dev: network device
 3070  *	@queue_index: sub queue index
 3071  *
 3072  * Start individual transmit queue of a device with multiple transmit queues.
 3073  */
 3074 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 3075 {
 3076 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 3077 
 3078 	netif_tx_start_queue(txq);
 3079 }
 3080 
 3081 /**
 3082  *	netif_stop_subqueue - stop sending packets on subqueue
 3083  *	@dev: network device
 3084  *	@queue_index: sub queue index
 3085  *
 3086  * Stop individual transmit queue of a device with multiple transmit queues.
 3087  */
 3088 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 3089 {
 3090 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 3091 	netif_tx_stop_queue(txq);
 3092 }
 3093 
 3094 /**
 3095  *	netif_subqueue_stopped - test status of subqueue
 3096  *	@dev: network device
 3097  *	@queue_index: sub queue index
 3098  *
 3099  * Check individual transmit queue of a device with multiple transmit queues.
 3100  */
 3101 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
 3102 					    u16 queue_index)
 3103 {
 3104 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 3105 
 3106 	return netif_tx_queue_stopped(txq);
 3107 }
 3108 
 3109 static inline bool netif_subqueue_stopped(const struct net_device *dev,
 3110 					  struct sk_buff *skb)
 3111 {
 3112 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 3113 }
 3114 
 3115 /**
 3116  *	netif_wake_subqueue - allow sending packets on subqueue
 3117  *	@dev: network device
 3118  *	@queue_index: sub queue index
 3119  *
 3120  * Resume individual transmit queue of a device with multiple transmit queues.
 3121  */
 3122 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 3123 {
 3124 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 3125 
 3126 	netif_tx_wake_queue(txq);
 3127 }
 3128 
 3129 #ifdef CONFIG_XPS
 3130 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 3131 			u16 index);
 3132 #else
 3133 static inline int netif_set_xps_queue(struct net_device *dev,
 3134 				      const struct cpumask *mask,
 3135 				      u16 index)
 3136 {
 3137 	return 0;
 3138 }
 3139 #endif
 3140 
 3141 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 3142 		  unsigned int num_tx_queues);
 3143 
 3144 /*
 3145  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 3146  * as a distribution range limit for the returned value.
 3147  */
 3148 static inline u16 skb_tx_hash(const struct net_device *dev,
 3149 			      struct sk_buff *skb)
 3150 {
 3151 	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
 3152 }
 3153 
 3154 /**
 3155  *	netif_is_multiqueue - test if device has multiple transmit queues
 3156  *	@dev: network device
 3157  *
 3158  * Check if device has multiple transmit queues
 3159  */
 3160 static inline bool netif_is_multiqueue(const struct net_device *dev)
 3161 {
 3162 	return dev->num_tx_queues > 1;
 3163 }
 3164 
 3165 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 3166 
 3167 #ifdef CONFIG_SYSFS
 3168 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 3169 #else
 3170 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
 3171 						unsigned int rxq)
 3172 {
 3173 	return 0;
 3174 }
 3175 #endif
 3176 
 3177 #ifdef CONFIG_SYSFS
 3178 static inline unsigned int get_netdev_rx_queue_index(
 3179 		struct netdev_rx_queue *queue)
 3180 {
 3181 	struct net_device *dev = queue->dev;
 3182 	int index = queue - dev->_rx;
 3183 
 3184 	BUG_ON(index >= dev->num_rx_queues);
 3185 	return index;
 3186 }
 3187 #endif
 3188 
 3189 #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
 3190 int netif_get_num_default_rss_queues(void);
 3191 
 3192 enum skb_free_reason {
 3193 	SKB_REASON_CONSUMED,
 3194 	SKB_REASON_DROPPED,
 3195 };
 3196 
 3197 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
 3198 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
 3199 
 3200 /*
 3201  * It is not allowed to call kfree_skb() or consume_skb() from hardware
 3202  * interrupt context or with hardware interrupts being disabled.
 3203  * (in_irq() || irqs_disabled())
 3204  *
 3205  * We provide four helpers that can be used in the following contexts:
 3206  *
 3207  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 3208  *  replacing kfree_skb(skb)
 3209  *
 3210  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 3211  *  Typically used in place of consume_skb(skb) in TX completion path
 3212  *
 3213  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 3214  *  replacing kfree_skb(skb)
 3215  *
 3216  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 3217  *  and consumed a packet. Used in place of consume_skb(skb)
 3218  */
 3219 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 3220 {
 3221 	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
 3222 }
 3223 
 3224 static inline void dev_consume_skb_irq(struct sk_buff *skb)
 3225 {
 3226 	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
 3227 }
 3228 
 3229 static inline void dev_kfree_skb_any(struct sk_buff *skb)
 3230 {
 3231 	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
 3232 }
 3233 
 3234 static inline void dev_consume_skb_any(struct sk_buff *skb)
 3235 {
 3236 	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 3237 }
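
For instance, a TX-completion interrupt handler would pick between the _irq variants depending on whether the packet was actually sent or dropped (a sketch; names assumed):

static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_irq(skb);	/* normal completion */
	else
		dev_kfree_skb_irq(skb);		/* counts as a drop for drop monitoring */
}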
 3238 
 3239 int netif_rx(struct sk_buff *skb);
 3240 int netif_rx_ni(struct sk_buff *skb);
 3241 int netif_receive_skb(struct sk_buff *skb);
 3242 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 3243 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 3244 struct sk_buff *napi_get_frags(struct napi_struct *napi);
 3245 gro_result_t napi_gro_frags(struct napi_struct *napi);
 3246 struct packet_offload *gro_find_receive_by_type(__be16 type);
 3247 struct packet_offload *gro_find_complete_by_type(__be16 type);
 3248 
 3249 static inline void napi_free_frags(struct napi_struct *napi)
 3250 {
 3251 	kfree_skb(napi->skb);
 3252 	napi->skb = NULL;
 3253 }
 3254 
 3255 bool netdev_is_rx_handler_busy(struct net_device *dev);
 3256 int netdev_rx_handler_register(struct net_device *dev,
 3257 			       rx_handler_func_t *rx_handler,
 3258 			       void *rx_handler_data);
 3259 void netdev_rx_handler_unregister(struct net_device *dev);
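
A sketch of an rx_handler (names assumed); rx_handler_result_t and the RX_HANDLER_* codes are declared earlier in this header, and registration must run under rtnl_lock().

static bool example_should_steal(const struct sk_buff *skb);	/* assumed predicate */
static void example_deliver(struct sk_buff *skb);		/* assumed consumer */

static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (example_should_steal(skb)) {
		example_deliver(skb);
		return RX_HANDLER_CONSUMED;	/* the stack must not touch skb */
	}
	return RX_HANDLER_PASS;			/* continue normal processing */
}

/* under rtnl_lock():
 *	err = netdev_rx_handler_register(dev, example_rx_handler, priv_data);
 * and later: netdev_rx_handler_unregister(dev);
 */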
 3260 
 3261 bool dev_valid_name(const char *name);
 3262 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 3263 int dev_ethtool(struct net *net, struct ifreq *);
 3264 unsigned int dev_get_flags(const struct net_device *);
 3265 int __dev_change_flags(struct net_device *, unsigned int flags);
 3266 int dev_change_flags(struct net_device *, unsigned int);
 3267 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
 3268 			unsigned int gchanges);
 3269 int dev_change_name(struct net_device *, const char *);
 3270 int dev_set_alias(struct net_device *, const char *, size_t);
 3271 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 3272 int dev_set_mtu(struct net_device *, int);
 3273 void dev_set_group(struct net_device *, int);
 3274 int dev_set_mac_address(struct net_device *, struct sockaddr *);
 3275 int dev_change_carrier(struct net_device *, bool new_carrier);
 3276 int dev_get_phys_port_id(struct net_device *dev,
 3277 			 struct netdev_phys_item_id *ppid);
 3278 int dev_get_phys_port_name(struct net_device *dev,
 3279 			   char *name, size_t len);
 3280 int dev_change_proto_down(struct net_device *dev, bool proto_down);
 3281 int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags);
 3282 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 3283 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 3284 				    struct netdev_queue *txq, int *ret);
 3285 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 3286 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 3287 bool is_skb_forwardable(const struct net_device *dev,
 3288 			const struct sk_buff *skb);
 3289 
 3290 static __always_inline int ____dev_forward_skb(struct net_device *dev,
 3291 					       struct sk_buff *skb)
 3292 {
 3293 	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
 3294 	    unlikely(!is_skb_forwardable(dev, skb))) {
 3295 		atomic_long_inc(&dev->rx_dropped);
 3296 		kfree_skb(skb);
 3297 		return NET_RX_DROP;
 3298 	}
 3299 
 3300 	skb_scrub_packet(skb, true);
 3301 	skb->priority = 0;
 3302 	return 0;
 3303 }
 3304 
 3305 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 3306 
 3307 extern int		netdev_budget;
 3308 
 3309 /* Called by rtnetlink.c:rtnl_unlock() */
 3310 void netdev_run_todo(void);
 3311 
 3312 /**
 3313  *	dev_put - release reference to device
 3314  *	@dev: network device
 3315  *
 3316  * Release reference to device to allow it to be freed.
 3317  */
 3318 static inline void dev_put(struct net_device *dev)
 3319 {
 3320 	this_cpu_dec(*dev->pcpu_refcnt);
 3321 }
 3322 
 3323 /**
 3324  *	dev_hold - get reference to device
 3325  *	@dev: network device
 3326  *
 3327  * Hold reference to device to keep it from being freed.
 3328  */
 3329 static inline void dev_hold(struct net_device *dev)
 3330 {
 3331 	this_cpu_inc(*dev->pcpu_refcnt);
 3332 }
 3333 
 3334 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
 3335  * and _off may be called from IRQ context, but it is the caller
 3336  * who is responsible for serializing these calls.
 3337  *
 3338  * The name 'carrier' is inappropriate; these functions should really be
 3339  * called netif_lowerlayer_*() because they represent the state of any
 3340  * kind of lower layer, not just hardware media.
 3341  */
 3342 
 3343 void linkwatch_init_dev(struct net_device *dev);
 3344 void linkwatch_fire_event(struct net_device *dev);
 3345 void linkwatch_forget_dev(struct net_device *dev);
 3346 
 3347 /**
 3348  *	netif_carrier_ok - test if carrier present
 3349  *	@dev: network device
 3350  *
 3351  * Check if carrier is present on device
 3352  */
 3353 static inline bool netif_carrier_ok(const struct net_device *dev)
 3354 {
 3355 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 3356 }
 3357 
 3358 unsigned long dev_trans_start(struct net_device *dev);
 3359 
 3360 void __netdev_watchdog_up(struct net_device *dev);
 3361 
 3362 void netif_carrier_on(struct net_device *dev);
 3363 
 3364 void netif_carrier_off(struct net_device *dev);
 3365 
 3366 /**
 3367  *	netif_dormant_on - mark device as dormant.
 3368  *	@dev: network device
 3369  *
 3370  * Mark device as dormant (as per RFC2863).
 3371  *
 3372  * The dormant state indicates that the relevant interface is not
 3373  * actually in a condition to pass packets (i.e., it is not 'up') but is
 3374  * in a "pending" state, waiting for some external event.  For "on-
 3375  * demand" interfaces, this new state identifies the situation where the
 3376  * interface is waiting for events to place it in the up state.
 3377  */
 3378 static inline void netif_dormant_on(struct net_device *dev)
 3379 {
 3380 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
 3381 		linkwatch_fire_event(dev);
 3382 }
 3383 
 3384 /**
 3385  *	netif_dormant_off - set device as not dormant.
 3386  *	@dev: network device
 3387  *
 3388  * Device is not in dormant state.
 3389  */
 3390 static inline void netif_dormant_off(struct net_device *dev)
 3391 {
 3392 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
 3393 		linkwatch_fire_event(dev);
 3394 }
 3395 
 3396 /**
 3397  *	netif_dormant - test if device is dormant
 3398  *	@dev: network device
 3399  *
 3400  * Check if the device is dormant.
 3401  */
 3402 static inline bool netif_dormant(const struct net_device *dev)
 3403 {
 3404 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
 3405 }
 3406 
 3407 
 3408 /**
 3409  *	netif_oper_up - test if device is operational
 3410  *	@dev: network device
 3411  *
 3412  * Check if the device is in the RFC 2863 operational state.
 3413  */
 3414 static inline bool netif_oper_up(const struct net_device *dev)
 3415 {
 3416 	return (dev->operstate == IF_OPER_UP ||
 3417 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 3418 }
 3419 
 3420 /**
 3421  *	netif_device_present - is device available or removed
 3422  *	@dev: network device
 3423  *
 3424  * Check if the device has not been removed from the system.
 3425  */
 3426 static inline bool netif_device_present(struct net_device *dev)
 3427 {
 3428 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 3429 }
 3430 
 3431 void netif_device_detach(struct net_device *dev);
 3432 
 3433 void netif_device_attach(struct net_device *dev);
 3434 
 3435 /*
 3436  * Network interface message level settings
 3437  */
 3438 
 3439 enum {
 3440 	NETIF_MSG_DRV		= 0x0001,
 3441 	NETIF_MSG_PROBE		= 0x0002,
 3442 	NETIF_MSG_LINK		= 0x0004,
 3443 	NETIF_MSG_TIMER		= 0x0008,
 3444 	NETIF_MSG_IFDOWN	= 0x0010,
 3445 	NETIF_MSG_IFUP		= 0x0020,
 3446 	NETIF_MSG_RX_ERR	= 0x0040,
 3447 	NETIF_MSG_TX_ERR	= 0x0080,
 3448 	NETIF_MSG_TX_QUEUED	= 0x0100,
 3449 	NETIF_MSG_INTR		= 0x0200,
 3450 	NETIF_MSG_TX_DONE	= 0x0400,
 3451 	NETIF_MSG_RX_STATUS	= 0x0800,
 3452 	NETIF_MSG_PKTDATA	= 0x1000,
 3453 	NETIF_MSG_HW		= 0x2000,
 3454 	NETIF_MSG_WOL		= 0x4000,
 3455 };
 3456 
 3457 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
 3458 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
 3459 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
 3460 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
 3461 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
 3462 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
 3463 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
 3464 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
 3465 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
 3466 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
 3467 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
 3468 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
 3469 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
 3470 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
 3471 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
 3472 
 3473 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 3474 {
 3475 	/* use default */
 3476 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
 3477 		return default_msg_enable_bits;
 3478 	if (debug_value == 0)	/* no output */
 3479 		return 0;
 3480 	/* set low N bits */
 3481 	return (1 << debug_value) - 1;
 3482 }
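
A sketch of how a driver typically seeds msg_enable from a module 'debug' parameter; the struct and names are assumed, but the field name msg_enable is the one the netif_msg_*() macros above expect.

struct example_msg_priv {
	u32 msg_enable;
};

static void example_set_msglevel(struct example_msg_priv *priv, int debug)
{
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);

	if (netif_msg_link(priv))
		pr_info("link messages enabled\n");
}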
 3483 
 3484 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 3485 {
 3486 	spin_lock(&txq->_xmit_lock);
 3487 	txq->xmit_lock_owner = cpu;
 3488 }
 3489 
 3490 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
 3491 {
 3492 	__acquire(&txq->_xmit_lock);
 3493 	return true;
 3494 }
 3495 
 3496 static inline void __netif_tx_release(struct netdev_queue *txq)
 3497 {
 3498 	__release(&txq->_xmit_lock);
 3499 }
 3500 
 3501 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 3502 {
 3503 	spin_lock_bh(&txq->_xmit_lock);
 3504 	txq->xmit_lock_owner = smp_processor_id();
 3505 }
 3506 
 3507 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 3508 {
 3509 	bool ok = spin_trylock(&txq->_xmit_lock);
 3510 	if (likely(ok))
 3511 		txq->xmit_lock_owner = smp_processor_id();
 3512 	return ok;
 3513 }
 3514 
 3515 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 3516 {
 3517 	txq->xmit_lock_owner = -1;
 3518 	spin_unlock(&txq->_xmit_lock);
 3519 }
 3520 
 3521 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 3522 {
 3523 	txq->xmit_lock_owner = -1;
 3524 	spin_unlock_bh(&txq->_xmit_lock);
 3525 }
 3526 
 3527 static inline void txq_trans_update(struct netdev_queue *txq)
 3528 {
 3529 	if (txq->xmit_lock_owner != -1)
 3530 		txq->trans_start = jiffies;
 3531 }
 3532 
 3533 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
 3534 static inline void netif_trans_update(struct net_device *dev)
 3535 {
 3536 	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 3537 
 3538 	if (txq->trans_start != jiffies)
 3539 		txq->trans_start = jiffies;
 3540 }
 3541 
 3542 /**
 3543  *	netif_tx_lock - grab network device transmit lock
 3544  *	@dev: network device
 3545  *
 3546  * Get network device transmit lock
 3547  */
 3548 static inline void netif_tx_lock(struct net_device *dev)
 3549 {
 3550 	unsigned int i;
 3551 	int cpu;
 3552 
 3553 	spin_lock(&dev->tx_global_lock);
 3554 	cpu = smp_processor_id();
 3555 	for (i = 0; i < dev->num_tx_queues; i++) {
 3556 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 3557 
 3558 		/* We are the only thread of execution doing a
 3559 		 * freeze, but we have to grab the _xmit_lock in
 3560 		 * order to synchronize with threads which are in
 3561 		 * the ->hard_start_xmit() handler and already
 3562 		 * checked the frozen bit.
 3563 		 */
 3564 		__netif_tx_lock(txq, cpu);
 3565 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 3566 		__netif_tx_unlock(txq);
 3567 	}
 3568 }
 3569 
 3570 static inline void netif_tx_lock_bh(struct net_device *dev)
 3571 {
 3572 	local_bh_disable();
 3573 	netif_tx_lock(dev);
 3574 }
 3575 
 3576 static inline void netif_tx_unlock(struct net_device *dev)
 3577 {
 3578 	unsigned int i;
 3579 
 3580 	for (i = 0; i < dev->num_tx_queues; i++) {
 3581 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 3582 
 3583 		/* No need to grab the _xmit_lock here.  If the
 3584 		 * queue is not stopped for another reason, we
 3585 		 * force a schedule.
 3586 		 */
 3587 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
 3588 		netif_schedule_queue(txq);
 3589 	}
 3590 	spin_unlock(&dev->tx_global_lock);
 3591 }
 3592 
 3593 static inline void netif_tx_unlock_bh(struct net_device *dev)
 3594 {
 3595 	netif_tx_unlock(dev);
 3596 	local_bh_enable();
 3597 }
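/*
 * Example (editorial sketch): freezing all TX queues from process
 * context while touching state that the transmit path also reads.
 * The _bh variants disable bottom halves so softirq TX processing
 * cannot deadlock against the caller.
 *
 *	netif_tx_lock_bh(dev);
 *	// ... modify state shared with ->ndo_start_xmit() ...
 *	netif_tx_unlock_bh(dev);
 */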
 3598 
 3599 #define HARD_TX_LOCK(dev, txq, cpu) {			\
 3600 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 3601 		__netif_tx_lock(txq, cpu);		\
 3602 	} else {					\
 3603 		__netif_tx_acquire(txq);		\
 3604 	}						\
 3605 }
 3606 
 3607 #define HARD_TX_TRYLOCK(dev, txq)			\
 3608 	(((dev->features & NETIF_F_LLTX) == 0) ?	\
 3609 		__netif_tx_trylock(txq) :		\
 3610 		__netif_tx_acquire(txq))
 3611 
 3612 #define HARD_TX_UNLOCK(dev, txq) {			\
 3613 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 3614 		__netif_tx_unlock(txq);			\
 3615 	} else {					\
 3616 		__netif_tx_release(txq);		\
 3617 	}						\
 3618 }
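/*
 * Example (editorial sketch): how a transmit path would pair these
 * macros around a queue's xmit call.  LLTX drivers do their own
 * locking, so only the non-LLTX case takes _xmit_lock.
 *
 *	HARD_TX_LOCK(dev, txq, smp_processor_id());
 *	if (!netif_xmit_stopped(txq))
 *		rc = netdev_start_xmit(skb, dev, txq, false);
 *	HARD_TX_UNLOCK(dev, txq);
 */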
 3619 
 3620 static inline void netif_tx_disable(struct net_device *dev)
 3621 {
 3622 	unsigned int i;
 3623 	int cpu;
 3624 
 3625 	local_bh_disable();
 3626 	cpu = smp_processor_id();
 3627 	for (i = 0; i < dev->num_tx_queues; i++) {
 3628 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 3629 
 3630 		__netif_tx_lock(txq, cpu);
 3631 		netif_tx_stop_queue(txq);
 3632 		__netif_tx_unlock(txq);
 3633 	}
 3634 	local_bh_enable();
 3635 }
 3636 
 3637 static inline void netif_addr_lock(struct net_device *dev)
 3638 {
 3639 	spin_lock(&dev->addr_list_lock);
 3640 }
 3641 
 3642 static inline void netif_addr_lock_nested(struct net_device *dev)
 3643 {
 3644 	int subclass = SINGLE_DEPTH_NESTING;
 3645 
 3646 	if (dev->netdev_ops->ndo_get_lock_subclass)
 3647 		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
 3648 
 3649 	spin_lock_nested(&dev->addr_list_lock, subclass);
 3650 }
 3651 
 3652 static inline void netif_addr_lock_bh(struct net_device *dev)
 3653 {
 3654 	spin_lock_bh(&dev->addr_list_lock);
 3655 }
 3656 
 3657 static inline void netif_addr_unlock(struct net_device *dev)
 3658 {
 3659 	spin_unlock(&dev->addr_list_lock);
 3660 }
 3661 
 3662 static inline void netif_addr_unlock_bh(struct net_device *dev)
 3663 {
 3664 	spin_unlock_bh(&dev->addr_list_lock);
 3665 }
 3666 
 3667 /*
 3668  * dev_addrs walker. Should be used only for read access. Call with
 3669  * rcu_read_lock held.
 3670  */
 3671 #define for_each_dev_addr(dev, ha) \
 3672 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
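/*
 * Example (editorial sketch): walking the device address list
 * read-only, under the RCU read lock as required above.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_info("addr: %pM\n", ha->addr);
 *	rcu_read_unlock();
 */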
 3673 
 3674 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 3675 
 3676 void ether_setup(struct net_device *dev);
 3677 
 3678 /* Support for loadable net-drivers */
 3679 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 3680 				    unsigned char name_assign_type,
 3681 				    void (*setup)(struct net_device *),
 3682 				    unsigned int txqs, unsigned int rxqs);
 3683 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
 3684 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 3685 
 3686 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
 3687 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
 3688 			 count)
 3689 
 3690 int register_netdev(struct net_device *dev);
 3691 void unregister_netdev(struct net_device *dev);
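/*
 * Example (editorial sketch): the usual lifetime of a loadable driver's
 * net_device built on the helpers above.  "my_priv" and "my_setup" are
 * hypothetical; free_netdev() is declared earlier in this header.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d",
 *			   NET_NAME_UNKNOWN, my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */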
 3692 
 3693 /* General hardware address lists handling functions */
 3694 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
 3695 		   struct netdev_hw_addr_list *from_list, int addr_len);
 3696 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
 3697 		      struct netdev_hw_addr_list *from_list, int addr_len);
 3698 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
 3699 		       struct net_device *dev,
 3700 		       int (*sync)(struct net_device *, const unsigned char *),
 3701 		       int (*unsync)(struct net_device *,
 3702 				     const unsigned char *));
 3703 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
 3704 			  struct net_device *dev,
 3705 			  int (*unsync)(struct net_device *,
 3706 					const unsigned char *));
 3707 void __hw_addr_init(struct netdev_hw_addr_list *list);
 3708 
 3709 /* Functions used for device addresses handling */
 3710 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
 3711 		 unsigned char addr_type);
 3712 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
 3713 		 unsigned char addr_type);
 3714 void dev_addr_flush(struct net_device *dev);
 3715 int dev_addr_init(struct net_device *dev);
 3716 
 3717 /* Functions used for unicast addresses handling */
 3718 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
 3719 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
 3720 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
 3721 int dev_uc_sync(struct net_device *to, struct net_device *from);
 3722 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
 3723 void dev_uc_unsync(struct net_device *to, struct net_device *from);
 3724 void dev_uc_flush(struct net_device *dev);
 3725 void dev_uc_init(struct net_device *dev);
 3726 
 3727 /**
 3728  *  __dev_uc_sync - Synchronize device's unicast list
 3729  *  @dev:  device to sync
 3730  *  @sync: function to call if address should be added
 3731  *  @unsync: function to call if address should be removed
 3732  *
 3733  *  Add newly added addresses to the interface, and release
 3734  *  addresses that have been deleted.
 3735  */
 3736 static inline int __dev_uc_sync(struct net_device *dev,
 3737 				int (*sync)(struct net_device *,
 3738 					    const unsigned char *),
 3739 				int (*unsync)(struct net_device *,
 3740 					      const unsigned char *))
 3741 {
 3742 	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
 3743 }
 3744 
 3745 /**
 3746  *  __dev_uc_unsync - Remove synchronized addresses from device
 3747  *  @dev:  device to sync
 3748  *  @unsync: function to call if address should be removed
 3749  *
 3750  *  Remove all addresses that were added to the device by dev_uc_sync().
 3751  */
 3752 static inline void __dev_uc_unsync(struct net_device *dev,
 3753 				   int (*unsync)(struct net_device *,
 3754 						 const unsigned char *))
 3755 {
 3756 	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
 3757 }
 3758 
 3759 /* Functions used for multicast addresses handling */
 3760 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
 3761 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
 3762 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
 3763 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
 3764 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
 3765 int dev_mc_sync(struct net_device *to, struct net_device *from);
 3766 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
 3767 void dev_mc_unsync(struct net_device *to, struct net_device *from);
 3768 void dev_mc_flush(struct net_device *dev);
 3769 void dev_mc_init(struct net_device *dev);
 3770 
 3771 /**
 3772  *  __dev_mc_sync - Synchronize device's multicast list
 3773  *  @dev:  device to sync
 3774  *  @sync: function to call if address should be added
 3775  *  @unsync: function to call if address should be removed
 3776  *
 3777  *  Add newly added addresses to the interface, and release
 3778  *  addresses that have been deleted.
 3779  */
 3780 static inline int __dev_mc_sync(struct net_device *dev,
 3781 				int (*sync)(struct net_device *,
 3782 					    const unsigned char *),
 3783 				int (*unsync)(struct net_device *,
 3784 					      const unsigned char *))
 3785 {
 3786 	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
 3787 }
 3788 
 3789 /**
 3790  *  __dev_mc_unsync - Remove synchronized addresses from device
 3791  *  @dev:  device to sync
 3792  *  @unsync: function to call if address should be removed
 3793  *
 3794  *  Remove all addresses that were added to the device by dev_mc_sync().
 3795  */
 3796 static inline void __dev_mc_unsync(struct net_device *dev,
 3797 				   int (*unsync)(struct net_device *,
 3798 						 const unsigned char *))
 3799 {
 3800 	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
 3801 }
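/*
 * Example (editorial sketch): a driver propagating its address lists
 * from ndo_set_rx_mode.  "my_sync_addr" and "my_unsync_addr" are
 * hypothetical callbacks that each program one hardware filter entry.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, my_sync_addr, my_unsync_addr);
 *		__dev_mc_sync(dev, my_sync_addr, my_unsync_addr);
 *	}
 */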
 3802 
 3803 /* Functions used for secondary unicast and multicast support */
 3804 void dev_set_rx_mode(struct net_device *dev);
 3805 void __dev_set_rx_mode(struct net_device *dev);
 3806 int dev_set_promiscuity(struct net_device *dev, int inc);
 3807 int dev_set_allmulti(struct net_device *dev, int inc);
 3808 void netdev_state_change(struct net_device *dev);
 3809 void netdev_notify_peers(struct net_device *dev);
 3810 void netdev_features_change(struct net_device *dev);
 3811 /* Load a device via the kmod */
 3812 void dev_load(struct net *net, const char *name);
 3813 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 3814 					struct rtnl_link_stats64 *storage);
 3815 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 3816 			     const struct net_device_stats *netdev_stats);
 3817 
 3818 extern int		netdev_max_backlog;
 3819 extern int		netdev_tstamp_prequeue;
 3820 extern int		weight_p;
 3821 extern int		dev_weight_rx_bias;
 3822 extern int		dev_weight_tx_bias;
 3823 extern int		dev_rx_weight;
 3824 extern int		dev_tx_weight;
 3825 
 3826 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 3827 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 3828 						     struct list_head **iter);
 3829 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 3830 						     struct list_head **iter);
 3831 
 3832 /* iterate through upper list, must be called under RCU read lock */
 3833 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
 3834 	for (iter = &(dev)->adj_list.upper, \
 3835 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
 3836 	     updev; \
 3837 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
 3838 
 3839 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
 3840 				  int (*fn)(struct net_device *upper_dev,
 3841 					    void *data),
 3842 				  void *data);
 3843 
 3844 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
 3845 				  struct net_device *upper_dev);
 3846 
 3847 void *netdev_lower_get_next_private(struct net_device *dev,
 3848 				    struct list_head **iter);
 3849 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 3850 					struct list_head **iter);
 3851 
 3852 #define netdev_for_each_lower_private(dev, priv, iter) \
 3853 	for (iter = (dev)->adj_list.lower.next, \
 3854 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
 3855 	     priv; \
 3856 	     priv = netdev_lower_get_next_private(dev, &(iter)))
 3857 
 3858 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
 3859 	for (iter = &(dev)->adj_list.lower, \
 3860 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
 3861 	     priv; \
 3862 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 3863 
 3864 void *netdev_lower_get_next(struct net_device *dev,
 3865 				struct list_head **iter);
 3866 
 3867 #define netdev_for_each_lower_dev(dev, ldev, iter) \
 3868 	for (iter = (dev)->adj_list.lower.next, \
 3869 	     ldev = netdev_lower_get_next(dev, &(iter)); \
 3870 	     ldev; \
 3871 	     ldev = netdev_lower_get_next(dev, &(iter)))
 3872 
 3873 struct net_device *netdev_all_lower_get_next(struct net_device *dev,
 3874 					     struct list_head **iter);
 3875 struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
 3876 						 struct list_head **iter);
 3877 
 3878 int netdev_walk_all_lower_dev(struct net_device *dev,
 3879 			      int (*fn)(struct net_device *lower_dev,
 3880 					void *data),
 3881 			      void *data);
 3882 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
 3883 				  int (*fn)(struct net_device *lower_dev,
 3884 					    void *data),
 3885 				  void *data);
 3886 
 3887 void *netdev_adjacent_get_private(struct list_head *adj_list);
 3888 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 3889 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
 3890 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
 3891 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
 3892 int netdev_master_upper_dev_link(struct net_device *dev,
 3893 				 struct net_device *upper_dev,
 3894 				 void *upper_priv, void *upper_info);
 3895 void netdev_upper_dev_unlink(struct net_device *dev,
 3896 			     struct net_device *upper_dev);
 3897 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 3898 void *netdev_lower_dev_get_private(struct net_device *dev,
 3899 				   struct net_device *lower_dev);
 3900 void netdev_lower_state_changed(struct net_device *lower_dev,
 3901 				void *lower_state_info);
 3902 
 3903 /* RSS keys are 40 or 52 bytes long */
 3904 #define NETDEV_RSS_KEY_LEN 52
 3905 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 3906 void netdev_rss_key_fill(void *buffer, size_t len);
 3907 
 3908 int dev_get_nest_level(struct net_device *dev);
 3909 int skb_checksum_help(struct sk_buff *skb);
 3910 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 3911 				  netdev_features_t features, bool tx_path);
 3912 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 3913 				    netdev_features_t features);
 3914 
 3915 struct netdev_bonding_info {
 3916 	ifslave	slave;
 3917 	ifbond	master;
 3918 };
 3919 
 3920 struct netdev_notifier_bonding_info {
 3921 	struct netdev_notifier_info info; /* must be first */
 3922 	struct netdev_bonding_info  bonding_info;
 3923 };
 3924 
 3925 void netdev_bonding_info_change(struct net_device *dev,
 3926 				struct netdev_bonding_info *bonding_info);
 3927 
 3928 static inline
 3929 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 3930 {
 3931 	return __skb_gso_segment(skb, features, true);
 3932 }
 3933 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
 3934 
 3935 static inline bool can_checksum_protocol(netdev_features_t features,
 3936 					 __be16 protocol)
 3937 {
 3938 	if (protocol == htons(ETH_P_FCOE))
 3939 		return !!(features & NETIF_F_FCOE_CRC);
 3940 
 3941 	/* Assume this is an IP checksum (not SCTP CRC) */
 3942 
 3943 	if (features & NETIF_F_HW_CSUM) {
 3944 		/* Can checksum everything */
 3945 		return true;
 3946 	}
 3947 
 3948 	switch (protocol) {
 3949 	case htons(ETH_P_IP):
 3950 		return !!(features & NETIF_F_IP_CSUM);
 3951 	case htons(ETH_P_IPV6):
 3952 		return !!(features & NETIF_F_IPV6_CSUM);
 3953 	default:
 3954 		return false;
 3955 	}
 3956 }
 3957 
 3958 #ifdef CONFIG_BUG
 3959 void netdev_rx_csum_fault(struct net_device *dev);
 3960 #else
 3961 static inline void netdev_rx_csum_fault(struct net_device *dev)
 3962 {
 3963 }
 3964 #endif
 3965 /* rx skb timestamps */
 3966 void net_enable_timestamp(void);
 3967 void net_disable_timestamp(void);
 3968 
 3969 #ifdef CONFIG_PROC_FS
 3970 int __init dev_proc_init(void);
 3971 #else
 3972 #define dev_proc_init() 0
 3973 #endif
 3974 
 3975 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
 3976 					      struct sk_buff *skb, struct net_device *dev,
 3977 					      bool more)
 3978 {
 3979 	skb->xmit_more = more ? 1 : 0;
 3980 	return ops->ndo_start_xmit(skb, dev);
 3981 }
 3982 
 3983 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
 3984 					    struct netdev_queue *txq, bool more)
 3985 {
 3986 	const struct net_device_ops *ops = dev->netdev_ops;
 3987 	int rc;
 3988 
 3989 	rc = __netdev_start_xmit(ops, skb, dev, more);
 3990 	if (rc == NETDEV_TX_OK)
 3991 		txq_trans_update(txq);
 3992 
 3993 	return rc;
 3994 }
 3995 
 3996 int netdev_class_create_file_ns(struct class_attribute *class_attr,
 3997 				const void *ns);
 3998 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
 3999 				 const void *ns);
 4000 
 4001 static inline int netdev_class_create_file(struct class_attribute *class_attr)
 4002 {
 4003 	return netdev_class_create_file_ns(class_attr, NULL);
 4004 }
 4005 
 4006 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
 4007 {
 4008 	netdev_class_remove_file_ns(class_attr, NULL);
 4009 }
 4010 
 4011 extern struct kobj_ns_type_operations net_ns_type_operations;
 4012 
 4013 const char *netdev_drivername(const struct net_device *dev);
 4014 
 4015 void linkwatch_run_queue(void);
 4016 
 4017 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
 4018 							  netdev_features_t f2)
 4019 {
 4020 	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
 4021 		if (f1 & NETIF_F_HW_CSUM)
 4022 			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 4023 		else
 4024 			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 4025 	}
 4026 
 4027 	return f1 & f2;
 4028 }
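/*
 * Worked example (editorial): if f1 has NETIF_F_HW_CSUM and f2 only has
 * NETIF_F_IP_CSUM, a plain f1 & f2 would drop checksum offload entirely.
 * The fixup above first widens the HW_CSUM side to IP/IPv6 checksumming,
 * so the intersection correctly retains NETIF_F_IP_CSUM.
 */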
 4029 
 4030 static inline netdev_features_t netdev_get_wanted_features(
 4031 	struct net_device *dev)
 4032 {
 4033 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
 4034 }
 4035 netdev_features_t netdev_increment_features(netdev_features_t all,
 4036 	netdev_features_t one, netdev_features_t mask);
 4037 
 4038 /* Allow TSO to be used on stacked devices:
 4039  * performing GSO segmentation before the last device
 4040  * is a performance improvement.
 4041  */
 4042 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
 4043 							netdev_features_t mask)
 4044 {
 4045 	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
 4046 }
 4047 
 4048 int __netdev_update_features(struct net_device *dev);
 4049 void netdev_update_features(struct net_device *dev);
 4050 void netdev_change_features(struct net_device *dev);
 4051 
 4052 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 4053 					struct net_device *dev);
 4054 
 4055 netdev_features_t passthru_features_check(struct sk_buff *skb,
 4056 					  struct net_device *dev,
 4057 					  netdev_features_t features);
 4058 netdev_features_t netif_skb_features(struct sk_buff *skb);
 4059 
 4060 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 4061 {
 4062 	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
 4063 
 4064 	/* check flags correspondence */
 4065 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 4066 	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
 4067 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 4068 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 4069 	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
 4070 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 4071 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 4072 	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
 4073 	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
 4074 	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
 4075 	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
 4076 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
 4077 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
 4078 	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
 4079 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 4080 	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
 4081 
 4082 	return (features & feature) == feature;
 4083 }
 4084 
 4085 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 4086 {
 4087 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 4088 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 4089 }
 4090 
 4091 static inline bool netif_needs_gso(struct sk_buff *skb,
 4092 				   netdev_features_t features)
 4093 {
 4094 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
 4095 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
 4096 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
 4097 }
 4098 
 4099 static inline void netif_set_gso_max_size(struct net_device *dev,
 4100 					  unsigned int size)
 4101 {
 4102 	dev->gso_max_size = size;
 4103 }
 4104 
 4105 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
 4106 					int pulled_hlen, u16 mac_offset,
 4107 					int mac_len)
 4108 {
 4109 	skb->protocol = protocol;
 4110 	skb->encapsulation = 1;
 4111 	skb_push(skb, pulled_hlen);
 4112 	skb_reset_transport_header(skb);
 4113 	skb->mac_header = mac_offset;
 4114 	skb->network_header = skb->mac_header + mac_len;
 4115 	skb->mac_len = mac_len;
 4116 }
 4117 
 4118 static inline bool netif_is_macsec(const struct net_device *dev)
 4119 {
 4120 	return dev->priv_flags & IFF_MACSEC;
 4121 }
 4122 
 4123 static inline bool netif_is_macvlan(const struct net_device *dev)
 4124 {
 4125 	return dev->priv_flags & IFF_MACVLAN;
 4126 }
 4127 
 4128 static inline bool netif_is_macvlan_port(const struct net_device *dev)
 4129 {
 4130 	return dev->priv_flags & IFF_MACVLAN_PORT;
 4131 }
 4132 
 4133 static inline bool netif_is_ipvlan(const struct net_device *dev)
 4134 {
 4135 	return dev->priv_flags & IFF_IPVLAN_SLAVE;
 4136 }
 4137 
 4138 static inline bool netif_is_ipvlan_port(const struct net_device *dev)
 4139 {
 4140 	return dev->priv_flags & IFF_IPVLAN_MASTER;
 4141 }
 4142 
 4143 static inline bool netif_is_bond_master(const struct net_device *dev)
 4144 {
 4145 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
 4146 }
 4147 
 4148 static inline bool netif_is_bond_slave(const struct net_device *dev)
 4149 {
 4150 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 4151 }
 4152 
 4153 static inline bool netif_supports_nofcs(struct net_device *dev)
 4154 {
 4155 	return dev->priv_flags & IFF_SUPP_NOFCS;
 4156 }
 4157 
 4158 static inline bool netif_is_l3_master(const struct net_device *dev)
 4159 {
 4160 	return dev->priv_flags & IFF_L3MDEV_MASTER;
 4161 }
 4162 
 4163 static inline bool netif_is_l3_slave(const struct net_device *dev)
 4164 {
 4165 	return dev->priv_flags & IFF_L3MDEV_SLAVE;
 4166 }
 4167 
 4168 static inline bool netif_is_bridge_master(const struct net_device *dev)
 4169 {
 4170 	return dev->priv_flags & IFF_EBRIDGE;
 4171 }
 4172 
 4173 static inline bool netif_is_bridge_port(const struct net_device *dev)
 4174 {
 4175 	return dev->priv_flags & IFF_BRIDGE_PORT;
 4176 }
 4177 
 4178 static inline bool netif_is_ovs_master(const struct net_device *dev)
 4179 {
 4180 	return dev->priv_flags & IFF_OPENVSWITCH;
 4181 }
 4182 
 4183 static inline bool netif_is_team_master(const struct net_device *dev)
 4184 {
 4185 	return dev->priv_flags & IFF_TEAM;
 4186 }
 4187 
 4188 static inline bool netif_is_team_port(const struct net_device *dev)
 4189 {
 4190 	return dev->priv_flags & IFF_TEAM_PORT;
 4191 }
 4192 
 4193 static inline bool netif_is_lag_master(const struct net_device *dev)
 4194 {
 4195 	return netif_is_bond_master(dev) || netif_is_team_master(dev);
 4196 }
 4197 
 4198 static inline bool netif_is_lag_port(const struct net_device *dev)
 4199 {
 4200 	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
 4201 }
 4202 
 4203 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
 4204 {
 4205 	return dev->priv_flags & IFF_RXFH_CONFIGURED;
 4206 }
 4207 
 4208 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 4209 static inline void netif_keep_dst(struct net_device *dev)
 4210 {
 4211 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
 4212 }
 4213 
 4214 /* return true if dev can't cope with mtu frames that need vlan tag insertion */
 4215 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
 4216 {
 4217 	/* TODO: reserve and use an additional IFF bit, if we get more users */
 4218 	return dev->priv_flags & IFF_MACSEC;
 4219 }
 4220 
 4221 extern struct pernet_operations __net_initdata loopback_net_ops;
 4222 
 4223 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 4224 
 4225 /* netdev_printk helpers, similar to dev_printk */
 4226 
 4227 static inline const char *netdev_name(const struct net_device *dev)
 4228 {
 4229 	if (!dev->name[0] || strchr(dev->name, '%'))
 4230 		return "(unnamed net_device)";
 4231 	return dev->name;
 4232 }
 4233 
 4234 static inline const char *netdev_reg_state(const struct net_device *dev)
 4235 {
 4236 	switch (dev->reg_state) {
 4237 	case NETREG_UNINITIALIZED: return " (uninitialized)";
 4238 	case NETREG_REGISTERED: return "";
 4239 	case NETREG_UNREGISTERING: return " (unregistering)";
 4240 	case NETREG_UNREGISTERED: return " (unregistered)";
 4241 	case NETREG_RELEASED: return " (released)";
 4242 	case NETREG_DUMMY: return " (dummy)";
 4243 	}
 4244 
 4245 	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
 4246 	return " (unknown)";
 4247 }
 4248 
 4249 __printf(3, 4)
 4250 void netdev_printk(const char *level, const struct net_device *dev,
 4251 		   const char *format, ...);
 4252 __printf(2, 3)
 4253 void netdev_emerg(const struct net_device *dev, const char *format, ...);
 4254 __printf(2, 3)
 4255 void netdev_alert(const struct net_device *dev, const char *format, ...);
 4256 __printf(2, 3)
 4257 void netdev_crit(const struct net_device *dev, const char *format, ...);
 4258 __printf(2, 3)
 4259 void netdev_err(const struct net_device *dev, const char *format, ...);
 4260 __printf(2, 3)
 4261 void netdev_warn(const struct net_device *dev, const char *format, ...);
 4262 __printf(2, 3)
 4263 void netdev_notice(const struct net_device *dev, const char *format, ...);
 4264 __printf(2, 3)
 4265 void netdev_info(const struct net_device *dev, const char *format, ...);
 4266 
 4267 #define MODULE_ALIAS_NETDEV(device) \
 4268 	MODULE_ALIAS("netdev-" device)
 4269 
 4270 #if defined(CONFIG_DYNAMIC_DEBUG)
 4271 #define netdev_dbg(__dev, format, args...)			\
 4272 do {								\
 4273 	dynamic_netdev_dbg(__dev, format, ##args);		\
 4274 } while (0)
 4275 #elif defined(DEBUG)
 4276 #define netdev_dbg(__dev, format, args...)			\
 4277 	netdev_printk(KERN_DEBUG, __dev, format, ##args)
 4278 #else
 4279 #define netdev_dbg(__dev, format, args...)			\
 4280 ({								\
 4281 	if (0)							\
 4282 		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
 4283 })
 4284 #endif
 4285 
 4286 #if defined(VERBOSE_DEBUG)
 4287 #define netdev_vdbg	netdev_dbg
 4288 #else
 4289 
 4290 #define netdev_vdbg(dev, format, args...)			\
 4291 ({								\
 4292 	if (0)							\
 4293 		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
 4294 	0;							\
 4295 })
 4296 #endif
 4297 
 4298 /*
 4299  * netdev_WARN() acts like dev_printk(), but with the key difference
 4300  * of using a WARN/WARN_ON to get the message out, including the
 4301  * file/line information and a backtrace.
 4302  */
 4303 #define netdev_WARN(dev, format, args...)			\
 4304 	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
 4305 	     netdev_reg_state(dev), ##args)
 4306 
 4307 /* netif printk helpers, similar to netdev_printk */
 4308 
 4309 #define netif_printk(priv, type, level, dev, fmt, args...)	\
 4310 do {					  			\
 4311 	if (netif_msg_##type(priv))				\
 4312 		netdev_printk(level, (dev), fmt, ##args);	\
 4313 } while (0)
 4314 
 4315 #define netif_level(level, priv, type, dev, fmt, args...)	\
 4316 do {								\
 4317 	if (netif_msg_##type(priv))				\
 4318 		netdev_##level(dev, fmt, ##args);		\
 4319 } while (0)
 4320 
 4321 #define netif_emerg(priv, type, dev, fmt, args...)		\
 4322 	netif_level(emerg, priv, type, dev, fmt, ##args)
 4323 #define netif_alert(priv, type, dev, fmt, args...)		\
 4324 	netif_level(alert, priv, type, dev, fmt, ##args)
 4325 #define netif_crit(priv, type, dev, fmt, args...)		\
 4326 	netif_level(crit, priv, type, dev, fmt, ##args)
 4327 #define netif_err(priv, type, dev, fmt, args...)		\
 4328 	netif_level(err, priv, type, dev, fmt, ##args)
 4329 #define netif_warn(priv, type, dev, fmt, args...)		\
 4330 	netif_level(warn, priv, type, dev, fmt, ##args)
 4331 #define netif_notice(priv, type, dev, fmt, args...)		\
 4332 	netif_level(notice, priv, type, dev, fmt, ##args)
 4333 #define netif_info(priv, type, dev, fmt, args...)		\
 4334 	netif_level(info, priv, type, dev, fmt, ##args)
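/*
 * Example (editorial sketch): these helpers combine the msg_enable test
 * with the matching netdev_*() printk, so a driver writes
 *
 *	netif_err(priv, tx_err, dev, "TX ring %d stalled\n", ring);
 *
 * instead of the open-coded
 *
 *	if (netif_msg_tx_err(priv))
 *		netdev_err(dev, "TX ring %d stalled\n", ring);
 */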
 4335 
 4336 #if defined(CONFIG_DYNAMIC_DEBUG)
 4337 #define netif_dbg(priv, type, netdev, format, args...)		\
 4338 do {								\
 4339 	if (netif_msg_##type(priv))				\
 4340 		dynamic_netdev_dbg(netdev, format, ##args);	\
 4341 } while (0)
 4342 #elif defined(DEBUG)
 4343 #define netif_dbg(priv, type, dev, format, args...)		\
 4344 	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
 4345 #else
 4346 #define netif_dbg(priv, type, dev, format, args...)			\
 4347 ({									\
 4348 	if (0)								\
 4349 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 4350 	0;								\
 4351 })
 4352 #endif
 4353 
 4354 /* if @cond then downgrade to debug, else print at @level */
 4355 #define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
 4356 	do {                                                              \
 4357 		if (cond)                                                 \
 4358 			netif_dbg(priv, type, netdev, fmt, ##args);       \
 4359 		else                                                      \
 4360 			netif_ ## level(priv, type, netdev, fmt, ##args); \
 4361 	} while (0)
 4362 
 4363 #if defined(VERBOSE_DEBUG)
 4364 #define netif_vdbg	netif_dbg
 4365 #else
 4366 #define netif_vdbg(priv, type, dev, format, args...)		\
 4367 ({								\
 4368 	if (0)							\
 4369 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 4370 	0;							\
 4371 })
 4372 #endif
 4373 
 4374 /*
 4375  *	The list of packet types we will receive (as opposed to discard)
 4376  *	and the routines to invoke.
 4377  *
 4378  *	Why 16? Because with 16 the only overlap we get on a hash of the
 4379  *	low nibble of the protocol value is RARP/SNAP/X.25.
 4380  *
 4381  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 4382  *             sure which should go first, but I bet it won't make much
 4383  *             difference if we are running VLANs.  The good news is that
 4384  *             this protocol won't be in the list unless compiled in, so
 4385  *             the average user (w/out VLANs) will not be adversely affected.
 4386  *             --BLG
 4387  *
 4388  *		0800	IP
 4389  *		8100    802.1Q VLAN
 4390  *		0001	802.3
 4391  *		0002	AX.25
 4392  *		0004	802.2
 4393  *		8035	RARP
 4394  *		0005	SNAP
 4395  *		0805	X.25
 4396  *		0806	ARP
 4397  *		8137	IPX
 4398  *		0009	Localtalk
 4399  *		86DD	IPv6
 4400  */
 4401 #define PTYPE_HASH_SIZE	(16)
 4402 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
 4403 
 4404 #endif	/* _LINUX_NETDEVICE_H */

    1 /* include this file if the platform implements the dma_ DMA Mapping API
    2  * and wants to provide the pci_ DMA Mapping API in terms of it */
    3 
    4 #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
    5 #define _ASM_GENERIC_PCI_DMA_COMPAT_H
    6 
    7 #include <linux/dma-mapping.h>
    8 
    9 /* This defines the direction arg to the DMA mapping routines. */
   10 #define PCI_DMA_BIDIRECTIONAL	0
   11 #define PCI_DMA_TODEVICE	1
   12 #define PCI_DMA_FROMDEVICE	2
   13 #define PCI_DMA_NONE		3
   14 
   15 static inline void *
   16 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
   17 		     dma_addr_t *dma_handle)
   18 {
   19 	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
   20 }
   21 
   22 static inline void *
   23 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
   24 		      dma_addr_t *dma_handle)
   25 {
   26 	return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
   27 				   size, dma_handle, GFP_ATOMIC);
   28 }
   29 
   30 static inline void
   31 pci_free_consistent(struct pci_dev *hwdev, size_t size,
   32 		    void *vaddr, dma_addr_t dma_handle)
   33 {
   34 	dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
   35 }
   36 
   37 static inline dma_addr_t
   38 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
   39 {
   40 	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
   41 }
   42 
   43 static inline void
   44 pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
   45 		 size_t size, int direction)
   46 {
   47 	dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
   48 }
   49 
   50 static inline dma_addr_t
   51 pci_map_page(struct pci_dev *hwdev, struct page *page,
   52 	     unsigned long offset, size_t size, int direction)
   53 {
   54 	return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
   55 }
   56 
   57 static inline void
   58 pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
   59 	       size_t size, int direction)
   60 {
   61 	dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
   62 }
   63 
   64 static inline int
   65 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
   66 	   int nents, int direction)
   67 {
   68 	return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
   69 }
   70 
   71 static inline void
   72 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
   73 	     int nents, int direction)
   74 {
   75 	dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
   76 }
   77 
   78 static inline void
   79 pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
   80 		    size_t size, int direction)
   81 {
   82 	dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
   83 }
   84 
   85 static inline void
   86 pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
   87 		    size_t size, int direction)
   88 {
   89 	dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
   90 }
   91 
   92 static inline void
   93 pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
   94 		int nelems, int direction)
   95 {
   96 	dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
   97 }
   98 
   99 static inline void
  100 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
  101 		int nelems, int direction)
  102 {
  103 	dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
  104 }
  105 
  106 static inline int
  107 pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
  108 {
  109 	return dma_mapping_error(&pdev->dev, dma_addr);
  110 }
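/*
 * Example (editorial sketch): a streaming mapping through this compat
 * layer, checked and then unmapped.  "pdev", "buf" and "len" are
 * assumed to come from the caller.
 *
 *	dma_addr_t handle;
 *
 *	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (pci_dma_mapping_error(pdev, handle))
 *		return -ENOMEM;
 *	// ... start the DMA and wait for completion ...
 *	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
 */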
  111 
  112 #ifdef CONFIG_PCI
  113 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
  114 {
  115 	return dma_set_mask(&dev->dev, mask);
  116 }
  117 
  118 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
  119 {
  120 	return dma_set_coherent_mask(&dev->dev, mask);
  121 }
  122 
  123 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
  124 					   unsigned int size)
  125 {
  126 	return dma_set_max_seg_size(&dev->dev, size);
  127 }
  128 
  129 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
  130 					   unsigned long mask)
  131 {
  132 	return dma_set_seg_boundary(&dev->dev, mask);
  133 }
  134 #else
  135 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
  136 { return -EIO; }
  137 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
  138 { return -EIO; }
  139 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
  140 					   unsigned int size)
  141 { return -EIO; }
  142 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
  143 					   unsigned long mask)
  144 { return -EIO; }
  145 #endif
  146 
  147 #endif

    1 /*
    2  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
    3  *
    4  * (C) SGI 2006, Christoph Lameter
    5  * 	Cleaned up and restructured to ease the addition of alternative
    6  * 	implementations of SLAB allocators.
    7  * (C) Linux Foundation 2008-2013
    8  *      Unified interface for all slab allocators
    9  */
   10 
   11 #ifndef _LINUX_SLAB_H
   12 #define	_LINUX_SLAB_H
   13 
   14 #include <linux/gfp.h>
   15 #include <linux/types.h>
   16 #include <linux/workqueue.h>
   17 
   18 
   19 /*
   20  * Flags to pass to kmem_cache_create().
   21  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
   22  */
   23 #define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
   24 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
   25 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
   26 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
   27 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
   28 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
   29 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
   30 /*
   31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
   32  *
   33  * This delays freeing the SLAB page by a grace period; it does _NOT_
   34  * delay object freeing. This means that if you do kmem_cache_free(),
   35  * that memory location is free to be reused at any time. Thus it may
   36  * be possible to see another object there in the same RCU grace period.
   37  *
   38  * This feature only ensures the memory location backing the object
   39  * stays valid, the trick to using this is relying on an independent
   40  * object validation pass. Something like:
   41  *
   42  *  rcu_read_lock()
   43  * again:
   44  *  obj = lockless_lookup(key);
   45  *  if (obj) {
   46  *    if (!try_get_ref(obj)) // might fail for free objects
   47  *      goto again;
   48  *
   49  *    if (obj->key != key) { // not the object we expected
   50  *      put_ref(obj);
   51  *      goto again;
   52  *    }
   53  *  }
   54  *  rcu_read_unlock();
   55  *
   56  * This is useful if we need to approach a kernel structure obliquely,
   57  * from its address obtained without the usual locking. We can lock
   58  * the structure to stabilize it and check it's still at the given address,
   59  * only if we can be sure that the memory has not been meanwhile reused
   60  * for some other kind of object (which our subsystem's lock might corrupt).
   61  *
   62  * rcu_read_lock before reading the address, then rcu_read_unlock after
   63  * taking the spinlock within the structure expected at that address.
   64  */
   65 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
   66 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
   67 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
   68 
   69 /* Flag to prevent checks on free */
   70 #ifdef CONFIG_DEBUG_OBJECTS
   71 # define SLAB_DEBUG_OBJECTS	0x00400000UL
   72 #else
   73 # define SLAB_DEBUG_OBJECTS	0x00000000UL
   74 #endif
   75 
   76 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
   77 
   78 /* Don't track use of uninitialized memory */
   79 #ifdef CONFIG_KMEMCHECK
   80 # define SLAB_NOTRACK		0x01000000UL
   81 #else
   82 # define SLAB_NOTRACK		0x00000000UL
   83 #endif
   84 #ifdef CONFIG_FAILSLAB
   85 # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
   86 #else
   87 # define SLAB_FAILSLAB		0x00000000UL
   88 #endif
   89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
   90 # define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
   91 #else
   92 # define SLAB_ACCOUNT		0x00000000UL
   93 #endif
   94 
   95 #ifdef CONFIG_KASAN
   96 #define SLAB_KASAN		0x08000000UL
   97 #else
   98 #define SLAB_KASAN		0x00000000UL
   99 #endif
  100 
  101 /* The following flags affect the page allocator grouping pages by mobility */
  102 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
  103 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
  104 /*
  105  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  106  *
  107  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
  108  *
  109  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
  110  * Both make kfree a no-op.
  111  */
  112 #define ZERO_SIZE_PTR ((void *)16)
  113 
  114 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  115 				(unsigned long)ZERO_SIZE_PTR)
  116 
  117 #include <linux/kmemleak.h>
  118 #include <linux/kasan.h>
  119 
  120 struct mem_cgroup;
  121 /*
  122  * struct kmem_cache related prototypes
  123  */
  124 void __init kmem_cache_init(void);
  125 bool slab_is_available(void);
  126 
  127 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  128 			unsigned long,
  129 			void (*)(void *));
  130 void kmem_cache_destroy(struct kmem_cache *);
  131 int kmem_cache_shrink(struct kmem_cache *);
  132 
  133 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
  134 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
  135 void memcg_destroy_kmem_caches(struct mem_cgroup *);
  136 
  137 /*
  138  * Please use this macro to create slab caches. Simply specify the
  139  * name of the structure and maybe some flags that are listed above.
  140  *
  141  * The alignment of the struct determines object alignment. If you,
  142  * for example, add ____cacheline_aligned_in_smp to the struct declaration,
  143  * then the objects will be properly aligned in SMP configurations.
  144  */
  145 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  146 		sizeof(struct __struct), __alignof__(struct __struct),\
  147 		(__flags), NULL)
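/*
 * Example (editorial sketch): creating a cache for a hypothetical
 * "struct my_obj" and allocating from it.
 *
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN);
 *	if (!my_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 *	kmem_cache_destroy(my_cache);
 */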
  148 
  149 /*
  150  * Common kmalloc functions provided by all allocators
  151  */
  152 void * __must_check __krealloc(const void *, size_t, gfp_t);
  153 void * __must_check krealloc(const void *, size_t, gfp_t);
  154 void kfree(const void *);
  155 void kzfree(const void *);
  156 size_t ksize(const void *);
  157 
  158 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
  159 const char *__check_heap_object(const void *ptr, unsigned long n,
  160 				struct page *page);
  161 #else
  162 static inline const char *__check_heap_object(const void *ptr,
  163 					      unsigned long n,
  164 					      struct page *page)
  165 {
  166 	return NULL;
  167 }
  168 #endif
  169 
  170 /*
  171  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  172  * alignment larger than the alignment of a 64-bit integer.
  173  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  174  */
  175 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  176 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  177 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  178 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  179 #else
  180 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  181 #endif
  182 
  183 /*
  184  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  185  * Intended for arches that get misalignment faults even for 64 bit integer
  186  * aligned buffers.
  187  */
  188 #ifndef ARCH_SLAB_MINALIGN
  189 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  190 #endif
  191 
  192 /*
  193  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
  194  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
  195  * aligned pointers.
  196  */
  197 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
  198 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
  199 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
  200 
  201 /*
  202  * Kmalloc array related definitions
  203  */
  204 
  205 #ifdef CONFIG_SLAB
  206 /*
  207  * The largest kmalloc size supported by the SLAB allocators is
  208  * 32 megabyte (2^25) or the maximum allocatable page order if that is
  209  * less than 32 MB.
  210  *
  211  * WARNING: It's not easy to increase this value since the allocators have
  212  * to do various tricks to work around compiler limitations in order to
  213  * ensure proper constant folding.
  214  */
  215 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  216 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  217 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  218 #ifndef KMALLOC_SHIFT_LOW
  219 #define KMALLOC_SHIFT_LOW	5
  220 #endif
  221 #endif
  222 
  223 #ifdef CONFIG_SLUB
  224 /*
  225  * SLUB directly allocates requests fitting into an order-1 page
  226  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  227  */
  228 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  229 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
  230 #ifndef KMALLOC_SHIFT_LOW
  231 #define KMALLOC_SHIFT_LOW	3
  232 #endif
  233 #endif
  234 
  235 #ifdef CONFIG_SLOB
  236 /*
  237  * SLOB passes all requests larger than one page to the page allocator.
  238  * No kmalloc array is necessary since objects of different sizes can
  239  * be allocated from the same page.
  240  */
  241 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  242 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
  243 #ifndef KMALLOC_SHIFT_LOW
  244 #define KMALLOC_SHIFT_LOW	3
  245 #endif
  246 #endif
  247 
  248 /* Maximum allocatable size */
  249 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  250 /* Maximum size for which we actually use a slab cache */
  251 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
  252 /* Maximum order allocatable via the slab allocator */
  253 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
  254 
  255 /*
  256  * Kmalloc subsystem.
  257  */
  258 #ifndef KMALLOC_MIN_SIZE
  259 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  260 #endif
  261 
  262 /*
  263  * This restriction comes from byte sized index implementation.
  264  * Page size is normally 2^12 bytes and, in this case, if we want to use
  265  * byte sized index which can represent 2^8 entries, the size of the object
  266  * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
  267  * If the minimum kmalloc size is less than 16, we use it as the minimum
  268  * object size and give up on using a byte-sized index.
  269  */
  270 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
  271                                (KMALLOC_MIN_SIZE) : 16)
  272 
  273 #ifndef CONFIG_SLOB
  274 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  275 #ifdef CONFIG_ZONE_DMA
  276 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  277 #endif
  278 
  279 /*
  280  * Figure out which kmalloc slab an allocation of a certain size
  281  * belongs to.
  282  * 0 = zero alloc
  283  * 1 =  65 .. 96 bytes
  284  * 2 = 129 .. 192 bytes
  285  * n = 2^(n-1)+1 .. 2^n
  286  */
  287 static __always_inline int kmalloc_index(size_t size)
  288 {
  289 	if (!size)
  290 		return 0;
  291 
  292 	if (size <= KMALLOC_MIN_SIZE)
  293 		return KMALLOC_SHIFT_LOW;
  294 
  295 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
  296 		return 1;
  297 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
  298 		return 2;
  299 	if (size <=          8) return 3;
  300 	if (size <=         16) return 4;
  301 	if (size <=         32) return 5;
  302 	if (size <=         64) return 6;
  303 	if (size <=        128) return 7;
  304 	if (size <=        256) return 8;
  305 	if (size <=        512) return 9;
  306 	if (size <=       1024) return 10;
  307 	if (size <=   2 * 1024) return 11;
  308 	if (size <=   4 * 1024) return 12;
  309 	if (size <=   8 * 1024) return 13;
  310 	if (size <=  16 * 1024) return 14;
  311 	if (size <=  32 * 1024) return 15;
  312 	if (size <=  64 * 1024) return 16;
  313 	if (size <= 128 * 1024) return 17;
  314 	if (size <= 256 * 1024) return 18;
  315 	if (size <= 512 * 1024) return 19;
  316 	if (size <= 1024 * 1024) return 20;
  317 	if (size <=  2 * 1024 * 1024) return 21;
  318 	if (size <=  4 * 1024 * 1024) return 22;
  319 	if (size <=  8 * 1024 * 1024) return 23;
  320 	if (size <=  16 * 1024 * 1024) return 24;
  321 	if (size <=  32 * 1024 * 1024) return 25;
  322 	if (size <=  64 * 1024 * 1024) return 26;
  323 	BUG();
  324 
  325 	/* Will never be reached. Needed because the compiler may complain */
  326 	return -1;
  327 }
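/*
 * Worked example (editorial): with the usual KMALLOC_MIN_SIZE of 8,
 * kmalloc_index(96) returns 1 (the special 96-byte cache), while
 * kmalloc_index(100) falls through to the power-of-two ladder and
 * returns 7, i.e. the 128-byte cache.
 */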
  328 #endif /* !CONFIG_SLOB */
  329 
  330 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
  331 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
  332 void kmem_cache_free(struct kmem_cache *, void *);
  333 
  334 /*
  335  * Bulk allocation and freeing operations. These are accelerated in an
  336  * allocator specific way to avoid taking locks repeatedly or building
  337  * metadata structures unnecessarily.
  338  *
  339  * Note that interrupts must be enabled when calling these functions.
  340  */
  341 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  342 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
  343 
  344 /*
  345  * Caller must not use kfree_bulk() on memory not originally allocated
  346  * by kmalloc(), because the SLOB allocator cannot handle this.
  347  */
  348 static __always_inline void kfree_bulk(size_t size, void **p)
  349 {
  350 	kmem_cache_free_bulk(NULL, size, p);
  351 }
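/*
 * Example (editorial sketch): bulk-allocating and bulk-freeing a batch
 * of objects; kmem_cache_alloc_bulk() returns the number of objects
 * actually allocated (0 on failure).  "my_cache" is hypothetical.
 *
 *	void *objs[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, 16, objs);
 *	if (!n)
 *		return -ENOMEM;
 *	// ... use objs[0..n-1] ...
 *	kmem_cache_free_bulk(my_cache, n, objs);
 */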
  352 
  353 #ifdef CONFIG_NUMA
  354 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
  355 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
  356 #else
  357 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  358 {
  359 	return __kmalloc(size, flags);
  360 }
  361 
  362 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
  363 {
  364 	return kmem_cache_alloc(s, flags);
  365 }
  366 #endif
  367 
  368 #ifdef CONFIG_TRACING
  369 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
  370 
  371 #ifdef CONFIG_NUMA
  372 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  373 					   gfp_t gfpflags,
  374 					   int node, size_t size) __assume_slab_alignment __malloc;
  375 #else
  376 static __always_inline void *
  377 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  378 			      gfp_t gfpflags,
  379 			      int node, size_t size)
  380 {
  381 	return kmem_cache_alloc_trace(s, gfpflags, size);
  382 }
  383 #endif /* CONFIG_NUMA */
  384 
  385 #else /* CONFIG_TRACING */
  386 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  387 		gfp_t flags, size_t size)
  388 {
  389 	void *ret = kmem_cache_alloc(s, flags);
  390 
  391 	kasan_kmalloc(s, ret, size, flags);
  392 	return ret;
  393 }
  394 
  395 static __always_inline void *
  396 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  397 			      gfp_t gfpflags,
  398 			      int node, size_t size)
  399 {
  400 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
  401 
  402 	kasan_kmalloc(s, ret, size, gfpflags);
  403 	return ret;
  404 }
  405 #endif /* CONFIG_TRACING */
  406 
  407 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  408 
  409 #ifdef CONFIG_TRACING
  410 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  411 #else
  412 static __always_inline void *
  413 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  414 {
  415 	return kmalloc_order(size, flags, order);
  416 }
  417 #endif
  418 
  419 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  420 {
  421 	unsigned int order = get_order(size);
  422 	return kmalloc_order_trace(size, flags, order);
  423 }
  424 
  425 /**
  426  * kmalloc - allocate memory
  427  * @size: how many bytes of memory are required.
  428  * @flags: the type of memory to allocate.
  429  *
  430  * kmalloc is the normal method of allocating memory
  431  * for objects smaller than page size in the kernel.
  432  *
  433  * The @flags argument may be one of:
  434  *
  435  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
  436  *
  437  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
  438  *
  439  * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
  440  *   For example, use this inside interrupt handlers.
  441  *
  442  * %GFP_HIGHUSER - Allocate pages from high memory.
  443  *
  444  * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
  445  *
  446  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  447  *
  448  * %GFP_NOWAIT - Allocation will not sleep.
  449  *
  450  * %__GFP_THISNODE - Allocate node-local memory only.
  451  *
  452  * %GFP_DMA - Allocation suitable for DMA.
  453  *   Should only be used for kmalloc() caches. Otherwise, use a
  454  *   slab created with SLAB_DMA.
  455  *
  456  * Also it is possible to set different flags by OR'ing
  457  * in one or more of the following additional @flags:
  458  *
  459  * %__GFP_COLD - Request cache-cold pages instead of
  460  *   trying to return cache-warm pages.
  461  *
  462  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  463  *
  464  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  465  *   (think twice before using).
  466  *
  467  * %__GFP_NORETRY - If memory is not immediately available,
  468  *   then give up at once.
  469  *
  470  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  471  *
  472  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  473  *
  474  * There are other flags available as well, but these are not intended
  475  * for general use, and so are not documented here. For a full list of
  476  * potential flags, always refer to linux/gfp.h.
  477  */
  478 static __always_inline void *kmalloc(size_t size, gfp_t flags)
  479 {
  480 	if (__builtin_constant_p(size)) {
  481 		if (size > KMALLOC_MAX_CACHE_SIZE)
  482 			return kmalloc_large(size, flags);
  483 #ifndef CONFIG_SLOB
  484 		if (!(flags & GFP_DMA)) {
  485 			int index = kmalloc_index(size);
  486 
  487 			if (!index)
  488 				return ZERO_SIZE_PTR;
  489 
  490 			return kmem_cache_alloc_trace(kmalloc_caches[index],
  491 					flags, size);
  492 		}
  493 #endif
  494 	}
  495 	return __kmalloc(size, flags);
  496 }
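
To make the doc comment above concrete, here is a minimal, hypothetical call-site sketch (struct my_dev and my_irq_handler are illustrative names, not part of this header):

	#include <linux/slab.h>
	#include <linux/interrupt.h>

	struct my_dev {
		u8 rx_buf[256];
	};

	/* Process context: GFP_KERNEL may sleep to reclaim memory. */
	static struct my_dev *my_dev_create(void)
	{
		return kmalloc(sizeof(struct my_dev), GFP_KERNEL);
	}

	/* Interrupt context: GFP_ATOMIC never sleeps and may dip into
	 * emergency pools. */
	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		void *scratch = kmalloc(64, GFP_ATOMIC);

		kfree(scratch);		/* illustrative only */
		return IRQ_HANDLED;
	}

Because sizeof() is a compile-time constant, both calls take the inlined fast path through kmalloc_index() rather than falling back to __kmalloc().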
  497 
  498 /*
   499  * Determine the size used for the nth kmalloc cache.
   500  * Returns the size, or 0 if a kmalloc cache for that
   501  * size does not exist.
  502  */
  503 static __always_inline int kmalloc_size(int n)
  504 {
  505 #ifndef CONFIG_SLOB
  506 	if (n > 2)
  507 		return 1 << n;
  508 
  509 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
  510 		return 96;
  511 
  512 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
  513 		return 192;
  514 #endif
  515 	return 0;
  516 }
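
A few worked values may make the mapping clearer, assuming KMALLOC_MIN_SIZE <= 32 (true for most configurations):

	/* kmalloc_size(1)  == 96        (the odd-sized 96-byte cache)
	 * kmalloc_size(2)  == 192       (the odd-sized 192-byte cache)
	 * kmalloc_size(3)  == 1 << 3  == 8
	 * kmalloc_size(7)  == 1 << 7  == 128
	 * kmalloc_size(12) == 1 << 12 == 4096
	 */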
  517 
  518 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  519 {
  520 #ifndef CONFIG_SLOB
  521 	if (__builtin_constant_p(size) &&
  522 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
  523 		int i = kmalloc_index(size);
  524 
  525 		if (!i)
  526 			return ZERO_SIZE_PTR;
  527 
  528 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
  529 						flags, node, size);
  530 	}
  531 #endif
  532 	return __kmalloc_node(size, flags, node);
  533 }
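
kmalloc_node() follows the same fast path as kmalloc() but pins the allocation to a given NUMA node. A hypothetical per-device sketch (struct my_priv and my_priv_alloc are illustrative; dev_to_node() is a real helper):

	#include <linux/pci.h>
	#include <linux/slab.h>

	struct my_priv { int dummy; };	/* illustrative */

	static struct my_priv *my_priv_alloc(struct pci_dev *pdev)
	{
		/* allocate on the node closest to the device's memory */
		int node = dev_to_node(&pdev->dev);

		return kmalloc_node(sizeof(struct my_priv), GFP_KERNEL, node);
	}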
  534 
  535 struct memcg_cache_array {
  536 	struct rcu_head rcu;
  537 	struct kmem_cache *entries[0];
  538 };
  539 
  540 /*
  541  * This is the main placeholder for memcg-related information in kmem caches.
  542  * Both the root cache and the child caches will have it. For the root cache,
  543  * this will hold a dynamically allocated array large enough to hold
  544  * information about the currently limited memcgs in the system. To allow the
  545  * array to be accessed without taking any locks, on relocation we free the old
  546  * version only after a grace period.
  547  *
  548  * Root and child caches hold different metadata.
  549  *
  550  * @root_cache:	Common to root and child caches.  NULL for root, pointer to
  551  *		the root cache for children.
  552  *
  553  * The following fields are specific to root caches.
  554  *
  555  * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
   556  *		used to index child caches during allocation and cleared
  557  *		early during shutdown.
  558  *
  559  * @root_caches_node: List node for slab_root_caches list.
  560  *
  561  * @children:	List of all child caches.  While the child caches are also
  562  *		reachable through @memcg_caches, a child cache remains on
  563  *		this list until it is actually destroyed.
  564  *
  565  * The following fields are specific to child caches.
  566  *
  567  * @memcg:	Pointer to the memcg this cache belongs to.
  568  *
  569  * @children_node: List node for @root_cache->children list.
  570  *
  571  * @kmem_caches_node: List node for @memcg->kmem_caches list.
  572  */
  573 struct memcg_cache_params {
  574 	struct kmem_cache *root_cache;
  575 	union {
  576 		struct {
  577 			struct memcg_cache_array __rcu *memcg_caches;
  578 			struct list_head __root_caches_node;
  579 			struct list_head children;
  580 		};
  581 		struct {
  582 			struct mem_cgroup *memcg;
  583 			struct list_head children_node;
  584 			struct list_head kmem_caches_node;
  585 
  586 			void (*deact_fn)(struct kmem_cache *);
  587 			union {
  588 				struct rcu_head deact_rcu_head;
  589 				struct work_struct deact_work;
  590 			};
  591 		};
  592 	};
  593 };
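
The "free the old version only after a grace period" rule in the comment above is the standard RCU publish/retire pattern; the rcu_head embedded at the start of struct memcg_cache_array exists precisely for the deferred free. A minimal sketch of what such a relocation could look like (replace_memcg_caches is a hypothetical name; rcu_assign_pointer(), kfree_rcu() and the internal slab_mutex lock are real kernel facilities):

	static void replace_memcg_caches(struct kmem_cache *root,
					 struct memcg_cache_array *new_arr)
	{
		struct memcg_cache_array *old;

		old = rcu_dereference_protected(root->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		rcu_assign_pointer(root->memcg_params.memcg_caches, new_arr);
		if (old)
			kfree_rcu(old, rcu);	/* freed after a grace period */
	}

Readers that looked the old array up under rcu_read_lock() keep a valid pointer until the grace period expires, so no reader-side locking is needed.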
  594 
  595 int memcg_update_all_caches(int num_memcgs);
  596 
  597 /**
  598  * kmalloc_array - allocate memory for an array.
  599  * @n: number of elements.
  600  * @size: element size.
  601  * @flags: the type of memory to allocate (see kmalloc).
  602  */
  603 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  604 {
  605 	if (size != 0 && n > SIZE_MAX / size)
  606 		return NULL;
  607 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
  608 		return kmalloc(n * size, flags);
  609 	return __kmalloc(n * size, flags);
  610 }
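
The n > SIZE_MAX / size guard is the whole point of kmalloc_array() over an open-coded kmalloc(n * size, ...): without it, the multiplication can silently wrap and return a dangerously undersized buffer. A worked example on a 64-bit kernel:

	/* SIZE_MAX / 16 == 0x0fffffffffffffff, so the guard trips and
	 * kmalloc_array() returns NULL.  An unchecked multiplication would
	 * wrap instead: 0x1000000000000001 * 16 == 16 (mod 2^64), i.e. a
	 * 16-byte allocation for what the caller believes is a huge array.
	 */
	void *arr = kmalloc_array(0x1000000000000001UL, 16, GFP_KERNEL);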
  611 
  612 /**
  613  * kcalloc - allocate memory for an array. The memory is set to zero.
  614  * @n: number of elements.
  615  * @size: element size.
  616  * @flags: the type of memory to allocate (see kmalloc).
  617  */
  618 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  619 {
  620 	return kmalloc_array(n, size, flags | __GFP_ZERO);
  621 }
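
kcalloc() is just kmalloc_array() with __GFP_ZERO OR'ed in, making it the idiomatic choice for arrays that must start out zeroed. A hypothetical fragment (nr_ports and struct my_port are illustrative):

	struct my_port *ports = kcalloc(nr_ports, sizeof(*ports), GFP_KERNEL);

	if (!ports)
		return -ENOMEM;
	/* overflow-checked and fully zero-initialized */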
  622 
  623 /*
   624  * kmalloc_track_caller is a special version of kmalloc that, for slab
   625  * leak tracking, records the caller of the routine that invokes it,
   626  * rather than its own immediate caller.
  627  * It's useful when the call to kmalloc comes from a widely-used standard
  628  * allocator where we care about the real place the memory allocation
  629  * request comes from.
  630  */
  631 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  632 #define kmalloc_track_caller(size, flags) \
  633 	__kmalloc_track_caller(size, flags, _RET_IP_)
  634 
  635 #ifdef CONFIG_NUMA
  636 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  637 #define kmalloc_node_track_caller(size, flags, node) \
  638 	__kmalloc_node_track_caller(size, flags, node, \
  639 			_RET_IP_)
  640 
  641 #else /* CONFIG_NUMA */
  642 
  643 #define kmalloc_node_track_caller(size, flags, node) \
  644 	kmalloc_track_caller(size, flags)
  645 
  646 #endif /* CONFIG_NUMA */
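
A sketch of how a wrapper allocator could use kmalloc_track_caller() so that leak reports name its callers rather than the wrapper itself (my_strdup is a hypothetical stand-in for real helpers such as kstrdup()):

	#include <linux/string.h>

	static char *my_strdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		/* _RET_IP_ inside the macro records my_strdup's caller, so a
		 * leaked copy is attributed to the real call site. */
		char *buf = kmalloc_track_caller(len, gfp);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}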
  647 
  648 /*
  649  * Shortcuts
  650  */
  651 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  652 {
  653 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  654 }
  655 
  656 /**
  657  * kzalloc - allocate memory. The memory is set to zero.
  658  * @size: how many bytes of memory are required.
  659  * @flags: the type of memory to allocate (see kmalloc).
  660  */
  661 static inline void *kzalloc(size_t size, gfp_t flags)
  662 {
  663 	return kmalloc(size, flags | __GFP_ZERO);
  664 }
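
kzalloc() is the usual way to allocate a zeroed structure and thereby avoid leaking stale heap contents through fields the caller forgets to initialize. A short illustrative fragment (struct my_state is a hypothetical name):

	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	/* every field of *st is already zero here */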
  665 
  666 /**
  667  * kzalloc_node - allocate zeroed memory from a particular memory node.
  668  * @size: how many bytes of memory are required.
  669  * @flags: the type of memory to allocate (see kmalloc).
  670  * @node: memory node from which to allocate
  671  */
  672 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  673 {
  674 	return kmalloc_node(size, flags | __GFP_ZERO, node);
  675 }
  676 
  677 unsigned int kmem_cache_size(struct kmem_cache *s);
  678 void __init kmem_cache_init_late(void);
  679 
  680 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
  681 int slab_prepare_cpu(unsigned int cpu);
  682 int slab_dead_cpu(unsigned int cpu);
  683 #else
  684 #define slab_prepare_cpu	NULL
  685 #define slab_dead_cpu		NULL
  686 #endif
  687 
   687 #endif	/* _LINUX_SLAB_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an actual error in your driver.
The Error trace column shows the path along which the given rule is violated. You can expand or collapse classes of entities by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking on +/-. Hovering over some entities displays tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.
The Source code column shows the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs indicate the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's contents.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
| linux-4.11-rc1.tar.xz | drivers/net/irda/vlsi_ir.ko | 331_1a | CPAchecker | Bug | Fixed | 2017-03-25 01:55:42 | L0266 |
Comment
Reported: 25 Mar 2017