Bug
        
                          [В начало]
Ошибка # 158
Показать/спрятать трассу ошибок|            Error trace     
         {    20     typedef unsigned char __u8;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    29     typedef long long __s64;    30     typedef unsigned long long __u64;    15     typedef signed char s8;    16     typedef unsigned char u8;    18     typedef short s16;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;   280     struct kernel_symbol {   unsigned long value;   const char *name; } ;    34     struct module ;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;   102     typedef __s32 int32_t;   108     typedef __u32 uint32_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   152     typedef u64 dma_addr_t;   157     typedef unsigned int gfp_t;   158     typedef unsigned int fmode_t;   161     typedef u64 phys_addr_t;   166     typedef phys_addr_t 
resource_size_t;   176     struct __anonstruct_atomic_t_6 {   int counter; } ;   176     typedef struct __anonstruct_atomic_t_6 atomic_t;   181     struct __anonstruct_atomic64_t_7 {   long counter; } ;   181     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   182     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   187     struct hlist_node ;   187     struct hlist_head {   struct hlist_node *first; } ;   191     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   202     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;   115     typedef void (*ctor_fn_t)();   259     struct _ddebug {   const char *modname;   const char *function;   const char *filename;   const char *format;   unsigned int lineno;   unsigned char flags; } ;    58     struct device ;   450     struct file_operations ;   462     struct completion ;   463     struct pt_regs ;   557     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;   114     struct timespec ;   115     struct compat_timespec ;   116     struct thread_info {   unsigned long flags; } ;    20     struct __anonstruct_futex_25 {   u32 *uaddr;   u32 val;   u32 flags;   u32 bitset;   u64 time;   u32 *uaddr2; } ;    20     struct __anonstruct_nanosleep_26 {   clockid_t clockid;   struct timespec *rmtp;   struct compat_timespec *compat_rmtp;   u64 expires; } ;    20     struct pollfd ;    20     struct __anonstruct_poll_27 {   struct pollfd *ufds;   int nfds;   int has_timeout;   unsigned long tv_sec;   unsigned long tv_nsec; } ;    20     union __anonunion____missing_field_name_24 {   struct __anonstruct_futex_25 futex;   struct __anonstruct_nanosleep_26 nanosleep;   struct __anonstruct_poll_27 poll; } ;    20     struct restart_block {   long int (*fn)(struct restart_block *);   union __anonunion____missing_field_name_24 __annonCompField4; } ;    39     struct page ;   
 26     struct task_struct ;    27     struct mm_struct ;   288     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    66     struct __anonstruct____missing_field_name_30 {   unsigned int a;   unsigned int b; } ;    66     struct __anonstruct____missing_field_name_31 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    66     union __anonunion____missing_field_name_29 {   struct __anonstruct____missing_field_name_30 __annonCompField5;   struct __anonstruct____missing_field_name_31 __annonCompField6; } ;    66     struct desc_struct {   union __anonunion____missing_field_name_29 __annonCompField7; } ;    13     typedef unsigned long pteval_t;    14     typedef unsigned long pmdval_t;    16     typedef unsigned long pgdval_t;    17     typedef unsigned long pgprotval_t;    19     struct __anonstruct_pte_t_32 {   pteval_t pte; } ;    19     typedef struct __anonstruct_pte_t_32 pte_t;    21     struct pgprot {   pgprotval_t pgprot; } ;   256     typedef struct pgprot pgprot_t;   258     struct __anonstruct_pgd_t_33 {   pgdval_t pgd; } ;   258     typedef struct __anonstruct_pgd_t_33 pgd_t;   297     struct __anonstruct_pmd_t_35 {   pmdval_t pmd; } ;   297     typedef struct __anonstruct_pmd_t_35 pmd_t;   423     typedef struct page *pgtable_t;   434     struct file ;   445     struct seq_file ;   481     struct thread_struct ;   483     struct cpumask ;    20     
struct qspinlock {   atomic_t val; } ;    33     typedef struct qspinlock arch_spinlock_t;    34     struct qrwlock {   atomic_t cnts;   arch_spinlock_t wait_lock; } ;    14     typedef struct qrwlock arch_rwlock_t;   247     struct math_emu_info {   long ___orig_eip;   struct pt_regs *regs; } ;    83     struct static_key {   atomic_t enabled; } ;    23     typedef atomic64_t atomic_long_t;   359     struct cpumask {   unsigned long bits[128U]; } ;    15     typedef struct cpumask cpumask_t;   654     typedef struct cpumask *cpumask_var_t;    22     struct tracepoint_func {   void *func;   void *data;   int prio; } ;    28     struct tracepoint {   const char *name;   struct static_key key;   void (*regfunc)();   void (*unregfunc)();   struct tracepoint_func *funcs; } ;   246     struct fregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;    26     struct __anonstruct____missing_field_name_59 {   u64 rip;   u64 rdp; } ;    26     struct __anonstruct____missing_field_name_60 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;    26     union __anonunion____missing_field_name_58 {   struct __anonstruct____missing_field_name_59 __annonCompField13;   struct __anonstruct____missing_field_name_60 __annonCompField14; } ;    26     union __anonunion____missing_field_name_61 {   u32 padding1[12U];   u32 sw_reserved[12U]; } ;    26     struct fxregs_state {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion____missing_field_name_58 __annonCompField15;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion____missing_field_name_61 __annonCompField16; } ;    66     struct swregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   227     struct 
xstate_header {   u64 xfeatures;   u64 xcomp_bv;   u64 reserved[6U]; } ;   233     struct xregs_state {   struct fxregs_state i387;   struct xstate_header header;   u8 extended_state_area[0U]; } ;   254     union fpregs_state {   struct fregs_state fsave;   struct fxregs_state fxsave;   struct swregs_state soft;   struct xregs_state xsave;   u8 __padding[4096U]; } ;   271     struct fpu {   unsigned int last_cpu;   unsigned char fpstate_active;   unsigned char fpregs_active;   unsigned char counter;   union fpregs_state state; } ;   169     struct seq_operations ;   372     struct perf_event ;   377     struct __anonstruct_mm_segment_t_73 {   unsigned long seg; } ;   377     typedef struct __anonstruct_mm_segment_t_73 mm_segment_t;   378     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   u32 status;   unsigned long fsbase;   unsigned long gsbase;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   mm_segment_t addr_limit;   unsigned char sig_on_uaccess_err;   unsigned char uaccess_err;   struct fpu fpu; } ;    33     struct lockdep_map ;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    28     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct hlist_node hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned 
int version;   unsigned long ops;   const char *name;   int name_version;   unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int cpu;   unsigned long ip; } ;   207     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references;   unsigned int pin_count; } ;   572     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct raw_spinlock raw_spinlock_t;    33     struct __anonstruct____missing_field_name_75 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion____missing_field_name_74 {   struct raw_spinlock rlock;   struct __anonstruct____missing_field_name_75 __annonCompField19; } ;    33     struct spinlock {   union __anonunion____missing_field_name_74 __annonCompField20; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_76 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_76 rwlock_t;   416     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    52     typedef struct seqcount seqcount_t;   407     struct __anonstruct_seqlock_t_91 {   struct seqcount seqcount;   spinlock_t lock; } ;   407     typedef struct __anonstruct_seqlock_t_91 seqlock_t;   601     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;     7     typedef __s64 time64_t;    83     struct user_namespace ;    22  
   struct __anonstruct_kuid_t_92 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_92 kuid_t;    27     struct __anonstruct_kgid_t_93 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_93 kgid_t;   139     struct kstat {   u64 ino;   dev_t dev;   umode_t mode;   unsigned int nlink;   kuid_t uid;   kgid_t gid;   dev_t rdev;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   unsigned long blksize;   unsigned long long blocks; } ;    36     struct vm_area_struct ;    38     struct __wait_queue_head {   spinlock_t lock;   struct list_head task_list; } ;    43     typedef struct __wait_queue_head wait_queue_head_t;    97     struct __anonstruct_nodemask_t_94 {   unsigned long bits[16U]; } ;    97     typedef struct __anonstruct_nodemask_t_94 nodemask_t;   249     typedef unsigned int isolate_mode_t;    13     struct optimistic_spin_queue {   atomic_t tail; } ;    39     struct mutex {   atomic_t count;   spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   void *magic;   struct lockdep_map dep_map; } ;    67     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   void *magic; } ;   177     struct rw_semaphore ;   178     struct rw_semaphore {   atomic_long_t count;   struct list_head wait_list;   raw_spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct task_struct *owner;   struct lockdep_map dep_map; } ;   178     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   450     union ktime {   s64 tv64; } ;    41     typedef union ktime ktime_t;  1145     struct timer_list {   struct hlist_node entry;   unsigned long expires;   void (*function)(unsigned long);   unsigned long data;   u32 flags;   int start_pid;   void *start_site;   char start_comm[16U];   struct lockdep_map lockdep_map; } ;   254     struct hrtimer ;   255     enum hrtimer_restart ;   256     struct rb_node {   unsigned long __rb_parent_color;   
struct rb_node *rb_right;   struct rb_node *rb_left; } ;    41     struct rb_root {   struct rb_node *rb_node; } ;   835     struct nsproxy ;   278     struct workqueue_struct ;   279     struct work_struct ;    54     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   107     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq;   int cpu; } ;   268     struct notifier_block ;    53     struct notifier_block {   int (*notifier_call)(struct notifier_block *, unsigned long, void *);   struct notifier_block *next;   int priority; } ;   217     struct resource ;    66     struct resource {   resource_size_t start;   resource_size_t end;   const char *name;   unsigned long flags;   unsigned long desc;   struct resource *parent;   struct resource *sibling;   struct resource *child; } ;    58     struct pm_message {   int event; } ;    64     typedef struct pm_message pm_message_t;    65     struct dev_pm_ops {   int (*prepare)(struct device *);   void (*complete)(struct device *);   int (*suspend)(struct device *);   int (*resume)(struct device *);   int (*freeze)(struct device *);   int (*thaw)(struct device *);   int (*poweroff)(struct device *);   int (*restore)(struct device *);   int (*suspend_late)(struct device *);   int (*resume_early)(struct device *);   int (*freeze_late)(struct device *);   int (*thaw_early)(struct device *);   int (*poweroff_late)(struct device *);   int (*restore_early)(struct device *);   int (*suspend_noirq)(struct device *);   int (*resume_noirq)(struct device *);   int (*freeze_noirq)(struct device *);   int (*thaw_noirq)(struct device *);   int (*poweroff_noirq)(struct device *);   int (*restore_noirq)(struct device *);   int (*runtime_suspend)(struct device *);   int (*runtime_resume)(struct device *);   int (*runtime_idle)(struct device *); } ;   320     enum rpm_status {   RPM_ACTIVE = 
0,   RPM_RESUMING = 1,   RPM_SUSPENDED = 2,   RPM_SUSPENDING = 3 } ;   327     enum rpm_request {   RPM_REQ_NONE = 0,   RPM_REQ_IDLE = 1,   RPM_REQ_SUSPEND = 2,   RPM_REQ_AUTOSUSPEND = 3,   RPM_REQ_RESUME = 4 } ;   335     struct wakeup_source ;   336     struct wake_irq ;   337     struct pm_domain_data ;   338     struct pm_subsys_data {   spinlock_t lock;   unsigned int refcount;   struct list_head clock_list;   struct pm_domain_data *domain_data; } ;   556     struct dev_pm_qos ;   556     struct dev_pm_info {   pm_message_t power_state;   unsigned char can_wakeup;   unsigned char async_suspend;   bool is_prepared;   bool is_suspended;   bool is_noirq_suspended;   bool is_late_suspended;   bool early_init;   bool direct_complete;   spinlock_t lock;   struct list_head entry;   struct completion completion;   struct wakeup_source *wakeup;   bool wakeup_path;   bool syscore;   bool no_pm_callbacks;   struct timer_list suspend_timer;   unsigned long timer_expires;   struct work_struct work;   wait_queue_head_t wait_queue;   struct wake_irq *wakeirq;   atomic_t usage_count;   atomic_t child_count;   unsigned char disable_depth;   unsigned char idle_notification;   unsigned char request_pending;   unsigned char deferred_resume;   unsigned char run_wake;   unsigned char runtime_auto;   bool ignore_children;   unsigned char no_callbacks;   unsigned char irq_safe;   unsigned char use_autosuspend;   unsigned char timer_autosuspends;   unsigned char memalloc_noio;   enum rpm_request request;   enum rpm_status runtime_status;   int runtime_error;   int autosuspend_delay;   unsigned long last_busy;   unsigned long active_jiffies;   unsigned long suspended_jiffies;   unsigned long accounting_timestamp;   struct pm_subsys_data *subsys_data;   void (*set_latency_tolerance)(struct device *, s32 );   struct dev_pm_qos *qos; } ;   616     struct dev_pm_domain {   struct dev_pm_ops ops;   void (*detach)(struct device *, bool );   int (*activate)(struct device *);   void 
(*sync)(struct device *);   void (*dismiss)(struct device *); } ;    34     struct ldt_struct ;    34     struct vdso_image ;    34     struct __anonstruct_mm_context_t_165 {   struct ldt_struct *ldt;   unsigned short ia32_compat;   struct mutex lock;   void *vdso;   const struct vdso_image *vdso_image;   atomic_t perf_rdpmc_allowed;   u16 pkey_allocation_map;   s16 execute_only_pkey; } ;    34     typedef struct __anonstruct_mm_context_t_165 mm_context_t;  1290     struct llist_node ;    64     struct llist_node {   struct llist_node *next; } ;    37     struct cred ;    19     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct____missing_field_name_211 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct __anonstruct____missing_field_name_212 {   struct callback_head dup_xol_work;   unsigned long dup_xol_addr; } ;    73     union __anonunion____missing_field_name_210 {   struct __anonstruct____missing_field_name_211 __annonCompField35;   struct __anonstruct____missing_field_name_212 __annonCompField36; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion____missing_field_name_210 __annonCompField37;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    94     struct return_instance {   struct uprobe *uprobe;   unsigned long func;   unsigned long stack;   unsigned long orig_ret_vaddr;   bool chained;   struct return_instance *next; } ;   110     struct xol_area ;   111     struct uprobes_state {   struct xol_area *xol_area; } ;   150     struct address_space ;   151     struct mem_cgroup ;   152     union 
__anonunion____missing_field_name_213 {   struct address_space *mapping;   void *s_mem;   atomic_t compound_mapcount; } ;   152     union __anonunion____missing_field_name_214 {   unsigned long index;   void *freelist; } ;   152     struct __anonstruct____missing_field_name_218 {   unsigned short inuse;   unsigned short objects;   unsigned char frozen; } ;   152     union __anonunion____missing_field_name_217 {   atomic_t _mapcount;   unsigned int active;   struct __anonstruct____missing_field_name_218 __annonCompField40;   int units; } ;   152     struct __anonstruct____missing_field_name_216 {   union __anonunion____missing_field_name_217 __annonCompField41;   atomic_t _refcount; } ;   152     union __anonunion____missing_field_name_215 {   unsigned long counters;   struct __anonstruct____missing_field_name_216 __annonCompField42; } ;   152     struct dev_pagemap ;   152     struct __anonstruct____missing_field_name_220 {   struct page *next;   int pages;   int pobjects; } ;   152     struct __anonstruct____missing_field_name_221 {   unsigned long compound_head;   unsigned int compound_dtor;   unsigned int compound_order; } ;   152     struct __anonstruct____missing_field_name_222 {   unsigned long __pad;   pgtable_t pmd_huge_pte; } ;   152     union __anonunion____missing_field_name_219 {   struct list_head lru;   struct dev_pagemap *pgmap;   struct __anonstruct____missing_field_name_220 __annonCompField44;   struct callback_head callback_head;   struct __anonstruct____missing_field_name_221 __annonCompField45;   struct __anonstruct____missing_field_name_222 __annonCompField46; } ;   152     struct kmem_cache ;   152     union __anonunion____missing_field_name_223 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache; } ;   152     struct page {   unsigned long flags;   union __anonunion____missing_field_name_213 __annonCompField38;   union __anonunion____missing_field_name_214 __annonCompField39;   union 
__anonunion____missing_field_name_215 __annonCompField43;   union __anonunion____missing_field_name_219 __annonCompField47;   union __anonunion____missing_field_name_223 __annonCompField48;   struct mem_cgroup *mem_cgroup; } ;   197     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;   282     struct userfaultfd_ctx ;   282     struct vm_userfaultfd_ctx {   struct userfaultfd_ctx *ctx; } ;   289     struct __anonstruct_shared_224 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   289     struct anon_vma ;   289     struct vm_operations_struct ;   289     struct mempolicy ;   289     struct vm_area_struct {   unsigned long vm_start;   unsigned long vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   struct __anonstruct_shared_224 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy;   struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;   362     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   367     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   381     struct task_rss_stat {   int events;   int count[4U]; } ;   389     struct mm_rss_stat {   atomic_long_t count[4U]; } ;   394     struct kioctx_table ;   395     struct linux_binfmt ;   395     struct mmu_notifier_mm ;   395     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end;   pgd_t 
*pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   atomic_long_t nr_pmds;   int map_count;   spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long data_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct file *exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state;   void *bd_addr;   atomic_long_t hugetlb_usage;   struct work_struct async_put_work; } ;   563     struct vm_fault ;   617     struct vdso_image {   void *data;   unsigned long size;   unsigned long alt;   unsigned long alt_len;   long sym_vvar_start;   long sym_vvar_page;   long sym_hpet_page;   long sym_pvclock_page;   long sym_VDSO32_NOTE_MASK;   long sym___kernel_sigreturn;   long sym___kernel_rt_sigreturn;   long sym___kernel_vsyscall;   long sym_int80_landing_pad; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    18     typedef __u64 Elf64_Off;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   
unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;   219     struct elf64_hdr {   unsigned char e_ident[16U];   Elf64_Half e_type;   Elf64_Half e_machine;   Elf64_Word e_version;   Elf64_Addr e_entry;   Elf64_Off e_phoff;   Elf64_Off e_shoff;   Elf64_Word e_flags;   Elf64_Half e_ehsize;   Elf64_Half e_phentsize;   Elf64_Half e_phnum;   Elf64_Half e_shentsize;   Elf64_Half e_shnum;   Elf64_Half e_shstrndx; } ;   235     typedef struct elf64_hdr Elf64_Ehdr;   314     struct elf64_shdr {   Elf64_Word sh_name;   Elf64_Word sh_type;   Elf64_Xword sh_flags;   Elf64_Addr sh_addr;   Elf64_Off sh_offset;   Elf64_Xword sh_size;   Elf64_Word sh_link;   Elf64_Word sh_info;   Elf64_Xword sh_addralign;   Elf64_Xword sh_entsize; } ;   326     typedef struct elf64_shdr Elf64_Shdr;    53     union __anonunion____missing_field_name_229 {   unsigned long bitmap[4U];   struct callback_head callback_head; } ;    53     struct idr_layer {   int prefix;   int layer;   struct idr_layer *ary[256U];   int count;   union __anonunion____missing_field_name_229 __annonCompField49; } ;    41     struct idr {   struct idr_layer *hint;   struct idr_layer *top;   int layers;   int cur;   spinlock_t lock;   int id_free_cnt;   struct idr_layer *id_free; } ;   124     struct ida_bitmap {   long nr_busy;   unsigned long bitmap[15U]; } ;   167     struct ida {   struct idr idr;   struct ida_bitmap *free_bitmap; } ;   199     struct dentry ;   200     struct iattr ;   201     struct super_block ;   202     struct file_system_type ;   203     struct kernfs_open_node ;   204     struct kernfs_iattrs ;   227     struct kernfs_root ;   227     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    85     struct kernfs_node ;    85     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    89     struct kernfs_ops ;    89    
 struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size;   struct kernfs_node *notify_next; } ;    96     union __anonunion____missing_field_name_234 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    96     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion____missing_field_name_234 __annonCompField50;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   138     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int (*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *);   int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;   157     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   173     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   void *priv;   struct mutex mutex;   struct mutex prealloc_mutex;   int event;   struct list_head list;   char *prealloc_buf;   size_t atomic_write_len;   bool mmapped;   const struct vm_operations_struct *vm_ops; } ;   191     struct kernfs_ops {   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   bool prealloc;   ssize_t  
(*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   511     struct sock ;   512     struct kobject ;   513     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   519     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   umode_t  (*is_bin_visible)(struct kobject *, struct bin_attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    92     struct bin_attribute {   struct attribute attr;   size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t  (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   165     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;   530     struct kref {   atomic_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   
unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   115     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   123     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   131     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   148     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   223     struct kernel_param ;   228     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    62     struct kparam_string ;    62     struct kparam_array ;    62     union __anonunion____missing_field_name_237 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    62     struct kernel_param {   const char *name;   struct module *mod;   const struct kernel_param_ops *ops;   const u16 perm;   s8 level;   u8 flags;   union __anonunion____missing_field_name_237 __annonCompField51; } ;    83     struct kparam_string {   unsigned int maxlen;   char *string; } ;    89     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   470     struct exception_table_entry ;    24     struct latch_tree_node {   struct rb_node node[2U]; } ;   211     struct mod_arch_specific { } ;    39     struct module_param_attrs ;    39     struct module_kobject {  
 struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    50     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;   277     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   284     struct mod_tree_node {   struct module *mod;   struct latch_tree_node node; } ;   291     struct module_layout {   void *base;   unsigned int size;   unsigned int text_size;   unsigned int ro_size;   unsigned int ro_after_init_size;   struct mod_tree_node mtn; } ;   307     struct mod_kallsyms {   Elf64_Sym *symtab;   unsigned int num_symtab;   char *strtab; } ;   321     struct klp_modinfo {   Elf64_Ehdr hdr;   Elf64_Shdr *sechdrs;   char *secstrings;   unsigned int symndx; } ;   329     struct module_sect_attrs ;   329     struct module_notes_attrs ;   329     struct trace_event_call ;   329     struct trace_enum_map ;   329     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const unsigned long *crcs;   unsigned int num_syms;   struct mutex param_lock;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const unsigned long *gpl_crcs;   const struct kernel_symbol *unused_syms;   const unsigned long *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   
const unsigned long *unused_gpl_crcs;   bool sig_ok;   bool async_probe_requested;   const struct kernel_symbol *gpl_future_syms;   const unsigned long *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   struct module_layout core_layout;   struct module_layout init_layout;   struct mod_arch_specific arch;   unsigned int taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   struct mod_kallsyms *kallsyms;   struct mod_kallsyms core_kallsyms;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct trace_event_call **trace_events;   unsigned int num_trace_events;   struct trace_enum_map **trace_enums;   unsigned int num_trace_enums;   bool klp;   bool klp_alive;   struct klp_modinfo *klp_info;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   atomic_t refcnt;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;   799     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    93     struct hlist_bl_node ;    93     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_287 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_286 {   struct __anonstruct____missing_field_name_287 __annonCompField52; } ;   114     struct lockref {   union __anonunion____missing_field_name_286 __annonCompField53; } ;    77     struct path ;    78     struct vfsmount ;    79     struct __anonstruct____missing_field_name_289 {   
u32 hash;   u32 len; } ;    79     union __anonunion____missing_field_name_288 {   struct __anonstruct____missing_field_name_289 __annonCompField54;   u64 hash_len; } ;    79     struct qstr {   union __anonunion____missing_field_name_288 __annonCompField55;   const unsigned char *name; } ;    65     struct dentry_operations ;    65     union __anonunion____missing_field_name_290 {   struct list_head d_lru;   wait_queue_head_t *d_wait; } ;    65     union __anonunion_d_u_291 {   struct hlist_node d_alias;   struct hlist_bl_node d_in_lookup_hash;   struct callback_head d_rcu; } ;    65     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   union __anonunion____missing_field_name_290 __annonCompField56;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_291 d_u; } ;   121     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   int (*d_init)(struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool );   struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;   592     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    19     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   int nid;   struct 
mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    80     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;    63     struct __anonstruct____missing_field_name_293 {   struct radix_tree_node *parent;   void *private_data; } ;    63     union __anonunion____missing_field_name_292 {   struct __anonstruct____missing_field_name_293 __annonCompField57;   struct callback_head callback_head; } ;    63     struct radix_tree_node {   unsigned char shift;   unsigned char offset;   unsigned int count;   union __anonunion____missing_field_name_292 __annonCompField58;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   106     struct radix_tree_root {   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;   531     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   538     struct pid_namespace ;   538     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 
fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    44     enum rcu_sync_type {   RCU_SYNC = 0,   RCU_SCHED_SYNC = 1,   RCU_BH_SYNC = 2 } ;    50     struct rcu_sync {   int gp_state;   int gp_count;   wait_queue_head_t gp_wait;   int cb_state;   struct callback_head cb_head;   enum rcu_sync_type gp_type; } ;    66     struct percpu_rw_semaphore {   struct rcu_sync rss;   unsigned int *read_count;   struct rw_semaphore rw_sem;   wait_queue_head_t writer;   int readers_block; } ;    87     struct block_device ;    88     struct io_context ;    89     struct cgroup_subsys_state ;   273     struct delayed_call {   void (*fn)(void *);   void *arg; } ;   264     struct backing_dev_info ;   265     struct bdi_writeback ;   266     struct export_operations ;   269     struct kiocb ;   270     struct pipe_inode_info ;   271     struct poll_table_struct ;   272     struct kstatfs ;   273     struct swap_info_struct ;   274     struct iov_iter ;   275     struct fscrypt_info ;   276     struct fscrypt_operations ;    76     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   213     struct dquot ;   214     struct kqid ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_302 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_302 kprojid_t;   181     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_303 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_303 __annonCompField60;   enum quota_type type; } ;   194     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t 
dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time64_t dqb_btime;   time64_t dqb_itime; } ;   216     struct quota_format_type ;   217     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   282     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   309     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   321     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   338     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   
int d_ino_warns;   int d_spc_warns;   u64 d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   361     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   407     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   418     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   431     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   447     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   511     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   541     struct writeback_control ;   542     struct kiocb {   
struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int ki_flags; } ;   368     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   bool  (*isolate_page)(struct page *, isolate_mode_t );   void (*putback_page)(struct page *);   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   427     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   atomic_t i_mmap_writable;   struct rb_root i_mmap;   struct rw_semaphore i_mmap_rwsem;   unsigned long nrpages;   unsigned long nrexceptional;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   spinlock_t private_lock;   gfp_t gfp_mask;   struct list_head private_list;   
void *private_data; } ;   449     struct request_queue ;   450     struct hd_struct ;   450     struct gendisk ;   450     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   565     struct posix_acl ;   592     struct inode_operations ;   592     union __anonunion____missing_field_name_308 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   592     union __anonunion____missing_field_name_309 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   592     struct file_lock_context ;   592     struct cdev ;   592     union __anonunion____missing_field_name_310 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev;   char *i_link;   unsigned int i_dir_seq; } ;   592     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_308 __annonCompField61;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct rw_semaphore i_rwsem;   unsigned long dirtied_when;   unsigned long dirtied_time_when;   struct 
hlist_node i_hash;   struct list_head i_io_list;   struct bdi_writeback *i_wb;   int i_wb_frn_winner;   u16 i_wb_frn_avg_time;   u16 i_wb_frn_history;   struct list_head i_lru;   struct list_head i_sb_list;   struct list_head i_wb_list;   union __anonunion____missing_field_name_309 __annonCompField62;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock_context *i_flctx;   struct address_space i_data;   struct list_head i_devices;   union __anonunion____missing_field_name_310 __annonCompField63;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   struct fscrypt_info *i_crypt_info;   void *i_private; } ;   847     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   855     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   878     union __anonunion_f_u_311 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   878     struct file {   union __anonunion_f_u_311 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   963     typedef void *fl_owner_t;   964     struct file_lock ;   965     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   971     struct lock_manager_operations {   int 
(*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   fl_owner_t  (*lm_get_owner)(fl_owner_t );   void (*lm_put_owner)(fl_owner_t );   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, int);   bool  (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock *, int, struct list_head *);   void (*lm_setup)(struct file_lock *, void **); } ;   998     struct nlm_lockowner ;   999     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_313 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_312 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_313 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct list_head fl_list;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_312 fl_u; } ;  1051     struct file_lock_context {   spinlock_t flc_lock;   struct list_head flc_flock;   struct list_head flc_posix;   struct list_head flc_lease; } ;  1118     struct files_struct ;  1271     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1306     struct sb_writers {   int frozen;   wait_queue_head_t wait_unfrozen;   struct 
percpu_rw_semaphore rw_sem[3U]; } ;  1336     struct super_operations ;  1336     struct xattr_handler ;  1336     struct mtd_info ;  1336     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_iflags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   const struct fscrypt_operations *s_cop;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   unsigned int s_quota_types;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct hlist_head s_pins;   struct user_namespace *s_user_ns;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu;   struct work_struct destroy_work;   struct mutex s_sync_lock;   int s_stack_depth;   spinlock_t s_inode_list_lock;   struct list_head s_inodes;   spinlock_t s_inode_wblist_lock;   struct list_head s_inodes_wb; } ;  1620     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1633     struct dir_context ;  
1658     struct dir_context {   int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1665     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   int (*iterate_shared)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **, void **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   void (*show_fdinfo)(struct seq_file *, struct file *);   ssize_t  (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int);   int 
(*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 );   ssize_t  (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;  1734     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1784     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_super)(struct 
super_block *);   int (*freeze_fs)(struct super_block *);   int (*thaw_super)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   struct dquot ** (*get_dquots)(struct inode *);   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);   long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;  2027     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;  3211     struct assoc_array_ptr ;  3211     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ;    31     typedef int32_t key_serial_t;    34     typedef uint32_t key_perm_t;    35     struct key ;    36     struct user_struct ;    37     struct signal_struct ;    38     struct key_type ;    42     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;    91   
  union key_payload {   void *rcu_data0;   void *data[4U]; } ;   128     union __anonunion____missing_field_name_314 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;   128     struct key_user ;   128     union __anonunion____missing_field_name_315 {   time_t expiry;   time_t revoked_at; } ;   128     struct __anonstruct____missing_field_name_317 {   struct key_type *type;   char *description; } ;   128     union __anonunion____missing_field_name_316 {   struct keyring_index_key index_key;   struct __anonstruct____missing_field_name_317 __annonCompField66; } ;   128     struct __anonstruct____missing_field_name_319 {   struct list_head name_link;   struct assoc_array keys; } ;   128     union __anonunion____missing_field_name_318 {   union key_payload payload;   struct __anonstruct____missing_field_name_319 __annonCompField68;   int reject_error; } ;   128     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion____missing_field_name_314 __annonCompField64;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion____missing_field_name_315 __annonCompField65;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion____missing_field_name_316 __annonCompField67;   union __anonunion____missing_field_name_318 __annonCompField69;   int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;   377     struct audit_context ;    27     struct group_info {   atomic_t usage;   int ngroups;   kgid_t gid[0U]; } ;    85     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t 
cap_bset;   kernel_cap_t cap_ambient;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   368     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   const struct file *file;   void *private; } ;    30     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   222     struct pinctrl ;   223     struct pinctrl_state ;   194     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *init_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    84     struct plist_node {   int prio;   struct list_head prio_list;   struct list_head node_list; } ;     4     typedef unsigned long cputime_t;    26     struct sem_undo_list ;    26     struct sysv_sem {   struct sem_undo_list *undo_list; } ;    26     struct sysv_shm {   struct list_head shm_clist; } ;    24     struct __anonstruct_sigset_t_320 {   unsigned long sig[1U]; } ;    24     typedef struct __anonstruct_sigset_t_320 sigset_t;    25     struct siginfo ;    17     typedef void __signalfn_t(int);    18     typedef __signalfn_t *__sighandler_t;    20     typedef void __restorefn_t();    21     typedef __restorefn_t *__sigrestore_t;    38     union sigval {   int sival_int;   void *sival_ptr; } ;    10     typedef union sigval sigval_t;    11     struct __anonstruct__kill_322 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;    11     struct __anonstruct__timer_323 
{   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;    11     struct __anonstruct__rt_324 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   sigval_t _sigval; } ;    11     struct __anonstruct__sigchld_325 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;    11     struct __anonstruct__addr_bnd_328 {   void *_lower;   void *_upper; } ;    11     union __anonunion____missing_field_name_327 {   struct __anonstruct__addr_bnd_328 _addr_bnd;   __u32 _pkey; } ;    11     struct __anonstruct__sigfault_326 {   void *_addr;   short _addr_lsb;   union __anonunion____missing_field_name_327 __annonCompField70; } ;    11     struct __anonstruct__sigpoll_329 {   long _band;   int _fd; } ;    11     struct __anonstruct__sigsys_330 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;    11     union __anonunion__sifields_321 {   int _pad[28U];   struct __anonstruct__kill_322 _kill;   struct __anonstruct__timer_323 _timer;   struct __anonstruct__rt_324 _rt;   struct __anonstruct__sigchld_325 _sigchld;   struct __anonstruct__sigfault_326 _sigfault;   struct __anonstruct__sigpoll_329 _sigpoll;   struct __anonstruct__sigsys_330 _sigsys; } ;    11     struct siginfo {   int si_signo;   int si_errno;   int si_code;   union __anonunion__sifields_321 _sifields; } ;   118     typedef struct siginfo siginfo_t;    22     struct sigpending {   struct list_head list;   sigset_t signal; } ;   257     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;   271     struct k_sigaction {   struct sigaction sa; } ;    43     struct seccomp_filter ;    44     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;    40     struct rt_mutex_waiter ;    41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;    11     struct timerqueue_node {   struct 
rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   u8 state;   u8 is_rel;   int start_pid;   void *start_site;   char start_comm[16U]; } ;   125     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t  (*get_time)();   ktime_t offset; } ;   158     struct hrtimer_cpu_base {   raw_spinlock_t lock;   seqcount_t seq;   struct hrtimer *running;   unsigned int cpu;   unsigned int active_bases;   unsigned int clock_was_set_seq;   bool migration_enabled;   bool nohz_active;   unsigned char in_hrtirq;   unsigned char hres_active;   unsigned char hang_detected;   ktime_t expires_next;   struct hrtimer *next_timer;   unsigned int nr_events;   unsigned int nr_retries;   unsigned int nr_hangs;   unsigned int max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;    12     enum kcov_mode {   KCOV_MODE_DISABLED = 0,   KCOV_MODE_TRACE = 1 } ;    17     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;    45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    41     struct percpu_ref ;    55     typedef void percpu_ref_func_t(struct percpu_ref *);    68     struct percpu_ref {   atomic_long_t count;   unsigned long percpu_count_ptr;   percpu_ref_func_t *release;   percpu_ref_func_t *confirm_switch;   bool force_atomic;   struct callback_head rcu; } ;   325     struct cgroup ;   326     struct cgroup_root 
;   327     struct cgroup_subsys ;   328     struct cgroup_taskset ;   372     struct cgroup_file {   struct kernfs_node *kn; } ;    90     struct cgroup_subsys_state {   struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   struct list_head sibling;   struct list_head children;   int id;   unsigned int flags;   u64 serial_nr;   atomic_t online_cnt;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   141     struct css_set {   atomic_t refcount;   struct hlist_node hlist;   struct list_head tasks;   struct list_head mg_tasks;   struct list_head cgrp_links;   struct cgroup *dfl_cgrp;   struct cgroup_subsys_state *subsys[13U];   struct list_head mg_preload_node;   struct list_head mg_node;   struct cgroup *mg_src_cgrp;   struct cgroup *mg_dst_cgrp;   struct css_set *mg_dst_cset;   struct list_head e_cset_node[13U];   struct list_head task_iters;   bool dead;   struct callback_head callback_head; } ;   221     struct cgroup {   struct cgroup_subsys_state self;   unsigned long flags;   int id;   int level;   int populated_cnt;   struct kernfs_node *kn;   struct cgroup_file procs_file;   struct cgroup_file events_file;   u16 subtree_control;   u16 subtree_ss_mask;   u16 old_subtree_control;   u16 old_subtree_ss_mask;   struct cgroup_subsys_state *subsys[13U];   struct cgroup_root *root;   struct list_head cset_links;   struct list_head e_csets[13U];   struct list_head pidlists;   struct mutex pidlist_mutex;   wait_queue_head_t offline_waitq;   struct work_struct release_agent_work;   int ancestor_ids[]; } ;   306     struct cgroup_root {   struct kernfs_root *kf_root;   unsigned int subsys_mask;   int hierarchy_id;   struct cgroup cgrp;   int cgrp_ancestor_id_storage;   atomic_t nr_cgrps;   struct list_head root_list;   unsigned int flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   345     struct cftype {   char name[64U];   unsigned long 
private;   size_t max_write_len;   unsigned int flags;   unsigned int file_offset;   struct cgroup_subsys *ss;   struct list_head node;   struct kernfs_ops *kf_ops;   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   struct lock_class_key lockdep_key; } ;   430     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_released)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   void (*css_reset)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_taskset *);   void (*attach)(struct cgroup_taskset *);   void (*post_attach)();   int (*can_fork)(struct task_struct *);   void (*cancel_fork)(struct task_struct *);   void (*fork)(struct task_struct *);   void (*exit)(struct task_struct *);   void (*free)(struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   bool early_init;   bool implicit_on_dfl;   bool broken_hierarchy;   bool warned_broken_hierarchy;   int id;   const char *name;   const char *legacy_name;   struct cgroup_root *root;   struct idr css_idr;   struct list_head cfts;   struct cftype *dfl_cftypes;   struct cftype *legacy_cftypes;   unsigned int depends_on; } ;   128     struct futex_pi_state ;   129     struct robust_list_head ;   130     struct bio_list ;   131     struct fs_struct ;   132 
    struct perf_event_context ;   133     struct blk_plug ;   134     struct nameidata ;   188     struct cfs_rq ;   189     struct task_group ;   495     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   539     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t ac_stime;   unsigned long ac_minflt;   unsigned long ac_majflt; } ;   547     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   554     struct prev_cputime {   cputime_t utime;   cputime_t stime;   raw_spinlock_t lock; } ;   579     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   595     struct task_cputime_atomic {   atomic64_t utime;   atomic64_t stime;   atomic64_t sum_exec_runtime; } ;   617     struct thread_group_cputimer {   struct task_cputime_atomic cputime_atomic;   bool running;   bool checking_timer; } ;   662     struct autogroup ;   663     struct tty_struct ;   663     struct taskstats ;   663     struct tty_audit_buf ;   663     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   struct autogroup *autogroup;   seqlock_t stats_lock;   cputime_t 
utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct pacct_struct pacct;   struct taskstats *stats;   unsigned int audit_tty;   struct tty_audit_buf *tty_audit_buf;   bool oom_flag_origin;   short oom_score_adj;   short oom_score_adj_min;   struct mm_struct *oom_mm;   struct mutex cred_guard_mutex; } ;   839     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   unsigned long unix_inflight;   atomic_long_t pipe_bufs;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   884     struct reclaim_state ;   885     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   900     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   u64 blkio_start;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   u64 freepages_start;   u64 freepages_delay;   u32 freepages_count; } ;   957     struct wake_q_node {   struct wake_q_node *next; } ;  1235     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;  1243     struct sched_avg {   u64 last_update_time;   u64 load_sum;   u32 util_sum;   u32 period_contrib;   
unsigned long load_avg;   unsigned long util_avg; } ;  1301     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1336     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1373     struct rt_rq ;  1373     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   unsigned short on_rq;   unsigned short on_list;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1391     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;  1455     struct tlbflush_unmap_batch {   struct cpumask cpumask;   bool flush_required;   bool writable; } ;  1474     struct sched_class ;  1474     struct compat_robust_list_head ;  1474     struct numa_group ;  1474     struct kcov ;  1474     struct task_struct {   struct 
thread_info thread_info;   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   unsigned int cpu;   unsigned int wakee_flips;   unsigned long wakee_flip_decay_ts;   struct task_struct *last_wakee;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   unsigned long rcu_tasks_nvcsw;   bool rcu_tasks_holdout;   struct list_head rcu_tasks_holdout_list;   int rcu_tasks_idle_cpu;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   u32 vmacache_seqnum;   struct vm_area_struct *vmacache[4U];   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned long jobctl;   unsigned int personality;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   unsigned char sched_migrated;   unsigned char sched_remote_wakeup;   unsigned char;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char restore_sigmask;   unsigned char memcg_may_oom;   unsigned char memcg_kmem_skip_account;   unsigned char brk_randomized;   unsigned long atomic_flags;   struct restart_block restart_block;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion 
*vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   u64 start_time;   u64 real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   struct nameidata *nameidata;   struct sysv_sem sysvsem;   struct sysv_shm sysvshm;   unsigned long last_switch_count;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   unsigned int sas_ss_flags;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct wake_q_node wake_q;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   unsigned int in_ubsan;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug 
*plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults;   unsigned long total_numa_faults;   unsigned long numa_faults_locality[3U];   unsigned long numa_pages_migrated;   struct tlbflush_unmap_batch tlb_ubc;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   u64 timer_slack_ns;   u64 default_timer_slack_ns;   unsigned int kasan_depth;   unsigned long trace;   unsigned long trace_recursion;   enum kcov_mode kcov_mode;   unsigned int kcov_size;   void *kcov_area;   struct kcov *kcov;   struct mem_cgroup *memcg_in_oom;   gfp_t memcg_oom_gfp_mask;   int memcg_oom_order;   unsigned int memcg_nr_pages_over_high;   struct 
uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg;   unsigned long task_state_change;   int pagefault_disabled;   struct task_struct *oom_reaper_list;   atomic_t stack_refcount;   struct thread_struct thread; } ;  3606     struct ratelimit_state {   raw_spinlock_t lock;   int interval;   int burst;   int printed;   int missed;   unsigned long begin;   unsigned long flags; } ;    76     struct dma_map_ops ;    76     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    21     struct pdev_archdata { } ;    24     struct device_private ;    25     struct device_driver ;    26     struct driver_private ;    27     struct class ;    28     struct subsys_private ;    29     struct bus_type ;    30     struct device_node ;    31     struct fwnode_handle ;    32     struct iommu_ops ;    33     struct iommu_group ;    34     struct iommu_fwspec ;    62     struct device_attribute ;    62     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   const struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   143     struct device_type ;   202     enum probe_type {   PROBE_DEFAULT_STRATEGY = 0,   PROBE_PREFER_ASYNCHRONOUS = 1,   PROBE_FORCE_SYNCHRONOUS = 2 } ;   208     struct of_device_id ;   208     struct acpi_device_id ;   208     struct device_driver {   const char *name;   
struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   enum probe_type probe_type;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   358     struct class_attribute ;   358     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   451     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   519     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   547     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   700     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long 
segment_boundary_mask; } ;   709     struct irq_domain ;   709     struct dma_coherent_mem ;   709     struct cma ;   709     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct irq_domain *msi_domain;   struct dev_pin_info *pins;   struct list_head msi_list;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct fwnode_handle *fwnode;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   struct iommu_fwspec *iommu_fwspec;   bool offline_disabled;   bool offline; } ;   865     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct wake_irq *wakeirq;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;    13     typedef unsigned long kernel_ulong_t;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   195     struct pnp_device_id {   __u8 id[8U];   kernel_ulong_t driver_data; } ;   203     struct __anonstruct_devs_357 {   __u8 
id[8U]; } ;   203     struct pnp_card_device_id {   __u8 id[8U];   kernel_ulong_t driver_data;   struct __anonstruct_devs_357 devs[8U]; } ;   229     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   484     struct platform_device_id {   char name[20U];   kernel_ulong_t driver_data; } ;   674     struct mfd_cell ;   676     struct platform_device {   const char *name;   int id;   bool id_auto;   struct device dev;   u32 num_resources;   struct resource *resource;   const struct platform_device_id *id_entry;   char *driver_override;   struct mfd_cell *mfd_cell;   struct pdev_archdata archdata; } ;   179     struct platform_driver {   int (*probe)(struct platform_device *);   int (*remove)(struct platform_device *);   void (*shutdown)(struct platform_device *);   int (*suspend)(struct platform_device *, pm_message_t );   int (*resume)(struct platform_device *);   struct device_driver driver;   const struct platform_device_id *id_table;   bool prevent_deferred_probe; } ;   352     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;   423     struct proc_dir_entry ;   130     struct exception_table_entry {   int insn;   int fixup;   int handler; } ;   494     struct tasklet_struct {   struct tasklet_struct *next;   unsigned long state;   atomic_t count;   void (*func)(unsigned long);   unsigned long data; } ;   716     struct scatterlist ;    96     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;   273     struct vm_fault {   unsigned int flags;   gfp_t gfp_mask;   unsigned long pgoff;   void *virtual_address;   struct page *cow_page;   struct page *page;   void *entry; } ;   308     struct fault_env {   struct vm_area_struct *vma;   unsigned long address;   unsigned int flags;   pmd_t *pmd;   pte_t *pte;   spinlock_t *ptl;   pgtable_t prealloc_pte; } ;   335     struct 
vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*mremap)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int);   void (*map_pages)(struct fault_env *, unsigned long, unsigned long);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;  2450     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    21     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;   158     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long);   void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   dma_addr_t  
(*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long);   void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;   207     struct pnp_protocol ;   208     struct pnp_dev ;   195     struct pnp_id ;   195     struct pnp_card {   struct device dev;   unsigned char number;   struct list_head global_list;   struct list_head protocol_list;   struct list_head devices;   struct pnp_protocol *protocol;   struct pnp_id *id;   char name[50U];   unsigned char pnpver;   unsigned char productver;   unsigned int serial;   unsigned char checksum;   struct proc_dir_entry *procdir; } ;   218     struct pnp_card_driver ;   218     struct pnp_card_link {   struct pnp_card *card;   struct pnp_card_driver *driver;   void *driver_data;   pm_message_t pm_state; } ;   243     struct pnp_driver ;   243     struct pnp_dev {   struct device dev;   u64 dma_mask;   unsigned int number;   int status;   struct list_head global_list;   struct list_head protocol_list;   struct list_head card_list;   struct list_head rdev_list;   struct pnp_protocol *protocol;   struct pnp_card *card;   struct pnp_driver *driver;   struct pnp_card_link *card_link;   struct pnp_id *id;   int active;   int capabilities;   unsigned int num_dependent_sets;   struct list_head resources;   struct list_head options;   char name[50U];   int flags;   struct proc_dir_entry *procent;   
void *data; } ;   356     struct pnp_id {   char id[8U];   struct pnp_id *next; } ;   379     struct pnp_driver {   char *name;   const struct pnp_device_id *id_table;   unsigned int flags;   int (*probe)(struct pnp_dev *, const struct pnp_device_id *);   void (*remove)(struct pnp_dev *);   void (*shutdown)(struct pnp_dev *);   int (*suspend)(struct pnp_dev *, pm_message_t );   int (*resume)(struct pnp_dev *);   struct device_driver driver; } ;   391     struct pnp_card_driver {   struct list_head global_list;   char *name;   const struct pnp_card_device_id *id_table;   unsigned int flags;   int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *);   void (*remove)(struct pnp_card_link *);   int (*suspend)(struct pnp_card_link *, pm_message_t );   int (*resume)(struct pnp_card_link *);   struct pnp_driver link; } ;   406     struct pnp_protocol {   struct list_head protocol_list;   char *name;   int (*get)(struct pnp_dev *);   int (*set)(struct pnp_dev *);   int (*disable)(struct pnp_dev *);   bool  (*can_wakeup)(struct pnp_dev *);   int (*suspend)(struct pnp_dev *, pm_message_t );   int (*resume)(struct pnp_dev *);   unsigned char number;   struct device dev;   struct list_head cards;   struct list_head devices; } ;   249     enum led_brightness {   LED_OFF = 0,   LED_HALF = 127,   LED_FULL = 255 } ;   255     struct led_trigger ;   255     struct led_classdev {   const char *name;   enum led_brightness brightness;   enum led_brightness max_brightness;   int flags;   void (*brightness_set)(struct led_classdev *, enum led_brightness );   int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness );   enum led_brightness  (*brightness_get)(struct led_classdev *);   int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *);   struct device *dev;   const struct attribute_group **groups;   struct list_head node;   const char *default_trigger;   unsigned long blink_delay_on;   unsigned long blink_delay_off;   struct 
timer_list blink_timer;   int blink_brightness;   void (*flash_resume)(struct led_classdev *);   struct work_struct set_brightness_work;   int delayed_set_value;   struct rw_semaphore trigger_lock;   struct led_trigger *trigger;   struct list_head trig_list;   void *trigger_data;   bool activated;   struct mutex led_access; } ;   221     struct led_trigger {   const char *name;   void (*activate)(struct led_classdev *);   void (*deactivate)(struct led_classdev *);   rwlock_t leddev_list_lock;   struct list_head led_cdevs;   struct list_head next_trig; } ;   171     struct fault_attr {   unsigned long probability;   unsigned long interval;   atomic_t times;   atomic_t space;   unsigned long verbose;   bool task_filter;   unsigned long stacktrace_depth;   unsigned long require_start;   unsigned long require_end;   unsigned long reject_start;   unsigned long reject_end;   unsigned long count;   struct ratelimit_state ratelimit_state;   struct dentry *dname; } ;    67     struct mmc_data ;    68     struct mmc_request ;    69     struct mmc_command {   u32 opcode;   u32 arg;   u32 resp[4U];   unsigned int flags;   unsigned int retries;   int error;   unsigned int busy_timeout;   bool sanitize_busy;   struct mmc_data *data;   struct mmc_request *mrq; } ;   108     struct mmc_data {   unsigned int timeout_ns;   unsigned int timeout_clks;   unsigned int blksz;   unsigned int blocks;   int error;   unsigned int flags;   unsigned int bytes_xfered;   struct mmc_command *stop;   struct mmc_request *mrq;   unsigned int sg_len;   int sg_count;   struct scatterlist *sg;   s32 host_cookie; } ;   130     struct mmc_host ;   131     struct mmc_request {   struct mmc_command *sbc;   struct mmc_command *cmd;   struct mmc_data *data;   struct mmc_command *stop;   struct completion completion;   struct completion cmd_completion;   void (*done)(struct mmc_request *);   struct mmc_host *host;   bool cap_cmd_during_tfr; } ;   146     struct mmc_card ;   147     struct mmc_async_req ;   
222     struct mmc_cid {   unsigned int manfid;   char prod_name[8U];   unsigned char prv;   unsigned int serial;   unsigned short oemid;   unsigned short year;   unsigned char hwrev;   unsigned char fwrev;   unsigned char month; } ;    28     struct mmc_csd {   unsigned char structure;   unsigned char mmca_vsn;   unsigned short cmdclass;   unsigned short tacc_clks;   unsigned int tacc_ns;   unsigned int c_size;   unsigned int r2w_factor;   unsigned int max_dtr;   unsigned int erase_size;   unsigned int read_blkbits;   unsigned int write_blkbits;   unsigned int capacity;   unsigned char read_partial;   unsigned char read_misalign;   unsigned char write_partial;   unsigned char write_misalign;   unsigned char dsr_imp; } ;    48     struct mmc_ext_csd {   u8 rev;   u8 erase_group_def;   u8 sec_feature_support;   u8 rel_sectors;   u8 rel_param;   u8 part_config;   u8 cache_ctrl;   u8 rst_n_function;   u8 max_packed_writes;   u8 max_packed_reads;   u8 packed_event_en;   unsigned int part_time;   unsigned int sa_timeout;   unsigned int generic_cmd6_time;   unsigned int power_off_longtime;   u8 power_off_notification;   unsigned int hs_max_dtr;   unsigned int hs200_max_dtr;   unsigned int sectors;   unsigned int hc_erase_size;   unsigned int hc_erase_timeout;   unsigned int sec_trim_mult;   unsigned int sec_erase_mult;   unsigned int trim_timeout;   bool partition_setting_completed;   unsigned long long enhanced_area_offset;   unsigned int enhanced_area_size;   unsigned int cache_size;   bool hpi_en;   bool hpi;   unsigned int hpi_cmd;   bool bkops;   bool man_bkops_en;   unsigned int data_sector_size;   unsigned int data_tag_unit_size;   unsigned int boot_ro_lock;   bool boot_ro_lockable;   bool ffu_capable;   u8 fwrev[8U];   u8 raw_exception_status;   u8 raw_partition_support;   u8 raw_rpmb_size_mult;   u8 raw_erased_mem_count;   u8 strobe_support;   u8 raw_ext_csd_structure;   u8 raw_card_type;   u8 raw_driver_strength;   u8 out_of_int_time;   u8 raw_pwr_cl_52_195;   
u8 raw_pwr_cl_26_195;   u8 raw_pwr_cl_52_360;   u8 raw_pwr_cl_26_360;   u8 raw_s_a_timeout;   u8 raw_hc_erase_gap_size;   u8 raw_erase_timeout_mult;   u8 raw_hc_erase_grp_size;   u8 raw_sec_trim_mult;   u8 raw_sec_erase_mult;   u8 raw_sec_feature_support;   u8 raw_trim_mult;   u8 raw_pwr_cl_200_195;   u8 raw_pwr_cl_200_360;   u8 raw_pwr_cl_ddr_52_195;   u8 raw_pwr_cl_ddr_52_360;   u8 raw_pwr_cl_ddr_200_360;   u8 raw_bkops_status;   u8 raw_sectors[4U];   unsigned int feature_support; } ;   125     struct sd_scr {   unsigned char sda_vsn;   unsigned char sda_spec3;   unsigned char bus_widths;   unsigned char cmds; } ;   135     struct sd_ssr {   unsigned int au;   unsigned int erase_timeout;   unsigned int erase_offset; } ;   143     struct sd_switch_caps {   unsigned int hs_max_dtr;   unsigned int uhs_max_dtr;   unsigned int sd3_bus_mode;   unsigned int sd3_drv_type;   unsigned int sd3_curr_limit; } ;   174     struct sdio_cccr {   unsigned int sdio_vsn;   unsigned int sd_vsn;   unsigned char multi_block;   unsigned char low_speed;   unsigned char wide_bus;   unsigned char high_power;   unsigned char high_speed;   unsigned char disable_cd; } ;   195     struct sdio_cis {   unsigned short vendor;   unsigned short device;   unsigned short blksize;   unsigned int max_dtr; } ;   202     struct mmc_ios ;   203     struct sdio_func ;   204     struct sdio_func_tuple ;   217     struct mmc_part {   unsigned int size;   unsigned int part_cfg;   char name[20U];   bool force_ro;   unsigned int area_type; } ;   241     struct mmc_card {   struct mmc_host *host;   struct device dev;   u32 ocr;   unsigned int rca;   unsigned int type;   unsigned int state;   unsigned int quirks;   unsigned int erase_size;   unsigned int erase_shift;   unsigned int pref_erase;   unsigned int eg_boundary;   u8 erased_byte;   u32 raw_cid[4U];   u32 raw_csd[4U];   u32 raw_scr[2U];   u32 raw_ssr[16U];   struct mmc_cid cid;   struct mmc_csd csd;   struct mmc_ext_csd ext_csd;   struct sd_scr scr;   
struct sd_ssr ssr;   struct sd_switch_caps sw_caps;   unsigned int sdio_funcs;   struct sdio_cccr cccr;   struct sdio_cis cis;   struct sdio_func *sdio_func[7U];   struct sdio_func *sdio_single_irq;   unsigned int num_info;   const char **info;   struct sdio_func_tuple *tuples;   unsigned int sd_bus_speed;   unsigned int mmc_avail_type;   unsigned int drive_strength;   struct dentry *debugfs_root;   struct mmc_part part[7U];   unsigned int nr_parts; } ;    25     typedef unsigned int mmc_pm_flag_t;    26     struct mmc_ios {   unsigned int clock;   unsigned short vdd;   unsigned char bus_mode;   unsigned char chip_select;   unsigned char power_mode;   unsigned char bus_width;   unsigned char timing;   unsigned char signal_voltage;   unsigned char drv_type;   bool enhanced_strobe; } ;    84     struct mmc_host_ops {   void (*post_req)(struct mmc_host *, struct mmc_request *, int);   void (*pre_req)(struct mmc_host *, struct mmc_request *, bool );   void (*request)(struct mmc_host *, struct mmc_request *);   void (*set_ios)(struct mmc_host *, struct mmc_ios *);   int (*get_ro)(struct mmc_host *);   int (*get_cd)(struct mmc_host *);   void (*enable_sdio_irq)(struct mmc_host *, int);   void (*init_card)(struct mmc_host *, struct mmc_card *);   int (*start_signal_voltage_switch)(struct mmc_host *, struct mmc_ios *);   int (*card_busy)(struct mmc_host *);   int (*execute_tuning)(struct mmc_host *, u32 );   int (*prepare_hs400_tuning)(struct mmc_host *, struct mmc_ios *);   void (*hs400_enhanced_strobe)(struct mmc_host *, struct mmc_ios *);   int (*select_drive_strength)(struct mmc_card *, unsigned int, int, int, int *);   void (*hw_reset)(struct mmc_host *);   void (*card_event)(struct mmc_host *);   int (*multi_io_quirk)(struct mmc_card *, unsigned int, int); } ;   164     struct mmc_async_req {   struct mmc_request *mrq;   int (*err_check)(struct mmc_card *, struct mmc_async_req *); } ;   178     struct mmc_slot {   int cd_irq;   void *handler_priv; } ;   194     
struct mmc_context_info {   bool is_done_rcv;   bool is_new_req;   bool is_waiting_last_req;   wait_queue_head_t wait;   spinlock_t lock; } ;   210     struct regulator ;   211     struct mmc_pwrseq ;   212     struct mmc_supply {   struct regulator *vmmc;   struct regulator *vqmmc; } ;   218     struct mmc_bus_ops ;   218     struct mmc_host {   struct device *parent;   struct device class_dev;   int index;   const struct mmc_host_ops *ops;   struct mmc_pwrseq *pwrseq;   unsigned int f_min;   unsigned int f_max;   unsigned int f_init;   u32 ocr_avail;   u32 ocr_avail_sdio;   u32 ocr_avail_sd;   u32 ocr_avail_mmc;   struct notifier_block pm_notify;   u32 max_current_330;   u32 max_current_300;   u32 max_current_180;   u32 caps;   u32 caps2;   mmc_pm_flag_t pm_caps;   unsigned int max_seg_size;   unsigned short max_segs;   unsigned short unused;   unsigned int max_req_size;   unsigned int max_blk_size;   unsigned int max_blk_count;   unsigned int max_busy_timeout;   spinlock_t lock;   struct mmc_ios ios;   unsigned char use_spi_crc;   unsigned char claimed;   unsigned char bus_dead;   unsigned char removed;   unsigned char can_retune;   unsigned char doing_retune;   unsigned char retune_now;   unsigned char retune_paused;   int rescan_disable;   int rescan_entered;   int need_retune;   int hold_retune;   unsigned int retune_period;   struct timer_list retune_timer;   bool trigger_card_event;   struct mmc_card *card;   wait_queue_head_t wq;   struct task_struct *claimer;   int claim_cnt;   struct delayed_work detect;   int detect_change;   struct mmc_slot slot;   const struct mmc_bus_ops *bus_ops;   unsigned int bus_refs;   unsigned int sdio_irqs;   struct task_struct *sdio_irq_thread;   bool sdio_irq_pending;   atomic_t sdio_irq_thread_abort;   mmc_pm_flag_t pm_flags;   struct led_trigger *led;   bool regulator_enabled;   struct mmc_supply supply;   struct dentry *debugfs_root;   struct mmc_async_req *areq;   struct mmc_context_info context_info;   struct 
mmc_request *ongoing_mrq;   struct fault_attr fail_mmc_request;   unsigned int actual_clock;   unsigned int slotno;   int dsr_req;   u32 dsr;   unsigned long private[0U]; } ;   313     struct wbsd_host {   struct mmc_host *mmc;   spinlock_t lock;   int flags;   struct mmc_request *mrq;   u8 isr;   struct scatterlist *cur_sg;   unsigned int num_sg;   unsigned int offset;   unsigned int remain;   char *dma_buffer;   dma_addr_t dma_addr;   int firsterr;   u8 clk;   unsigned char bus_width;   int config;   u8 unlock_code;   int chip_id;   int base;   int irq;   int dma;   struct tasklet_struct card_tasklet;   struct tasklet_struct fifo_tasklet;   struct tasklet_struct crc_tasklet;   struct tasklet_struct timeout_tasklet;   struct tasklet_struct finish_tasklet;   struct timer_list ignore_timer; } ;     1     long int __builtin_expect(long, long);    34     extern struct module __this_module;   204     bool  test_and_set_bit(long nr, volatile unsigned long *addr);   163     int printk(const char *, ...);    55     void __dynamic_pr_debug(struct _ddebug *, const char *, ...);     8     void ldv_dma_map_page();    71     void warn_slowpath_null(const char *, const int);     7     extern unsigned long page_offset_base;     9     extern unsigned long vmemmap_base;    23     unsigned long int __phys_addr(unsigned long);    32     void * __memcpy(void *, const void *, size_t );    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    22     void _raw_spin_lock(raw_spinlock_t *);    30     void _raw_spin_lock_bh(raw_spinlock_t *);    34     unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);    41     void _raw_spin_unlock(raw_spinlock_t *);    42     void _raw_spin_unlock_bh(raw_spinlock_t *);    45     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   289     raw_spinlock_t * spinlock_check(spinlock_t *lock);   300     void spin_lock(spinlock_t *lock);   305     void spin_lock_bh(spinlock_t *lock);   345     
void spin_unlock(spinlock_t *lock);   350     void spin_unlock_bh(spinlock_t *lock);   360     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);    78     extern volatile unsigned long jiffies;   296     unsigned long int __msecs_to_jiffies(const unsigned int);   358     unsigned long int msecs_to_jiffies(const unsigned int m);    95     void init_timer_key(struct timer_list *, unsigned int, const char *, struct lock_class_key *);   191     int mod_timer(struct timer_list *, unsigned long);   245     int del_timer_sync(struct timer_list *);   166     extern struct resource ioport_resource;   225     struct resource * __request_region(struct resource *, resource_size_t , resource_size_t , const char *, int);   234     void __release_region(struct resource *, resource_size_t , resource_size_t );   316     void outb(unsigned char value, int port);   316     unsigned char inb(int port);    87     const char * kobject_name(const struct kobject *kobj);   868     const char * dev_name(const struct device *dev);   915     void * dev_get_drvdata(const struct device *dev);   920     void dev_set_drvdata(struct device *dev, void *data);    46     void platform_device_unregister(struct platform_device *);   168     struct platform_device * platform_device_alloc(const char *, int);   176     int platform_device_add(struct platform_device *);   178     void platform_device_put(struct platform_device *);   199     int __platform_driver_register(struct platform_driver *, struct module *);   201     void platform_driver_unregister(struct platform_driver *);   211     void * platform_get_drvdata(const struct platform_device *pdev);   139     int request_threaded_irq(unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   144     int request_irq(unsigned int irq, irqreturn_t  (*handler)(int, void *), unsigned long flags, const char *name, void *dev);   158     void free_irq(unsigned int, void *);   558     
void __tasklet_schedule(struct tasklet_struct *);   560     void tasklet_schedule(struct tasklet_struct *t);   566     void __tasklet_hi_schedule(struct tasklet_struct *);   568     void tasklet_hi_schedule(struct tasklet_struct *t);   608     void tasklet_kill(struct tasklet_struct *);   610     void tasklet_init(struct tasklet_struct *, void (*)(unsigned long), unsigned long);    37     void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );    44     void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );  1003     void * lowmem_page_address(const struct page *page);   120     struct page * sg_page(struct scatterlist *sg);   239     void * sg_virt(struct scatterlist *sg);   131     void kmemcheck_mark_initialized(void *address, unsigned int n);   136     int valid_dma_direction(int dma_direction);    28     extern struct dma_map_ops *dma_ops;    30     struct dma_map_ops * get_dma_ops(struct device *dev);   180     dma_addr_t  ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);   180     dma_addr_t  dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);   203     void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);    10     void __const_udelay(unsigned long);    26     struct resource * pnp_get_resource(struct pnp_dev *, unsigned long, unsigned int);    36     int pnp_resource_valid(struct resource *res);    58     resource_size_t  pnp_port_start(struct pnp_dev *dev, unsigned int bar);   149     resource_size_t  pnp_irq(struct pnp_dev *dev, unsigned int bar);   173     resource_size_t  pnp_dma(struct pnp_dev *dev, unsigned int bar);   191     int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar);   479     int pnp_register_driver(struct pnp_driver *);   480     void 
pnp_unregister_driver(struct pnp_driver *);   403     struct mmc_host * mmc_alloc_host(int, struct device *);   404     int mmc_add_host(struct mmc_host *);   405     void mmc_remove_host(struct mmc_host *);   406     void mmc_free_host(struct mmc_host *);   409     void * mmc_priv(struct mmc_host *host);   423     void mmc_detect_change(struct mmc_host *, unsigned long);   424     void mmc_request_done(struct mmc_host *, struct mmc_request *);   154     void kfree(const void *);   330     void * __kmalloc(size_t , gfp_t );   478     void * kmalloc(size_t size, gfp_t flags);   150     extern struct spinlock dma_spin_lock;   152     unsigned long int claim_dma_lock();   159     void release_dma_lock(unsigned long flags);   166     void enable_dma(unsigned int dmanr);   174     void disable_dma(unsigned int dmanr);   189     void clear_dma_ff(unsigned int dmanr);   198     void set_dma_mode(unsigned int dmanr, char mode);   211     void set_dma_page(unsigned int dmanr, char pagenr);   242     void set_dma_addr(unsigned int dmanr, unsigned int a);   263     void set_dma_count(unsigned int dmanr, unsigned int count);   287     int get_dma_residue(unsigned int dmanr);   305     int request_dma(unsigned int, const char *);   306     void free_dma(unsigned int);    58     const struct pnp_device_id pnp_dev_table[3U] = { { { 'W', 'E', 'C', '0', '5', '1', '7', '\x0' }, 0UL }, { { 'W', 'E', 'C', '0', '5', '1', '8', '\x0' }, 0UL }, { { '\x0' }, 0UL } };    64     const struct pnp_device_id __mod_pnp__pnp_dev_table_device_table[3U] = {  };    68     const int config_ports[2U] = { 46, 78 };    69     const int unlock_codes[2U] = { 131, 135 };    71     const int valid_ids[1U] = { 28946 };    76     unsigned int param_nopnp = 0U;    80     unsigned int param_io = 584U;    81     unsigned int param_irq = 6U;    82     int param_dma = 2;    88     void wbsd_unlock_config(struct wbsd_host *host);    96     void wbsd_lock_config(struct wbsd_host *host);   103     void 
wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value);   111     u8  wbsd_read_config(struct wbsd_host *host, u8 reg);   119     void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value);   125     u8  wbsd_read_index(struct wbsd_host *host, u8 index);   135     void wbsd_init_device(struct wbsd_host *host);   194     void wbsd_reset(struct wbsd_host *host);   208     void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq);   241     void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data);   253     int wbsd_next_sg(struct wbsd_host *host);   272     char * wbsd_sg_to_buffer(struct wbsd_host *host);   277     void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data);   294     void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data);   315     void wbsd_get_short_reply(struct wbsd_host *host, struct mmc_command *cmd);   333     void wbsd_get_long_reply(struct wbsd_host *host, struct mmc_command *cmd);   358     void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd);   418     void wbsd_empty_fifo(struct wbsd_host *host);   480     void wbsd_fill_fifo(struct wbsd_host *host);   542     void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data);   677     void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data);   755     void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq);   851     void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);   924     int wbsd_get_ro(struct mmc_host *mmc);   946     const struct mmc_host_ops wbsd_ops = { 0, 0, &wbsd_request, &wbsd_set_ios, &wbsd_get_ro, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };   962     void wbsd_reset_ignore(unsigned long data);   987     struct mmc_data * wbsd_get_data(struct wbsd_host *host);  1004     void wbsd_tasklet_card(unsigned long param);  1053     void wbsd_tasklet_fifo(unsigned long param);  1084     void wbsd_tasklet_crc(unsigned long param);  1108     void wbsd_tasklet_timeout(unsigned long 
param);  1132     void wbsd_tasklet_finish(unsigned long param);  1157     irqreturn_t  wbsd_irq(int irq, void *dev_id);  1199     int wbsd_alloc_mmc(struct device *dev);  1268     void wbsd_free_mmc(struct device *dev);  1291     int wbsd_scan(struct wbsd_host *host);  1347     int wbsd_request_region(struct wbsd_host *host, int base);  1360     void wbsd_release_regions(struct wbsd_host *host);  1377     void wbsd_request_dma(struct wbsd_host *host, int dma);  1436     void wbsd_release_dma(struct wbsd_host *host);  1455     int wbsd_request_irq(struct wbsd_host *host, int irq);  1485     void wbsd_release_irq(struct wbsd_host *host);  1505     int wbsd_request_resources(struct wbsd_host *host, int base, int irq, int dma);  1536     void wbsd_release_resources(struct wbsd_host *host);  1547     void wbsd_chip_config(struct wbsd_host *host);  1591     int wbsd_chip_validate(struct wbsd_host *host);  1631     void wbsd_chip_poweroff(struct wbsd_host *host);  1647     int wbsd_init(struct device *dev, int base, int irq, int dma, int pnp);  1733     void wbsd_shutdown(struct device *dev, int pnp);  1760     int wbsd_probe(struct platform_device *dev);  1766     int wbsd_remove(struct platform_device *dev);  1780     int wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id);  1799     void wbsd_pnp_remove(struct pnp_dev *dev);  1812     int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state);  1829     int wbsd_platform_resume(struct platform_device *dev);  1854     int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state);  1865     int wbsd_pnp_resume(struct pnp_dev *pnp_dev);  1908     struct platform_device *wbsd_device = 0;  1910     struct platform_driver wbsd_driver = { &wbsd_probe, &wbsd_remove, 0, &wbsd_platform_suspend, &wbsd_platform_resume, { "wbsd", 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0, (_Bool)0 };  1923     struct pnp_driver wbsd_pnp_driver = { (char *)"wbsd", (const struct pnp_device_id 
*)(&pnp_dev_table), 0U, &wbsd_pnp_probe, &wbsd_pnp_remove, 0, &wbsd_pnp_suspend, &wbsd_pnp_resume, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } };  1939     int wbsd_drv_init();  1978     void wbsd_drv_exit();  2032     void ldv_check_final_state();  2035     void ldv_check_return_value(int);  2038     void ldv_check_return_value_probe(int);  2041     void ldv_initialize();  2044     void ldv_handler_precall();  2047     int nondet_int();  2050     int LDV_IN_INTERRUPT = 0;  2053     void ldv_main0_sequence_infinite_withcheck_stateful();    10     void ldv_error();     7     bool  ldv_is_err(const void *ptr);    14     void * ldv_err_ptr(long error);    21     long int ldv_ptr_err(const void *ptr);    28     bool  ldv_is_err_or_null(const void *ptr);     5     int LDV_DMA_MAP_CALLS = 0;    16     void ldv_dma_mapping_error();           return ;         }        {      2055     struct mmc_host *var_group1;  2056     struct mmc_request *var_group2;  2057     struct mmc_ios *var_group3;  2058     struct platform_device *var_group4;  2059     int res_wbsd_probe_48;  2060     struct pm_message var_wbsd_platform_suspend_52_p1;  2061     struct pnp_dev *var_group5;  2062     const struct pnp_device_id *var_wbsd_pnp_probe_50_p1;  2063     int res_wbsd_pnp_probe_50;  2064     struct pm_message var_wbsd_pnp_suspend_54_p1;  2065     int var_wbsd_irq_31_p0;  2066     void *var_wbsd_irq_31_p1;  2067     unsigned long var_wbsd_reset_ignore_24_p0;  2068     int ldv_s_wbsd_driver_platform_driver;  2069     int ldv_s_wbsd_pnp_driver_pnp_driver;  2070     int tmp;  2071     int tmp___0;  2072     int tmp___1;  2675     ldv_s_wbsd_driver_platform_driver = 0;  2677     ldv_s_wbsd_pnp_driver_pnp_driver = 0;  2619     LDV_IN_INTERRUPT = 1;  2628     ldv_initialize() { /* Function call is skipped due to function is undefined */}  2662     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {  1941       int result;  1943       
printk("\016wbsd: Winbond W83L51xD SD/MMC card interface driver\n") { /* Function call is skipped due to function is undefined */}  1945       printk("\016wbsd: Copyright(c) Pierre Ossman\n") { /* Function call is skipped due to function is undefined */}  1950       result = pnp_register_driver(&wbsd_pnp_driver) { /* Function call is skipped due to function is undefined */}           } 2685     goto ldv_35745;  2685     tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}  2689     goto ldv_35744;  2686     ldv_35744:;  2690     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  2690     switch (tmp___0);           {  1762       int tmp;             {           } 1649         struct wbsd_host *host;  1650         struct mmc_host *mmc;  1651         int ret;  1652         void *tmp;  1653         void *tmp___0;  1654         int tmp___1;  1655         unsigned long __ms;  1656         unsigned long tmp___2;  1657         const char *tmp___3;  1650         host = (struct wbsd_host *)0;  1651         mmc = (struct mmc_host *)0;               {  1201           struct mmc_host *mmc;  1202           struct wbsd_host *host;  1203           void *tmp;  1204           struct lock_class_key __key;  1205           struct lock_class_key __key___0;  1207           mmc = mmc_alloc_host(496, dev) { /* Function call is skipped due to function is undefined */}                 {   411             return (void *)(&(host->private));;                 } 1211           host = (struct wbsd_host *)tmp;  1212           host->mmc = mmc;  1214           host->dma = -1;  1219           mmc->ops = &wbsd_ops;  1220           mmc->f_min = 375000U;  1221           mmc->f_max = 24000000U;  1222           mmc->ocr_avail = 3145728U;  1223           mmc->caps = 1U;                 {   291             return &(lock->__annonCompField20.rlock);;                 } 1225           
__raw_spin_lock_init(&(host->lock.__annonCompField20.rlock), "&(&host->lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */}  1230           init_timer_key(&(host->ignore_timer), 0U, "(&host->ignore_timer)", &__key___0) { /* Function call is skipped due to function is undefined */}  1231           host->ignore_timer.data = (unsigned long)host;  1232           host->ignore_timer.function = &wbsd_reset_ignore;  1238           mmc->max_segs = 128U;  1243           mmc->max_req_size = 65536U;  1249           mmc->max_seg_size = mmc->max_req_size;  1255           mmc->max_blk_size = 4087U;  1261           mmc->max_blk_count = mmc->max_req_size;                 {               }  922             dev->driver_data = data;   923             return ;;                 }              {   917           void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);   917           return __CPAchecker_TMP_0;;               } 1658         mmc = (struct mmc_host *)tmp;               {   411           return (void *)(&(host->private));;               } 1659         host = (struct wbsd_host *)tmp___0;               {  1293           int i;  1294           int j;  1295           int k;  1296           int id;  1297           struct resource *tmp;  1298           unsigned char tmp___0;  1299           unsigned char tmp___1;  1300           struct _ddebug descriptor;  1301           long tmp___2;  1300           i = 0;  1300           goto ldv_35470;  1302           goto ldv_35469;  1301           ldv_35469:;  1301           tmp = __request_region(&ioport_resource, (resource_size_t )(config_ports[i]), 2ULL, "wbsd", 0) { /* Function call is skipped due to function is undefined */}  1304           j = 0;  1304           goto ldv_35467;  1306           goto ldv_35466;  1305           ldv_35466:;  1305           id = 65535;  1307           host->config = config_ports[i];  1308           host->unlock_code = (u8 )(unlock_codes[j]);                 {    90             
long tmp;    90             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}    92             int __CPAchecker_TMP_0 = (int)(host->unlock_code);                   { 316 Ignored inline assembler code   317               return ;;                   }   93             int __CPAchecker_TMP_1 = (int)(host->unlock_code);                   {                 }316 Ignored inline assembler code   317               return ;;                   }                { 316 Ignored inline assembler code   317             return ;;                 }                {   318             unsigned char value;   316             Ignored inline assembler code  316             return value;;                 } 1313           id = ((int)tmp___0) << 8;                 { 316 Ignored inline assembler code   317             return ;;                 }                {   318             unsigned char value;   316             Ignored inline assembler code  316             return value;;                 } 1316           id = ((int)tmp___1) | id;                 {    98             long tmp;    98             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   {                 }316 Ignored inline assembler code   317               return ;;                   } 1320           k = 0;  1320           goto ldv_35462;  1322           goto ldv_35461;  1321           ldv_35461:;  1322           host->chip_id = id;               }              {  1507           int ret;                 {  1349             struct resource *tmp;  1352             tmp = __request_region(&ioport_resource, (resource_size_t )base, 8ULL, "wbsd", 0) { /* Function call is skipped due to function is undefined */}  1355             host->base = base;                 }                {  1457             int ret;  1462             tasklet_init(&(host->card_tasklet), &wbsd_tasklet_card, (unsigned 
long)host) { /* Function call is skipped due to function is undefined */}  1464             tasklet_init(&(host->fifo_tasklet), &wbsd_tasklet_fifo, (unsigned long)host) { /* Function call is skipped due to function is undefined */}  1466             tasklet_init(&(host->crc_tasklet), &wbsd_tasklet_crc, (unsigned long)host) { /* Function call is skipped due to function is undefined */}  1468             tasklet_init(&(host->timeout_tasklet), &wbsd_tasklet_timeout, (unsigned long)host) { /* Function call is skipped due to function is undefined */}  1470             tasklet_init(&(host->finish_tasklet), &wbsd_tasklet_finish, (unsigned long)host) { /* Function call is skipped due to function is undefined */}                   {   147               int tmp;   147               tmp = request_threaded_irq(irq, handler, (irqreturn_t  (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */}   147               return tmp;;                   } 1480             host->irq = irq;                 }                {               } 1379             int tmp;  1380             void *tmp___0;  1382             tmp = request_dma((unsigned int)dma, "wbsd") { /* Function call is skipped due to function is undefined */}                   {   480               void *tmp___2;   495               tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}   495               return tmp___2;;                   } 1389             host->dma_buffer = (char *)tmp___0;  1391             unsigned long __CPAchecker_TMP_0 = (unsigned long)(host->dma_buffer);  1397             void *__CPAchecker_TMP_1 = (void *)(host->dma_buffer);                   {    38               unsigned long long tmp;                     {                   }  184                 struct dma_map_ops *ops;   185                 struct dma_map_ops *tmp;   186                 unsigned long long addr;   187                 int tmp___0;   188          
       long tmp___1;   189                 unsigned long tmp___2;   190                 unsigned long tmp___3;                       {    32                   long tmp;    35                   tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */}    35                   assume(!(tmp != 0L));    35                   assume(!(((unsigned long)(dev->archdata.dma_ops)) == ((unsigned long)((struct dma_map_ops *)0))));    38                   return dev->archdata.dma_ops;;                       }  185                 ops = tmp;                       {   133                   return ;;                       }                      {   138                   int __CPAchecker_TMP_0;   138                   assume(dma_direction == 0);                         __CPAchecker_TMP_0 = 1;   138                   return __CPAchecker_TMP_0;;                       }  189                 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */}   189                 assume(!(tmp___1 != 0L));   190                 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}   190                 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs);   193                 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}   193                 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */}   196                 return addr;;                     } 1411             host->dma = dma;                 }              {                 {    90             long tmp;    90             tmp = __builtin_expect((host->config) == 0, 0L) { /* 
Function call is skipped due to function is undefined */}    92             int __CPAchecker_TMP_0 = (int)(host->unlock_code);                   { 316 Ignored inline assembler code   317               return ;;                   }   93             int __CPAchecker_TMP_1 = (int)(host->unlock_code);                   {                 }316 Ignored inline assembler code   317               return ;;                   }                {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }                {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }                {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }                {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;         
          }                {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   } 1571           u8 __CPAchecker_TMP_0 = (u8 )(host->base);                 {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   } 1573           u8 __CPAchecker_TMP_1 = (u8 )(host->irq);                 {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   } 1576           u8 __CPAchecker_TMP_2 = (u8 )(host->dma);                 {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }                {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline 
assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }                {   105             long tmp;   105             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }                {               }   98             long tmp;    98             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   {                 }316 Ignored inline assembler code   317               return ;;                   } 1709         __const_udelay(21475000UL) { /* Function call is skipped due to function is undefined */}               {   137           unsigned char setup;   138           unsigned char ier;   139           unsigned char tmp;                 {   127             unsigned char tmp;                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }  318               unsigned char value;   316               Ignored inline assembler code  316               return value;;                   }  143           setup = (u8 )(((unsigned int)setup) | 6U);                 {                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }  149           setup = ((unsigned int)setup) & 247U;                 {                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler 
code   317               return ;;                   }  151           host->flags = (host->flags) & -3;                 {   127             unsigned char tmp;                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }  318               unsigned char value;   316               Ignored inline assembler code  316               return value;;                   }                { 316 Ignored inline assembler code   317             return ;;                 }                {                   { 316 Ignored inline assembler code   317               return ;;                   }                  {                 }316 Ignored inline assembler code   317               return ;;                   }                {   318             unsigned char value;   316             Ignored inline assembler code  316             return value;;                 }  172           host->flags = (host->flags) | 1;   179           ier = 0U;   180           ier = (u8 )(((unsigned int)ier) | 64U);   181           ier = (u8 )(((unsigned int)ier) | 32U);   182           ier = (u8 )(((unsigned int)ier) | 16U);   183           ier = (u8 )(((unsigned int)ier) | 8U);   184           ier = (u8 )(((unsigned int)ier) | 1U);                 { 316 Ignored inline assembler code   317             return ;;                 }                {               }  318             unsigned char value;   316             Ignored inline assembler code  316             return value;;                 } 1716         mmc_add_host(mmc) { /* Function call is skipped due to function is undefined */}               {   870           const char *tmp;   871           unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->init_name);   871           assume(!(__CPAchecker_TMP_0 != ((unsigned long)((const char *)0))));                 {    89             const char *__CPAchecker_TMP_0 = (const char *)(kobj->name);    89             return 
__CPAchecker_TMP_0;;                 }  874           return tmp;;               } 1718         printk("\016%s: W83L51xD", tmp___3) { /* Function call is skipped due to function is undefined */}  1721         printk(" at 0x%x irq %d", host->base, host->irq) { /* Function call is skipped due to function is undefined */}  1723         printk(" dma %d", host->dma) { /* Function call is skipped due to function is undefined */}  1728         printk("\n") { /* Function call is skipped due to function is undefined */}             } 2879     ldv_check_return_value(res_wbsd_probe_48) { /* Function call is skipped due to function is undefined */}  2880     ldv_check_return_value_probe(res_wbsd_probe_48) { /* Function call is skipped due to function is undefined */}  2906     ldv_s_wbsd_driver_platform_driver = ldv_s_wbsd_driver_platform_driver + 1;  2912     goto ldv_35729;  3413     ldv_35729:;  3414     ldv_35745:;  2685     tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}  2689     goto ldv_35744;  2686     ldv_35744:;  2690     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  2690     switch (tmp___0);           {         } 1782       int io;  1783       int irq;  1784       int dma;  1785       unsigned long long tmp;  1786       unsigned long long tmp___0;  1787       unsigned long long tmp___1;  1788       int tmp___2;  1789       struct _ddebug descriptor;  1790       long tmp___3;  1791       int tmp___4;             {    60         struct resource *res;    61         struct resource *tmp;    62         int tmp___0;    61         tmp = pnp_get_resource(dev, 256UL, bar) { /* Function call is skipped due to function is undefined */}    61         res = tmp;               {    38           assume(!(((unsigned long)res) != ((unsigned long)((struct resource *)0))));    40           return 0;;               }   63         assume(!(tmp___0 != 0));    65         return 0ULL;;             } 1787       
io = (int)tmp;             {   151         struct resource *res;   152         struct resource *tmp;   153         int tmp___0;   151         tmp = pnp_get_resource(dev, 1024UL, bar) { /* Function call is skipped due to function is undefined */}   151         res = tmp;               {    38           assume(!(((unsigned long)res) != ((unsigned long)((struct resource *)0))));    40           return 0;;               }  153         assume(!(tmp___0 != 0));   155         return 18446744073709551615ULL;;             } 1788       irq = (int)tmp___0;             {   193         struct resource *tmp;   194         int tmp___0;   193         tmp = pnp_get_resource(dev, 2048UL, bar) { /* Function call is skipped due to function is undefined */}               {    38           assume(((unsigned long)res) != ((unsigned long)((struct resource *)0)));    39           return 1;;               }  193         return tmp___0;;             }            {   175         struct resource *res;   176         struct resource *tmp;   177         int tmp___0;   175         tmp = pnp_get_resource(dev, 2048UL, bar) { /* Function call is skipped due to function is undefined */}   175         res = tmp;               {    38           assume(((unsigned long)res) != ((unsigned long)((struct resource *)0)));    39           return 1;;               }  177         assume(tmp___0 != 0);   178         return res->start;;             } 1790       dma = (int)tmp___1;  1794       descriptor.modname = "wbsd";  1794       descriptor.function = "wbsd_pnp_probe";  1794       descriptor.filename = "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/2264/dscv_tempdir/dscv/ri/331_1a/drivers/mmc/host/wbsd.c";  1794       descriptor.format = "wbsd [%s()]: PnP resources: port %3x irq %d dma %d\n";  1794       descriptor.lineno = 1794U;  1794       descriptor.flags = 0U;  1794       tmp___3 = 
__builtin_expect(((long)(descriptor.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */}  1794       __dynamic_pr_debug(&descriptor, "wbsd [%s()]: PnP resources: port %3x irq %d dma %d\n", "wbsd_pnp_probe", io, irq, dma) { /* Function call is skipped due to function is undefined */}             {           } 1649         struct wbsd_host *host;  1650         struct mmc_host *mmc;  1651         int ret;  1652         void *tmp;  1653         void *tmp___0;  1654         int tmp___1;  1655         unsigned long __ms;  1656         unsigned long tmp___2;  1657         const char *tmp___3;  1650         host = (struct wbsd_host *)0;  1651         mmc = (struct mmc_host *)0;               {  1201           struct mmc_host *mmc;  1202           struct wbsd_host *host;  1203           void *tmp;  1204           struct lock_class_key __key;  1205           struct lock_class_key __key___0;  1207           mmc = mmc_alloc_host(496, dev) { /* Function call is skipped due to function is undefined */}                 {   411             return (void *)(&(host->private));;                 } 1211           host = (struct wbsd_host *)tmp;  1212           host->mmc = mmc;  1214           host->dma = -1;  1219           mmc->ops = &wbsd_ops;  1220           mmc->f_min = 375000U;  1221           mmc->f_max = 24000000U;  1222           mmc->ocr_avail = 3145728U;  1223           mmc->caps = 1U;                 {   291             return &(lock->__annonCompField20.rlock);;                 } 1225           __raw_spin_lock_init(&(host->lock.__annonCompField20.rlock), "&(&host->lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */}  1230           init_timer_key(&(host->ignore_timer), 0U, "(&host->ignore_timer)", &__key___0) { /* Function call is skipped due to function is undefined */}  1231           host->ignore_timer.data = (unsigned long)host;  1232           host->ignore_timer.function = &wbsd_reset_ignore;  1238           
mmc->max_segs = 128U;  1243           mmc->max_req_size = 65536U;  1249           mmc->max_seg_size = mmc->max_req_size;  1255           mmc->max_blk_size = 4087U;  1261           mmc->max_blk_count = mmc->max_req_size;                 {               }  922             dev->driver_data = data;   923             return ;;                 }              {   917           void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);   917           return __CPAchecker_TMP_0;;               } 1658         mmc = (struct mmc_host *)tmp;               {   411           return (void *)(&(host->private));;               } 1659         host = (struct wbsd_host *)tmp___0;               {  1293           int i;  1294           int j;  1295           int k;  1296           int id;  1297           struct resource *tmp;  1298           unsigned char tmp___0;  1299           unsigned char tmp___1;  1300           struct _ddebug descriptor;  1301           long tmp___2;  1300           i = 0;  1300           goto ldv_35470;  1302           goto ldv_35469;  1301           ldv_35469:;  1301           tmp = __request_region(&ioport_resource, (resource_size_t )(config_ports[i]), 2ULL, "wbsd", 0) { /* Function call is skipped due to function is undefined */}  1304           j = 0;  1304           goto ldv_35467;  1306           goto ldv_35466;  1305           ldv_35466:;  1305           id = 65535;  1307           host->config = config_ports[i];  1308           host->unlock_code = (u8 )(unlock_codes[j]);                 {    90             long tmp;    90             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}    92             int __CPAchecker_TMP_0 = (int)(host->unlock_code);                   { 316 Ignored inline assembler code   317               return ;;                   }   93             int __CPAchecker_TMP_1 = (int)(host->unlock_code);                   {                 }316 Ignored inline assembler code   317      
         return ;;                   }                { 316 Ignored inline assembler code   317             return ;;                 }                {   318             unsigned char value;   316             Ignored inline assembler code  316             return value;;                 } 1313           id = ((int)tmp___0) << 8;                 { 316 Ignored inline assembler code   317             return ;;                 }                {   318             unsigned char value;   316             Ignored inline assembler code  316             return value;;                 } 1316           id = ((int)tmp___1) | id;                 {    98             long tmp;    98             tmp = __builtin_expect((host->config) == 0, 0L) { /* Function call is skipped due to function is undefined */}                   {                 }316 Ignored inline assembler code   317               return ;;                   } 1320           k = 0;  1320           goto ldv_35462;  1322           goto ldv_35461;  1321           ldv_35461:;  1322           host->chip_id = id;               }              {             } 1507           int ret;                 {  1349             struct resource *tmp;  1352             tmp = __request_region(&ioport_resource, (resource_size_t )base, 8ULL, "wbsd", 0) { /* Function call is skipped due to function is undefined */}  1355             host->base = base;                 }                {  1457             int ret;  1462             tasklet_init(&(host->card_tasklet), &wbsd_tasklet_card, (unsigned long)host) { /* Function call is skipped due to function is undefined */}  1464             tasklet_init(&(host->fifo_tasklet), &wbsd_tasklet_fifo, (unsigned long)host) { /* Function call is skipped due to function is undefined */}  1466             tasklet_init(&(host->crc_tasklet), &wbsd_tasklet_crc, (unsigned long)host) { /* Function call is skipped due to function is undefined */}  1468             tasklet_init(&(host->timeout_tasklet), 
&wbsd_tasklet_timeout, (unsigned long)host) { /* Function call is skipped due to function is undefined */}  1470             tasklet_init(&(host->finish_tasklet), &wbsd_tasklet_finish, (unsigned long)host) { /* Function call is skipped due to function is undefined */}                   {   147               int tmp;   147               tmp = request_threaded_irq(irq, handler, (irqreturn_t  (*)(int, void *))0, flags, name, dev) { /* Function call is skipped due to function is undefined */}   147               return tmp;;                   } 1480             host->irq = irq;                 }                {               } 1379             int tmp;  1380             void *tmp___0;  1382             tmp = request_dma((unsigned int)dma, "wbsd") { /* Function call is skipped due to function is undefined */}                   {   480               void *tmp___2;   495               tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}   495               return tmp___2;;                   } 1389             host->dma_buffer = (char *)tmp___0;  1391             unsigned long __CPAchecker_TMP_0 = (unsigned long)(host->dma_buffer);  1397             void *__CPAchecker_TMP_1 = (void *)(host->dma_buffer);                 } |              Source code         
     1 #ifndef _ASM_X86_DMA_MAPPING_H
    2 #define _ASM_X86_DMA_MAPPING_H
    3 
    4 /*
    5  * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
    6  * Documentation/DMA-API.txt for documentation.
    7  */
    8 
    9 #include <linux/kmemcheck.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/dma-debug.h>
   12 #include <asm/io.h>
   13 #include <asm/swiotlb.h>
   14 #include <linux/dma-contiguous.h>
   15 
   16 #ifdef CONFIG_ISA
   17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
   18 #else
   19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
   20 #endif
   21 
   22 #define DMA_ERROR_CODE	0
   23 
   24 extern int iommu_merge;
   25 extern struct device x86_dma_fallback_dev;
   26 extern int panic_on_overflow;
   27 
   28 extern struct dma_map_ops *dma_ops;
   29 
/*
 * get_dma_ops - return the DMA operations table used for a device.
 *
 * Without CONFIG_X86_DEV_DMA_OPS every device shares the global dma_ops
 * table.  With it, a device may carry its own table in
 * dev->archdata.dma_ops; fall back to the global table when the device
 * is NULL or has no per-device ops installed.
 */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifndef CONFIG_X86_DEV_DMA_OPS
	return dma_ops;
#else
	/* unlikely(!dev): a NULL device is the rare fallback path */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}
   41 
   42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
   43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
   44 
   45 #define HAVE_ARCH_DMA_SUPPORTED 1
   46 extern int dma_supported(struct device *hwdev, u64 mask);
   47 
   48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
   49 					dma_addr_t *dma_addr, gfp_t flag,
   50 					unsigned long attrs);
   51 
   52 extern void dma_generic_free_coherent(struct device *dev, size_t size,
   53 				      void *vaddr, dma_addr_t dma_addr,
   54 				      unsigned long attrs);
   55 
   56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
   57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
   58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
   59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
   60 #else
   61 
   62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
   63 {
   64 	if (!dev->dma_mask)
   65 		return 0;
   66 
   67 	return addr + size - 1 <= *dev->dma_mask;
   68 }
   69 
   70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
   71 {
   72 	return paddr;
   73 }
   74 
   75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
   76 {
   77 	return daddr;
   78 }
   79 #endif /* CONFIG_X86_DMA_REMAP */
   80 
/*
 * dma_cache_sync - make CPU writes to a coherent buffer visible.
 *
 * x86 DMA is cache coherent, so only pending write buffers need
 * flushing; @dev, @vaddr, @size and @dir are unused here.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}
   87 
   88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
   89 						    gfp_t gfp)
   90 {
   91 	unsigned long dma_mask = 0;
   92 
   93 	dma_mask = dev->coherent_dma_mask;
   94 	if (!dma_mask)
   95 		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
   96 
   97 	return dma_mask;
   98 }
   99 
  100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
  101 {
  102 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
  103 
  104 	if (dma_mask <= DMA_BIT_MASK(24))
  105 		gfp |= GFP_DMA;
  106 #ifdef CONFIG_X86_64
  107 	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
  108 		gfp |= GFP_DMA32;
  109 #endif
  110        return gfp;
  111 }
  112 
  113 #endif                 1 #ifndef _ASM_X86_IO_H
    2 #define _ASM_X86_IO_H
    3 
    4 /*
    5  * This file contains the definitions for the x86 IO instructions
    6  * inb/inw/inl/outb/outw/outl and the "string versions" of the same
    7  * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
    8  * versions of the single-IO instructions (inb_p/inw_p/..).
    9  *
   10  * This file is not meant to be obfuscating: it's just complicated
   11  * to (a) handle it all in a way that makes gcc able to optimize it
   12  * as well as possible and (b) trying to avoid writing the same thing
   13  * over and over again with slight variations and possibly making a
   14  * mistake somewhere.
   15  */
   16 
   17 /*
   18  * Thanks to James van Artsdalen for a better timing-fix than
   19  * the two short jumps: using outb's to a nonexistent port seems
   20  * to guarantee better timings even on fast machines.
   21  *
   22  * On the other hand, I'd like to be sure of a non-existent port:
   23  * I feel a bit unsafe about using 0x80 (should be safe, though)
   24  *
   25  *		Linus
   26  */
   27 
   28  /*
   29   *  Bit simplified and optimized by Jan Hubicka
   30   *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
   31   *
   32   *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
   33   *  isa_read[wl] and isa_write[wl] fixed
   34   *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   35   */
   36 
   37 #define ARCH_HAS_IOREMAP_WC
   38 #define ARCH_HAS_IOREMAP_WT
   39 
   40 #include <linux/string.h>
   41 #include <linux/compiler.h>
   42 #include <asm/page.h>
   43 #include <asm/early_ioremap.h>
   44 #include <asm/pgtable_types.h>
   45 
   46 #define build_mmio_read(name, size, type, reg, barrier) \
   47 static inline type name(const volatile void __iomem *addr) \
   48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
   49 :"m" (*(volatile type __force *)addr) barrier); return ret; }
   50 
   51 #define build_mmio_write(name, size, type, reg, barrier) \
   52 static inline void name(type val, volatile void __iomem *addr) \
   53 { asm volatile("mov" size " %0,%1": :reg (val), \
   54 "m" (*(volatile type __force *)addr) barrier); }
   55 
   56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
   57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
   58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
   59 
   60 build_mmio_read(__readb, "b", unsigned char, "=q", )
   61 build_mmio_read(__readw, "w", unsigned short, "=r", )
   62 build_mmio_read(__readl, "l", unsigned int, "=r", )
   63 
   64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
   65 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
   66 build_mmio_write(writel, "l", unsigned int, "r", :"memory")
   67 
   68 build_mmio_write(__writeb, "b", unsigned char, "q", )
   69 build_mmio_write(__writew, "w", unsigned short, "r", )
   70 build_mmio_write(__writel, "l", unsigned int, "r", )
   71 
   72 #define readb_relaxed(a) __readb(a)
   73 #define readw_relaxed(a) __readw(a)
   74 #define readl_relaxed(a) __readl(a)
   75 #define __raw_readb __readb
   76 #define __raw_readw __readw
   77 #define __raw_readl __readl
   78 
   79 #define writeb_relaxed(v, a) __writeb(v, a)
   80 #define writew_relaxed(v, a) __writew(v, a)
   81 #define writel_relaxed(v, a) __writel(v, a)
   82 #define __raw_writeb __writeb
   83 #define __raw_writew __writew
   84 #define __raw_writel __writel
   85 
   86 #define mmiowb() barrier()
   87 
   88 #ifdef CONFIG_X86_64
   89 
   90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
   91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
   92 
   93 #define readq_relaxed(a)	readq(a)
   94 #define writeq_relaxed(v, a)	writeq(v, a)
   95 
   96 #define __raw_readq(a)		readq(a)
   97 #define __raw_writeq(val, addr)	writeq(val, addr)
   98 
   99 /* Let people know that we have them */
  100 #define readq			readq
  101 #define writeq			writeq
  102 
  103 #endif
  104 
  105 /**
  106  *	virt_to_phys	-	map virtual addresses to physical
  107  *	@address: address to remap
  108  *
  109  *	The returned physical address is the physical (CPU) mapping for
  110  *	the memory address given. It is only valid to use this function on
  111  *	addresses directly mapped or allocated via kmalloc.
  112  *
  113  *	This function does not give bus mappings for DMA transfers. In
  114  *	almost all conceivable cases a device driver should not be using
  115  *	this function
  116  */
  117 
static inline phys_addr_t virt_to_phys(volatile void *address)
{
	/* Valid only for direct-mapped/kmalloc addresses; __pa() undoes
	 * the kernel's linear mapping offset. Not a bus address for DMA. */
	return __pa(address);
}
  122 
  123 /**
  124  *	phys_to_virt	-	map physical address to virtual
  125  *	@address: address to remap
  126  *
  127  *	The returned virtual address is a current CPU mapping for
  128  *	the memory address given. It is only valid to use this function on
  129  *	addresses that have a kernel mapping
  130  *
  131  *	This function does not handle bus mappings for DMA transfers. In
  132  *	almost all conceivable cases a device driver should not be using
  133  *	this function
  134  */
  135 
static inline void *phys_to_virt(phys_addr_t address)
{
	/* Valid only for physical addresses that have a kernel linear
	 * mapping; __va() applies the direct-map offset. */
	return __va(address);
}
  140 
  141 /*
  142  * Change "struct page" to physical address.
  143  */
  144 #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
  145 
  146 /*
  147  * ISA I/O bus memory addresses are 1:1 with the physical address.
  148  * However, we truncate the address to unsigned int to avoid undesirable
  149  * promitions in legacy drivers.
  150  */
  151 static inline unsigned int isa_virt_to_bus(volatile void *address)
  152 {
  153 	return (unsigned int)virt_to_phys(address);
  154 }
  155 #define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
  156 #define isa_bus_to_virt		phys_to_virt
  157 
  158 /*
  159  * However PCI ones are not necessarily 1:1 and therefore these interfaces
  160  * are forbidden in portable PCI drivers.
  161  *
  162  * Allow them on x86 for legacy drivers, though.
  163  */
  164 #define virt_to_bus virt_to_phys
  165 #define bus_to_virt phys_to_virt
  166 
  167 /**
  168  * ioremap     -   map bus memory into CPU space
  169  * @offset:    bus address of the memory
  170  * @size:      size of the resource to map
  171  *
  172  * ioremap performs a platform specific sequence of operations to
  173  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  174  * writew/writel functions and the other mmio helpers. The returned
  175  * address is not guaranteed to be usable directly as a virtual
  176  * address.
  177  *
  178  * If the area you are trying to map is a PCI BAR you should have a
  179  * look at pci_iomap().
  180  */
  181 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
  182 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
  183 #define ioremap_uc ioremap_uc
  184 
  185 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
  186 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
  187 				unsigned long prot_val);
  188 
  189 /*
  190  * The default ioremap() behavior is non-cached:
  191  */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	/* The default ioremap() mapping is uncached. */
	return ioremap_nocache(offset, size);
}
  196 
  197 extern void iounmap(volatile void __iomem *addr);
  198 
  199 extern void set_iounmap_nonlazy(void);
  200 
  201 #ifdef __KERNEL__
  202 
  203 #include <asm-generic/iomap.h>
  204 
  205 /*
  206  * Convert a virtual cached pointer to an uncached pointer
  207  */
  208 #define xlate_dev_kmem_ptr(p)	p
  209 
  210 static inline void
  211 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
  212 {
  213 	memset((void __force *)addr, val, count);
  214 }
  215 
  216 static inline void
  217 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
  218 {
  219 	memcpy(dst, (const void __force *)src, count);
  220 }
  221 
  222 static inline void
  223 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
  224 {
  225 	memcpy((void __force *)dst, src, count);
  226 }
  227 
  228 /*
  229  * ISA space is 'always mapped' on a typical x86 system, no need to
  230  * explicitly ioremap() it. The fact that the ISA IO space is mapped
  231  * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
  232  * are physical addresses. The following constant pointer can be
  233  * used as the IO-area pointer (it can be iounmapped as well, so the
  234  * analogy with PCI is quite large):
  235  */
  236 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
  237 
  238 /*
  239  *	Cache management
  240  *
  241  *	This needed for two cases
  242  *	1. Out of order aware processors
  243  *	2. Accidentally out of order processors (PPro errata #51)
  244  */
  245 
/*
 * flush_write_buffers - drain pending CPU write buffers.
 *
 * Only needed on processors with the PPro store-ordering erratum; the
 * locked add is a serializing no-op. Elsewhere this compiles to nothing.
 */
static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_PPRO_FENCE)
	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
  252 
  253 #endif /* __KERNEL__ */
  254 
  255 extern void native_io_delay(void);
  256 
  257 extern int io_delay_type;
  258 extern void io_delay_init(void);
  259 
  260 #if defined(CONFIG_PARAVIRT)
  261 #include <asm/paravirt.h>
  262 #else
  263 
/*
 * slow_down_io - insert a small delay between port I/O accesses.
 *
 * Used by the *_p port accessors for slow legacy hardware; with
 * REALLY_SLOW_IO the delay is quadrupled.
 */
static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}
  273 
  274 #endif
  275 
  276 #define BUILDIO(bwl, bw, type)						\
  277 static inline void out##bwl(unsigned type value, int port)		\
  278 {									\
  279 	asm volatile("out" #bwl " %" #bw "0, %w1"			\
  280 		     : : "a"(value), "Nd"(port));			\
  281 }									\
  282 									\
  283 static inline unsigned type in##bwl(int port)				\
  284 {									\
  285 	unsigned type value;						\
  286 	asm volatile("in" #bwl " %w1, %" #bw "0"			\
  287 		     : "=a"(value) : "Nd"(port));			\
  288 	return value;							\
  289 }									\
  290 									\
  291 static inline void out##bwl##_p(unsigned type value, int port)		\
  292 {									\
  293 	out##bwl(value, port);						\
  294 	slow_down_io();							\
  295 }									\
  296 									\
  297 static inline unsigned type in##bwl##_p(int port)			\
  298 {									\
  299 	unsigned type value = in##bwl(port);				\
  300 	slow_down_io();							\
  301 	return value;							\
  302 }									\
  303 									\
  304 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
  305 {									\
  306 	asm volatile("rep; outs" #bwl					\
  307 		     : "+S"(addr), "+c"(count) : "d"(port));		\
  308 }									\
  309 									\
  310 static inline void ins##bwl(int port, void *addr, unsigned long count)	\
  311 {									\
  312 	asm volatile("rep; ins" #bwl					\
  313 		     : "+D"(addr), "+c"(count) : "d"(port));		\
  314 }
  315 
  316 BUILDIO(b, b, char)
  317 BUILDIO(w, w, short)
  318 BUILDIO(l, , int)
  319 
  320 extern void *xlate_dev_mem_ptr(phys_addr_t phys);
  321 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
  322 
  323 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  324 				enum page_cache_mode pcm);
  325 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  326 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
  327 
  328 extern bool is_early_ioremap_ptep(pte_t *ptep);
  329 
  330 #ifdef CONFIG_XEN
  331 #include <xen/xen.h>
  332 struct bio_vec;
  333 
  334 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
  335 				      const struct bio_vec *vec2);
  336 
  337 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
  338 	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
  339 	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
  340 #endif	/* CONFIG_XEN */
  341 
  342 #define IO_SPACE_LIMIT 0xffff
  343 
  344 #ifdef CONFIG_MTRR
  345 extern int __must_check arch_phys_wc_index(int handle);
  346 #define arch_phys_wc_index arch_phys_wc_index
  347 
  348 extern int __must_check arch_phys_wc_add(unsigned long base,
  349 					 unsigned long size);
  350 extern void arch_phys_wc_del(int handle);
  351 #define arch_phys_wc_add arch_phys_wc_add
  352 #endif
  353 
  354 #endif /* _ASM_X86_IO_H */                 1 
    2 /*
    3  *  linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver
    4  *
    5  *  Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
    6  *
    7  * This program is free software; you can redistribute it and/or modify
    8  * it under the terms of the GNU General Public License as published by
    9  * the Free Software Foundation; either version 2 of the License, or (at
   10  * your option) any later version.
   11  *
   12  *
   13  * Warning!
   14  *
   15  * Changes to the FIFO system should be done with extreme care since
   16  * the hardware is full of bugs related to the FIFO. Known issues are:
   17  *
   18  * - FIFO size field in FSR is always zero.
   19  *
   20  * - FIFO interrupts tend not to work as they should. Interrupts are
   21  *   triggered only for full/empty events, not for threshold values.
   22  *
   23  * - On APIC systems the FIFO empty interrupt is sometimes lost.
   24  */
   25 
   26 #include <linux/module.h>
   27 #include <linux/moduleparam.h>
   28 #include <linux/init.h>
   29 #include <linux/ioport.h>
   30 #include <linux/platform_device.h>
   31 #include <linux/interrupt.h>
   32 #include <linux/dma-mapping.h>
   33 #include <linux/delay.h>
   34 #include <linux/pnp.h>
   35 #include <linux/highmem.h>
   36 #include <linux/mmc/host.h>
   37 #include <linux/scatterlist.h>
   38 #include <linux/slab.h>
   39 
   40 #include <asm/io.h>
   41 #include <asm/dma.h>
   42 
   43 #include "wbsd.h"
   44 
   45 #define DRIVER_NAME "wbsd"
   46 
   47 #define DBG(x...) \
   48 	pr_debug(DRIVER_NAME ": " x)
   49 #define DBGF(f, x...) \
   50 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
   51 
   52 /*
   53  * Device resources
   54  */
   55 
#ifdef CONFIG_PNP

/* PnP IDs of the Winbond parts this driver binds to. */
static const struct pnp_device_id pnp_dev_table[] = {
	{ "WEC0517", 0 },
	{ "WEC0518", 0 },
	{ "", 0 },
};

MODULE_DEVICE_TABLE(pnp, pnp_dev_table);

#endif /* CONFIG_PNP */

/* Candidate Super-I/O configuration ports and their unlock key bytes. */
static const int config_ports[] = { 0x2E, 0x4E };
static const int unlock_codes[] = { 0x83, 0x87 };

/* Chip IDs accepted by the probe code. */
static const int valid_ids[] = {
	0x7112,
};

/* Module parameters; without CONFIG_PNP the driver always uses the
 * fixed legacy resources below (nopnp forced on). */
#ifdef CONFIG_PNP
static unsigned int param_nopnp = 0;
#else
static const unsigned int param_nopnp = 1;
#endif
static unsigned int param_io = 0x248;
static unsigned int param_irq = 6;
static int param_dma = 2;
   83 
   84 /*
   85  * Basic functions
   86  */
   87 
/* Open the chip's configuration interface. */
static inline void wbsd_unlock_config(struct wbsd_host *host)
{
	/* A zero config port means no config interface was found. */
	BUG_ON(host->config == 0);

	/* The unlock code must be written twice in succession. */
	outb(host->unlock_code, host->config);
	outb(host->unlock_code, host->config);
}
   95 
/* Close the chip's configuration interface again. */
static inline void wbsd_lock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);

	outb(LOCK_CODE, host->config);
}
  102 
/* Write @value into configuration register @reg (interface must be unlocked). */
static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
{
	BUG_ON(host->config == 0);

	/* Select the register at the config port, write data one port up. */
	outb(reg, host->config);
	outb(value, host->config + 1);
}
  110 
/* Read configuration register @reg (interface must be unlocked). */
static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
{
	BUG_ON(host->config == 0);

	/* Select the register at the config port, read data one port up. */
	outb(reg, host->config);
	return inb(host->config + 1);
}
  118 
/* Indexed register write: select @index via IDXR, write @value via DATAR. */
static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
{
	outb(index, host->base + WBSD_IDXR);
	outb(value, host->base + WBSD_DATAR);
}
  124 
/* Indexed register read: select @index via IDXR, read back via DATAR. */
static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
{
	outb(index, host->base + WBSD_IDXR);
	return inb(host->base + WBSD_DATAR);
}
  130 
  131 /*
  132  * Common routines
  133  */
  134 
/*
 * wbsd_init_device - bring the chip's SD/MMC part into a known state.
 *
 * Resets chip and FIFO, samples card presence into host->flags and
 * enables the interrupt sources the driver handles.  Pure port I/O;
 * the statement order follows the hardware's required sequence.
 */
static void wbsd_init_device(struct wbsd_host *host)
{
	u8 setup, ier;

	/*
	 * Reset chip (SD/MMC part) and fifo.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Set DAT3 to input
	 */
	setup &= ~WBSD_DAT3_H;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Read back default clock.
	 */
	host->clk = wbsd_read_index(host, WBSD_IDX_CLK);

	/*
	 * Power down port.
	 */
	outb(WBSD_POWER_N, host->base + WBSD_CSR);

	/*
	 * Set maximum timeout.
	 */
	wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);

	/*
	 * Test for card presence
	 */
	if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
		host->flags |= WBSD_FCARD_PRESENT;
	else
		host->flags &= ~WBSD_FCARD_PRESENT;

	/*
	 * Enable interesting interrupts.
	 */
	ier = 0;
	ier |= WBSD_EINT_CARD;
	ier |= WBSD_EINT_FIFO_THRE;
	ier |= WBSD_EINT_CRC;
	ier |= WBSD_EINT_TIMEOUT;
	ier |= WBSD_EINT_TC;

	outb(ier, host->base + WBSD_EIR);

	/*
	 * Clear interrupts.
	 */
	inb(host->base + WBSD_ISR);
}
  193 
/* Soft-reset the SD/MMC part of the chip, logging the event. */
static void wbsd_reset(struct wbsd_host *host)
{
	u8 setup;

	pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc));

	/*
	 * Soft reset of chip (SD/MMC part).
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
}
  207 
/*
 * wbsd_request_end - finish a request and hand it back to the MMC core.
 *
 * Called with host->lock held; the lock is dropped around
 * mmc_request_done() because the MMC layer may re-enter the driver,
 * then reacquired before returning.  Releases the ISA DMA channel
 * when one was in use (host->dma >= 0).
 */
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
  236 
  237 /*
  238  * Scatter/gather functions
  239  */
  240 
  241 static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
  242 {
  243 	/*
  244 	 * Get info. about SG list from data structure.
  245 	 */
  246 	host->cur_sg = data->sg;
  247 	host->num_sg = data->sg_len;
  248 
  249 	host->offset = 0;
  250 	host->remain = host->cur_sg->length;
  251 }
  252 
  253 static inline int wbsd_next_sg(struct wbsd_host *host)
  254 {
  255 	/*
  256 	 * Skip to next SG entry.
  257 	 */
  258 	host->cur_sg++;
  259 	host->num_sg--;
  260 
  261 	/*
  262 	 * Any entries left?
  263 	 */
  264 	if (host->num_sg > 0) {
  265 		host->offset = 0;
  266 		host->remain = host->cur_sg->length;
  267 	}
  268 
  269 	return host->num_sg;
  270 }
  271 
  272 static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
  273 {
  274 	return sg_virt(host->cur_sg);
  275 }
  276 
  277 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
  278 {
  279 	unsigned int len, i;
  280 	struct scatterlist *sg;
  281 	char *dmabuf = host->dma_buffer;
  282 	char *sgbuf;
  283 
  284 	sg = data->sg;
  285 	len = data->sg_len;
  286 
  287 	for (i = 0; i < len; i++) {
  288 		sgbuf = sg_virt(&sg[i]);
  289 		memcpy(dmabuf, sgbuf, sg[i].length);
  290 		dmabuf += sg[i].length;
  291 	}
  292 }
  293 
  294 static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
  295 {
  296 	unsigned int len, i;
  297 	struct scatterlist *sg;
  298 	char *dmabuf = host->dma_buffer;
  299 	char *sgbuf;
  300 
  301 	sg = data->sg;
  302 	len = data->sg_len;
  303 
  304 	for (i = 0; i < len; i++) {
  305 		sgbuf = sg_virt(&sg[i]);
  306 		memcpy(sgbuf, dmabuf, sg[i].length);
  307 		dmabuf += sg[i].length;
  308 	}
  309 }
  310 
  311 /*
  312  * Command handling
  313  */
  314 
/*
 * Read a 48-bit (short) command response from the chip.  Sets
 * cmd->error to -EILSEQ when the chip reports a different response
 * length than expected.
 */
static inline void wbsd_get_short_reply(struct wbsd_host *host,
					struct mmc_command *cmd)
{
	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
		cmd->error = -EILSEQ;
		return;
	}

	/* Assemble resp[0] from four byte registers, MSB first. */
	cmd->resp[0]  = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
	cmd->resp[1]  = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
}
  332 
/*
 * Read a 136-bit (long) command response from the chip into
 * cmd->resp[0..3].  Sets cmd->error to -EILSEQ on a response-length
 * mismatch.
 */
static inline void wbsd_get_long_reply(struct wbsd_host *host,
	struct mmc_command *cmd)
{
	int i;

	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
		cmd->error = -EILSEQ;
		return;
	}

	/* Each 32-bit word is built from four byte registers, MSB first. */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] =
			wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
	}
}
  357 
/*
 * wbsd_send_command - issue one MMC command and collect its response.
 *
 * Busy-waits (no timeout of its own) until the chip reports that card
 * traffic has finished, then decodes the accumulated interrupt status
 * into cmd->error and reads back the response if one was expected.
 */
static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
{
	int i;
	u8 status, isr;

	/*
	 * Clear accumulated ISR. The interrupt routine
	 * will fill this one with events that occur during
	 * transfer.
	 */
	host->isr = 0;

	/*
	 * Send the command (CRC calculated by host).
	 */
	outb(cmd->opcode, host->base + WBSD_CMDR);
	for (i = 3; i >= 0; i--)
		outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);

	cmd->error = 0;

	/*
	 * Wait for the request to complete.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & WBSD_CARDTRAFFIC);

	/*
	 * Do we expect a reply?
	 */
	if (cmd->flags & MMC_RSP_PRESENT) {
		/*
		 * Read back status.
		 */
		isr = host->isr;

		/* Card removed? */
		if (isr & WBSD_INT_CARD)
			cmd->error = -ENOMEDIUM;
		/* Timeout? */
		else if (isr & WBSD_INT_TIMEOUT)
			cmd->error = -ETIMEDOUT;
		/* CRC? */
		else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
			cmd->error = -EILSEQ;
		/* All ok */
		else {
			if (cmd->flags & MMC_RSP_136)
				wbsd_get_long_reply(host, cmd);
			else
				wbsd_get_short_reply(host, cmd);
		}
	}
}
  413 
  414 /*
  415  * Data functions
  416  */
  417 
/*
 * Drain the chip's data FIFO into the current scatter-gather buffer.
 * Used for read transfers; advances host->offset/host->remain and
 * data->bytes_xfered as bytes are consumed.
 */
static void wbsd_empty_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Handle excessive data: if the scatter list is already
	 * exhausted there is nowhere left to put FIFO contents.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Drain the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing: full => 16 bytes available, above
		 * the threshold => at least 8, otherwise assume one.
		 */
		if (fsr & WBSD_FIFO_FULL)
			fifo = 16;
		else if (fsr & WBSD_FIFO_FUTHRE)
			fifo = 8;
		else
			fifo = 1;

		for (i = 0; i < fifo; i++) {
			*buffer = inb(host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * This is a very dirty hack to solve a
	 * hardware problem. The chip doesn't trigger
	 * FIFO threshold interrupts properly.
	 */
	if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
		tasklet_schedule(&host->fifo_tasklet);
}
  479 
/*
 * Feed the chip's data FIFO from the current scatter-gather buffer.
 * Used for write transfers; advances host->offset/host->remain and
 * data->bytes_xfered as bytes are produced.
 */
static void wbsd_fill_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Check that we aren't being called after the
	 * entire buffer has been transferred.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Fill the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing: empty => 0 bytes queued, below the
		 * threshold => at most 8, otherwise assume 15 (the
		 * inner loop then writes the remaining free slots).
		 */
		if (fsr & WBSD_FIFO_EMPTY)
			fifo = 0;
		else if (fsr & WBSD_FIFO_EMTHRE)
			fifo = 8;
		else
			fifo = 15;

		for (i = 16; i > fifo; i--) {
			outb(*buffer, host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * The controller stops sending interrupts for
	 * 'FIFO empty' under certain conditions. So we
	 * need to be a bit more pro-active.
	 */
	tasklet_schedule(&host->fifo_tasklet);
}
  541 
  542 static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
  543 {
  544 	u16 blksize;
  545 	u8 setup;
  546 	unsigned long dmaflags;
  547 	unsigned int size;
  548 
  549 	/*
  550 	 * Calculate size.
  551 	 */
  552 	size = data->blocks * data->blksz;
  553 
  554 	/*
  555 	 * Check timeout values for overflow.
  556 	 * (Yes, some cards cause this value to overflow).
  557 	 */
  558 	if (data->timeout_ns > 127000000)
  559 		wbsd_write_index(host, WBSD_IDX_TAAC, 127);
  560 	else {
  561 		wbsd_write_index(host, WBSD_IDX_TAAC,
  562 			data->timeout_ns / 1000000);
  563 	}
  564 
  565 	if (data->timeout_clks > 255)
  566 		wbsd_write_index(host, WBSD_IDX_NSAC, 255);
  567 	else
  568 		wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
  569 
  570 	/*
  571 	 * Inform the chip of how large blocks will be
  572 	 * sent. It needs this to determine when to
  573 	 * calculate CRC.
  574 	 *
  575 	 * Space for CRC must be included in the size.
  576 	 * Two bytes are needed for each data line.
  577 	 */
  578 	if (host->bus_width == MMC_BUS_WIDTH_1) {
  579 		blksize = data->blksz + 2;
  580 
  581 		wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
  582 		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
  583 	} else if (host->bus_width == MMC_BUS_WIDTH_4) {
  584 		blksize = data->blksz + 2 * 4;
  585 
  586 		wbsd_write_index(host, WBSD_IDX_PBSMSB,
  587 			((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
  588 		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
  589 	} else {
  590 		data->error = -EINVAL;
  591 		return;
  592 	}
  593 
  594 	/*
  595 	 * Clear the FIFO. This is needed even for DMA
  596 	 * transfers since the chip still uses the FIFO
  597 	 * internally.
  598 	 */
  599 	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
  600 	setup |= WBSD_FIFO_RESET;
  601 	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
  602 
  603 	/*
  604 	 * DMA transfer?
  605 	 */
  606 	if (host->dma >= 0) {
  607 		/*
  608 		 * The buffer for DMA is only 64 kB.
  609 		 */
  610 		BUG_ON(size > 0x10000);
  611 		if (size > 0x10000) {
  612 			data->error = -EINVAL;
  613 			return;
  614 		}
  615 
  616 		/*
  617 		 * Transfer data from the SG list to
  618 		 * the DMA buffer.
  619 		 */
  620 		if (data->flags & MMC_DATA_WRITE)
  621 			wbsd_sg_to_dma(host, data);
  622 
  623 		/*
  624 		 * Initialise the ISA DMA controller.
  625 		 */
  626 		dmaflags = claim_dma_lock();
  627 		disable_dma(host->dma);
  628 		clear_dma_ff(host->dma);
  629 		if (data->flags & MMC_DATA_READ)
  630 			set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
  631 		else
  632 			set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
  633 		set_dma_addr(host->dma, host->dma_addr);
  634 		set_dma_count(host->dma, size);
  635 
  636 		enable_dma(host->dma);
  637 		release_dma_lock(dmaflags);
  638 
  639 		/*
  640 		 * Enable DMA on the host.
  641 		 */
  642 		wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
  643 	} else {
  644 		/*
  645 		 * This flag is used to keep printk
  646 		 * output to a minimum.
  647 		 */
  648 		host->firsterr = 1;
  649 
  650 		/*
  651 		 * Initialise the SG list.
  652 		 */
  653 		wbsd_init_sg(host, data);
  654 
  655 		/*
  656 		 * Turn off DMA.
  657 		 */
  658 		wbsd_write_index(host, WBSD_IDX_DMA, 0);
  659 
  660 		/*
  661 		 * Set up FIFO threshold levels (and fill
  662 		 * buffer if doing a write).
  663 		 */
  664 		if (data->flags & MMC_DATA_READ) {
  665 			wbsd_write_index(host, WBSD_IDX_FIFOEN,
  666 				WBSD_FIFOEN_FULL | 8);
  667 		} else {
  668 			wbsd_write_index(host, WBSD_IDX_FIFOEN,
  669 				WBSD_FIFOEN_EMPTY | 8);
  670 			wbsd_fill_fifo(host);
  671 		}
  672 	}
  673 
  674 	data->error = 0;
  675 }
  676 
/*
 * Finish a data transfer: send the stop command if one was requested,
 * wait for the controller to go idle, wind down DMA (if used) and
 * account the number of bytes actually transferred, then complete the
 * request.
 */
static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
{
	unsigned long dmaflags;
	int count;
	u8 status;

	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);

	/*
	 * Wait for the controller to leave data
	 * transfer state.
	 * NOTE(review): busy-wait with no timeout.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Turn off the ISA DMA controller and read how much of
		 * the programmed count was left untransferred.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);

		/* Round the transferred byte count down to whole blocks. */
		data->bytes_xfered = host->mrq->data->blocks *
			host->mrq->data->blksz - count;
		data->bytes_xfered -= data->bytes_xfered % data->blksz;

		/*
		 * Any leftover data?
		 */
		if (count) {
			pr_err("%s: Incomplete DMA transfer. "
				"%d bytes left.\n",
				mmc_hostname(host->mmc), count);

			if (!data->error)
				data->error = -EIO;
		} else {
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);
		}

		/* On error, the final (possibly partial) block is suspect. */
		if (data->error) {
			if (data->bytes_xfered)
				data->bytes_xfered -= data->blksz;
		}
	}

	wbsd_request_end(host, host->mrq);
}
  748 
  749 /*****************************************************************************\
  750  *                                                                           *
  751  * MMC layer callbacks                                                       *
  752  *                                                                           *
  753 \*****************************************************************************/
  754 
/*
 * mmc_host_ops.request callback: process one MMC request.
 * Runs with the host lock held (bottom halves disabled) until the
 * request either finishes synchronously or is handed off to the
 * FIFO/finish tasklets.
 */
static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct wbsd_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;

	/*
	 * Disable tasklets to avoid a deadlock.
	 */
	spin_lock_bh(&host->lock);

	BUG_ON(host->mrq != NULL);

	cmd = mrq->cmd;

	host->mrq = mrq;

	/*
	 * Check that there is actually a card in the slot.
	 */
	if (!(host->flags & WBSD_FCARD_PRESENT)) {
		cmd->error = -ENOMEDIUM;
		goto done;
	}

	if (cmd->data) {
		/*
		 * The hardware is so delightfully stupid that it has a list
		 * of "data" commands. If a command isn't on this list, it'll
		 * just go back to the idle state and won't send any data
		 * interrupts.
		 */
		switch (cmd->opcode) {
		case 11:
		case 17:
		case 18:
		case 20:
		case 24:
		case 25:
		case 26:
		case 27:
		case 30:
		case 42:
		case 56:
			break;

		/* ACMDs. We don't keep track of state, so we just treat them
		 * like any other command. */
		case 51:
			break;

		default:
#ifdef CONFIG_MMC_DEBUG
			pr_warn("%s: Data command %d is not supported by this controller\n",
				mmc_hostname(host->mmc), cmd->opcode);
#endif
			cmd->error = -EINVAL;

			goto done;
		}
	}

	/*
	 * Does the request include data? If so, program the
	 * transfer before issuing the command.
	 */
	if (cmd->data) {
		wbsd_prepare_data(host, cmd->data);

		if (cmd->data->error)
			goto done;
	}

	wbsd_send_command(host, cmd);

	/*
	 * If this is a data transfer the request
	 * will be finished after the data has
	 * transferred.
	 */
	if (cmd->data && !cmd->error) {
		/*
		 * Dirty fix for hardware bug: in PIO mode, kick the
		 * FIFO tasklet manually since the chip's threshold
		 * interrupts are unreliable.
		 */
		if (host->dma == -1)
			tasklet_schedule(&host->fifo_tasklet);

		spin_unlock_bh(&host->lock);

		return;
	}

done:
	wbsd_request_end(host, mrq);

	spin_unlock_bh(&host->lock);
}
  850 
/*
 * mmc_host_ops.set_ios callback: apply power mode, clock rate,
 * chip-select and bus-width settings requested by the MMC core.
 */
static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 clk, setup, pwr;

	spin_lock_bh(&host->lock);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF)
		wbsd_init_device(host);

	/* Map the requested frequency onto the chip's discrete rates. */
	if (ios->clock >= 24000000)
		clk = WBSD_CLK_24M;
	else if (ios->clock >= 16000000)
		clk = WBSD_CLK_16M;
	else if (ios->clock >= 12000000)
		clk = WBSD_CLK_12M;
	else
		clk = WBSD_CLK_375K;

	/*
	 * Only write to the clock register when
	 * there is an actual change.
	 */
	if (clk != host->clk) {
		wbsd_write_index(host, WBSD_IDX_CLK, clk);
		host->clk = clk;
	}

	/*
	 * Power up card.
	 */
	if (ios->power_mode != MMC_POWER_OFF) {
		pwr = inb(host->base + WBSD_CSR);
		pwr &= ~WBSD_POWER_N;
		outb(pwr, host->base + WBSD_CSR);
	}

	/*
	 * MMC cards need to have pin 1 high during init.
	 * It wreaks havoc with the card detection though so
	 * that needs to be disabled.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	if (ios->chip_select == MMC_CS_HIGH) {
		BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
		setup |= WBSD_DAT3_H;
		host->flags |= WBSD_FIGNORE_DETECT;
	} else {
		if (setup & WBSD_DAT3_H) {
			setup &= ~WBSD_DAT3_H;

			/*
			 * We cannot resume card detection immediately
			 * because of capacitance and delays in the chip.
			 */
			mod_timer(&host->ignore_timer, jiffies + HZ / 100);
		}
	}
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Store bus width for later. Will be used when
	 * setting up the data transfer.
	 */
	host->bus_width = ios->bus_width;

	spin_unlock_bh(&host->lock);
}
  923 
/*
 * mmc_host_ops.get_ro callback: report the write-protect state.
 * Pulses the WBSD_MSLED bit in the CSR and samples WBSD_WRPT while it
 * is asserted. Returns 1 if the card is write protected, 0 otherwise.
 */
static int wbsd_get_ro(struct mmc_host *mmc)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 csr;

	spin_lock_bh(&host->lock);

	/* Assert MSLED... */
	csr = inb(host->base + WBSD_CSR);
	csr |= WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	/* ...give the hardware time to settle... */
	mdelay(1);

	/* ...sample the CSR and de-assert MSLED again. */
	csr = inb(host->base + WBSD_CSR);
	csr &= ~WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	spin_unlock_bh(&host->lock);

	return !!(csr & WBSD_WRPT);
}
  945 
/* Host operations exposed to the MMC core. */
static const struct mmc_host_ops wbsd_ops = {
	.request	= wbsd_request,
	.set_ios	= wbsd_set_ios,
	.get_ro		= wbsd_get_ro,
};
  951 
  952 /*****************************************************************************\
  953  *                                                                           *
  954  * Interrupt handling                                                        *
  955  *                                                                           *
  956 \*****************************************************************************/
  957 
  958 /*
  959  * Helper function to reset detection ignore
  960  */
  961 
/*
 * Timer callback: re-enable card detection after the blackout period
 * that follows releasing DAT3 (see wbsd_set_ios()).
 */
static void wbsd_reset_ignore(unsigned long data)
{
	struct wbsd_host *host = (struct wbsd_host *)data;

	BUG_ON(host == NULL);

	DBG("Resetting card detection ignore\n");

	spin_lock_bh(&host->lock);

	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Card status might have changed during the
	 * blackout.
	 */
	tasklet_schedule(&host->card_tasklet);

	spin_unlock_bh(&host->lock);
}
  982 
  983 /*
  984  * Tasklets
  985  */
  986 
  987 static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
  988 {
  989 	WARN_ON(!host->mrq);
  990 	if (!host->mrq)
  991 		return NULL;
  992 
  993 	WARN_ON(!host->mrq->cmd);
  994 	if (!host->mrq->cmd)
  995 		return NULL;
  996 
  997 	WARN_ON(!host->mrq->cmd->data);
  998 	if (!host->mrq->cmd->data)
  999 		return NULL;
 1000 
 1001 	return host->mrq->cmd->data;
 1002 }
 1003 
/*
 * Card-detect tasklet: sample the card-present bit, update driver
 * state, abort any request in flight on removal, and notify the MMC
 * core of the change (with a debounce delay on insertion).
 */
static void wbsd_tasklet_card(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	u8 csr;
	int delay = -1;

	spin_lock(&host->lock);

	/* Detection is temporarily suppressed after DAT3 games. */
	if (host->flags & WBSD_FIGNORE_DETECT) {
		spin_unlock(&host->lock);
		return;
	}

	csr = inb(host->base + WBSD_CSR);
	WARN_ON(csr == 0xff);

	if (csr & WBSD_CARDPRESENT) {
		if (!(host->flags & WBSD_FCARD_PRESENT)) {
			DBG("Card inserted\n");
			host->flags |= WBSD_FCARD_PRESENT;

			/* Debounce insertion before rescanning. */
			delay = 500;
		}
	} else if (host->flags & WBSD_FCARD_PRESENT) {
		DBG("Card removed\n");
		host->flags &= ~WBSD_FCARD_PRESENT;

		if (host->mrq) {
			pr_err("%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			wbsd_reset(host);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		delay = 0;
	}

	/*
	 * Unlock first since we might get a call back.
	 */

	spin_unlock(&host->lock);

	if (delay != -1)
		mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
 1052 
/*
 * FIFO tasklet: move data between the FIFO and the scatter list in the
 * appropriate direction, and hand the request to the finish tasklet
 * once the scatter list is exhausted.
 */
static void wbsd_tasklet_fifo(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	if (data->flags & MMC_DATA_WRITE)
		wbsd_fill_fifo(host);
	else
		wbsd_empty_fifo(host);

	/*
	 * Done? Then stop FIFO interrupts and finish the request.
	 */
	if (host->num_sg == 0) {
		wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
		tasklet_schedule(&host->finish_tasklet);
	}

end:
	spin_unlock(&host->lock);
}
 1083 
 1084 static void wbsd_tasklet_crc(unsigned long param)
 1085 {
 1086 	struct wbsd_host *host = (struct wbsd_host *)param;
 1087 	struct mmc_data *data;
 1088 
 1089 	spin_lock(&host->lock);
 1090 
 1091 	if (!host->mrq)
 1092 		goto end;
 1093 
 1094 	data = wbsd_get_data(host);
 1095 	if (!data)
 1096 		goto end;
 1097 
 1098 	DBGF("CRC error\n");
 1099 
 1100 	data->error = -EILSEQ;
 1101 
 1102 	tasklet_schedule(&host->finish_tasklet);
 1103 
 1104 end:
 1105 	spin_unlock(&host->lock);
 1106 }
 1107 
 1108 static void wbsd_tasklet_timeout(unsigned long param)
 1109 {
 1110 	struct wbsd_host *host = (struct wbsd_host *)param;
 1111 	struct mmc_data *data;
 1112 
 1113 	spin_lock(&host->lock);
 1114 
 1115 	if (!host->mrq)
 1116 		goto end;
 1117 
 1118 	data = wbsd_get_data(host);
 1119 	if (!data)
 1120 		goto end;
 1121 
 1122 	DBGF("Timeout\n");
 1123 
 1124 	data->error = -ETIMEDOUT;
 1125 
 1126 	tasklet_schedule(&host->finish_tasklet);
 1127 
 1128 end:
 1129 	spin_unlock(&host->lock);
 1130 }
 1131 
 1132 static void wbsd_tasklet_finish(unsigned long param)
 1133 {
 1134 	struct wbsd_host *host = (struct wbsd_host *)param;
 1135 	struct mmc_data *data;
 1136 
 1137 	spin_lock(&host->lock);
 1138 
 1139 	WARN_ON(!host->mrq);
 1140 	if (!host->mrq)
 1141 		goto end;
 1142 
 1143 	data = wbsd_get_data(host);
 1144 	if (!data)
 1145 		goto end;
 1146 
 1147 	wbsd_finish_data(host, data);
 1148 
 1149 end:
 1150 	spin_unlock(&host->lock);
 1151 }
 1152 
 1153 /*
 1154  * Interrupt handling
 1155  */
 1156 
/*
 * Shared interrupt handler: accumulate the chip's ISR bits into
 * host->isr (consumed by wbsd_send_command()) and defer all real work
 * to the matching tasklets.
 */
static irqreturn_t wbsd_irq(int irq, void *dev_id)
{
	struct wbsd_host *host = dev_id;
	int isr;

	isr = inb(host->base + WBSD_ISR);

	/*
	 * Was it actually our hardware that caused the interrupt?
	 * 0x00 and 0xff mean "nothing pending" / "no device".
	 */
	if (isr == 0xff || isr == 0x00)
		return IRQ_NONE;

	host->isr |= isr;

	/*
	 * Schedule tasklets as needed. Error events (CRC, timeout)
	 * are scheduled with high priority.
	 */
	if (isr & WBSD_INT_CARD)
		tasklet_schedule(&host->card_tasklet);
	if (isr & WBSD_INT_FIFO_THRE)
		tasklet_schedule(&host->fifo_tasklet);
	if (isr & WBSD_INT_CRC)
		tasklet_hi_schedule(&host->crc_tasklet);
	if (isr & WBSD_INT_TIMEOUT)
		tasklet_hi_schedule(&host->timeout_tasklet);
	if (isr & WBSD_INT_TC)
		tasklet_schedule(&host->finish_tasklet);

	return IRQ_HANDLED;
}
 1188 
 1189 /*****************************************************************************\
 1190  *                                                                           *
 1191  * Device initialisation and shutdown                                        *
 1192  *                                                                           *
 1193 \*****************************************************************************/
 1194 
 1195 /*
 1196  * Allocate/free MMC structure.
 1197  */
 1198 
/*
 * Allocate and pre-configure the struct mmc_host (and its embedded
 * struct wbsd_host) for @dev and stash it in the device's drvdata.
 * Returns 0 on success or -ENOMEM.
 */
static int wbsd_alloc_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	/*
	 * Allocate MMC structure.
	 */
	mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -1 means "no DMA channel": FIFO (PIO) mode. */
	host->dma = -1;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &wbsd_ops;
	mmc->f_min = 375000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	spin_lock_init(&host->lock);

	/*
	 * Set up timers
	 */
	init_timer(&host->ignore_timer);
	host->ignore_timer.data = (unsigned long)host;
	host->ignore_timer.function = wbsd_reset_ignore;

	/*
	 * Maximum number of segments. Worst case is one sector per segment
	 * so this will be 64kB/512.
	 */
	mmc->max_segs = 128;

	/*
	 * Maximum request size. Also limited by 64KiB buffer.
	 */
	mmc->max_req_size = 65536;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. We have 12 bits (= 4095) but have to subtract
	 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
	 */
	mmc->max_blk_size = 4087;

	/*
	 * Maximum block count. There is no real limit so the maximum
	 * request size will be the only restriction.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	dev_set_drvdata(dev, mmc);

	return 0;
}
 1267 
/*
 * Tear down the mmc_host allocated by wbsd_alloc_mmc(). Safe to call
 * when no host was allocated (drvdata is NULL). The ignore timer is
 * stopped before the host memory is released.
 */
static void wbsd_free_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	mmc = dev_get_drvdata(dev);
	if (!mmc)
		return;

	host = mmc_priv(mmc);
	BUG_ON(host == NULL);

	del_timer_sync(&host->ignore_timer);

	mmc_free_host(mmc);

	dev_set_drvdata(dev, NULL);
}
 1286 
 1287 /*
 1288  * Scan for known chip id:s
 1289  */
 1290 
 1291 static int wbsd_scan(struct wbsd_host *host)
 1292 {
 1293 	int i, j, k;
 1294 	int id;
 1295 
 1296 	/*
 1297 	 * Iterate through all ports, all codes to
 1298 	 * find hardware that is in our known list.
 1299 	 */
 1300 	for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
 1301 		if (!request_region(config_ports[i], 2, DRIVER_NAME))
 1302 			continue;
 1303 
 1304 		for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
 1305 			id = 0xFFFF;
 1306 
 1307 			host->config = config_ports[i];
 1308 			host->unlock_code = unlock_codes[j];
 1309 
 1310 			wbsd_unlock_config(host);
 1311 
 1312 			outb(WBSD_CONF_ID_HI, config_ports[i]);
 1313 			id = inb(config_ports[i] + 1) << 8;
 1314 
 1315 			outb(WBSD_CONF_ID_LO, config_ports[i]);
 1316 			id |= inb(config_ports[i] + 1);
 1317 
 1318 			wbsd_lock_config(host);
 1319 
 1320 			for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
 1321 				if (id == valid_ids[k]) {
 1322 					host->chip_id = id;
 1323 
 1324 					return 0;
 1325 				}
 1326 			}
 1327 
 1328 			if (id != 0xFFFF) {
 1329 				DBG("Unknown hardware (id %x) found at %x\n",
 1330 					id, config_ports[i]);
 1331 			}
 1332 		}
 1333 
 1334 		release_region(config_ports[i], 2);
 1335 	}
 1336 
 1337 	host->config = 0;
 1338 	host->unlock_code = 0;
 1339 
 1340 	return -ENODEV;
 1341 }
 1342 
 1343 /*
 1344  * Allocate/free io port ranges
 1345  */
 1346 
 1347 static int wbsd_request_region(struct wbsd_host *host, int base)
 1348 {
 1349 	if (base & 0x7)
 1350 		return -EINVAL;
 1351 
 1352 	if (!request_region(base, 8, DRIVER_NAME))
 1353 		return -EIO;
 1354 
 1355 	host->base = base;
 1356 
 1357 	return 0;
 1358 }
 1359 
 1360 static void wbsd_release_regions(struct wbsd_host *host)
 1361 {
 1362 	if (host->base)
 1363 		release_region(host->base, 8);
 1364 
 1365 	host->base = 0;
 1366 
 1367 	if (host->config)
 1368 		release_region(host->config, 2);
 1369 
 1370 	host->config = 0;
 1371 }
 1372 
 1373 /*
 1374  * Allocate/free DMA port and buffer
 1375  */
 1376 
 1377 static void wbsd_request_dma(struct wbsd_host *host, int dma)
 1378 {
 1379 	if (dma < 0)
 1380 		return;
 1381 
 1382 	if (request_dma(dma, DRIVER_NAME))
 1383 		goto err;
 1384 
 1385 	/*
 1386 	 * We need to allocate a special buffer in
 1387 	 * order for ISA to be able to DMA to it.
 1388 	 */
 1389 	host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
 1390 		GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
 1391 	if (!host->dma_buffer)
 1392 		goto free;
 1393 
 1394 	/*
 1395 	 * Translate the address to a physical address.
 1396 	 */
 1397 	host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
 1398 		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 1399 
 1400 	/*
 1401 	 * ISA DMA must be aligned on a 64k basis.
 1402 	 */
 1403 	if ((host->dma_addr & 0xffff) != 0)
 1404 		goto kfree;
 1405 	/*
 1406 	 * ISA cannot access memory above 16 MB.
 1407 	 */
 1408 	else if (host->dma_addr >= 0x1000000)
 1409 		goto kfree;
 1410 
 1411 	host->dma = dma;
 1412 
 1413 	return;
 1414 
 1415 kfree:
 1416 	/*
 1417 	 * If we've gotten here then there is some kind of alignment bug
 1418 	 */
 1419 	BUG_ON(1);
 1420 
 1421 	dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
 1422 		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 1423 	host->dma_addr = 0;
 1424 
 1425 	kfree(host->dma_buffer);
 1426 	host->dma_buffer = NULL;
 1427 
 1428 free:
 1429 	free_dma(dma);
 1430 
 1431 err:
 1432 	pr_warn(DRIVER_NAME ": Unable to allocate DMA %d - falling back on FIFO\n",
 1433 		dma);
 1434 }
 1435 
 1436 static void wbsd_release_dma(struct wbsd_host *host)
 1437 {
 1438 	if (host->dma_addr) {
 1439 		dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
 1440 			WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 1441 	}
 1442 	kfree(host->dma_buffer);
 1443 	if (host->dma >= 0)
 1444 		free_dma(host->dma);
 1445 
 1446 	host->dma = -1;
 1447 	host->dma_buffer = NULL;
 1448 	host->dma_addr = 0;
 1449 }
 1450 
 1451 /*
 1452  * Allocate/free IRQ.
 1453  */
 1454 
/*
 * Initialise all tasklets and install the (shared) interrupt handler.
 * Tasklets must exist before the IRQ is live, since the handler
 * schedules them. Returns 0 or the request_irq() error.
 */
static int wbsd_request_irq(struct wbsd_host *host, int irq)
{
	int ret;

	/*
	 * Set up tasklets. Must be done before requesting interrupt.
	 */
	tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
			(unsigned long)host);
	tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
			(unsigned long)host);
	tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
			(unsigned long)host);
	tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
			(unsigned long)host);
	tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
			(unsigned long)host);

	/*
	 * Allocate interrupt.
	 */
	ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret)
		return ret;

	host->irq = irq;

	return 0;
}
 1484 
 1485 static void  wbsd_release_irq(struct wbsd_host *host)
 1486 {
 1487 	if (!host->irq)
 1488 		return;
 1489 
 1490 	free_irq(host->irq, host);
 1491 
 1492 	host->irq = 0;
 1493 
 1494 	tasklet_kill(&host->card_tasklet);
 1495 	tasklet_kill(&host->fifo_tasklet);
 1496 	tasklet_kill(&host->crc_tasklet);
 1497 	tasklet_kill(&host->timeout_tasklet);
 1498 	tasklet_kill(&host->finish_tasklet);
 1499 }
 1500 
 1501 /*
 1502  * Allocate all resources for the host.
 1503  */
 1504 
 1505 static int wbsd_request_resources(struct wbsd_host *host,
 1506 	int base, int irq, int dma)
 1507 {
 1508 	int ret;
 1509 
 1510 	/*
 1511 	 * Allocate I/O ports.
 1512 	 */
 1513 	ret = wbsd_request_region(host, base);
 1514 	if (ret)
 1515 		return ret;
 1516 
 1517 	/*
 1518 	 * Allocate interrupt.
 1519 	 */
 1520 	ret = wbsd_request_irq(host, irq);
 1521 	if (ret)
 1522 		return ret;
 1523 
 1524 	/*
 1525 	 * Allocate DMA.
 1526 	 */
 1527 	wbsd_request_dma(host, dma);
 1528 
 1529 	return 0;
 1530 }
 1531 
 1532 /*
 1533  * Release all resources for the host.
 1534  */
 1535 
/*
 * Release all resources for the host, in reverse order of
 * acquisition: DMA first, then IRQ/tasklets, then the I/O regions.
 */
static void wbsd_release_resources(struct wbsd_host *host)
{
	wbsd_release_dma(host);
	wbsd_release_irq(host);
	wbsd_release_regions(host);
}
 1542 
 1543 /*
 1544  * Configure the resources the chip should use.
 1545  */
 1546 
/*
 * Program the chip's configuration registers with the resources the
 * driver has claimed (base port, IRQ, DMA channel), then enable and
 * power up the SD/MMC function.
 */
static void wbsd_chip_config(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	/*
	 * Reset the chip.
	 */
	wbsd_write_config(host, WBSD_CONF_SWRST, 1);
	wbsd_write_config(host, WBSD_CONF_SWRST, 0);

	/*
	 * Select SD/MMC function.
	 */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);

	/*
	 * Set up card detection.
	 */
	wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);

	/*
	 * Configure chip: base I/O port, IRQ and (when used) the DMA
	 * channel.
	 */
	wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
	wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);

	wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);

	if (host->dma >= 0)
		wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);

	/*
	 * Enable and power up chip.
	 */
	wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
	wbsd_write_config(host, WBSD_CONF_POWER, 0x20);

	wbsd_lock_config(host);
}
 1586 
 1587 /*
 1588  * Check that configured resources are correct.
 1589  */
 1590 
 1591 static int wbsd_chip_validate(struct wbsd_host *host)
 1592 {
 1593 	int base, irq, dma;
 1594 
 1595 	wbsd_unlock_config(host);
 1596 
 1597 	/*
 1598 	 * Select SD/MMC function.
 1599 	 */
 1600 	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
 1601 
 1602 	/*
 1603 	 * Read configuration.
 1604 	 */
 1605 	base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
 1606 	base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
 1607 
 1608 	irq = wbsd_read_config(host, WBSD_CONF_IRQ);
 1609 
 1610 	dma = wbsd_read_config(host, WBSD_CONF_DRQ);
 1611 
 1612 	wbsd_lock_config(host);
 1613 
 1614 	/*
 1615 	 * Validate against given configuration.
 1616 	 */
 1617 	if (base != host->base)
 1618 		return 0;
 1619 	if (irq != host->irq)
 1620 		return 0;
 1621 	if ((dma != host->dma) && (host->dma != -1))
 1622 		return 0;
 1623 
 1624 	return 1;
 1625 }
 1626 
 1627 /*
 1628  * Powers down the SD function
 1629  */
 1630 
/*
 * Power down the SD/MMC function by clearing its enable bit in the
 * chip's configuration space.
 */
static void wbsd_chip_poweroff(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
	wbsd_write_config(host, WBSD_CONF_ENABLE, 0);

	wbsd_lock_config(host);
}
 1640 
 1641 /*****************************************************************************\
 1642  *                                                                           *
 1643  * Devices setup and shutdown                                                *
 1644  *                                                                           *
 1645 \*****************************************************************************/
 1646 
 1647 static int wbsd_init(struct device *dev, int base, int irq, int dma,
 1648 	int pnp)
 1649 {
 1650 	struct wbsd_host *host = NULL;
 1651 	struct mmc_host *mmc = NULL;
 1652 	int ret;
 1653 
 1654 	ret = wbsd_alloc_mmc(dev);
 1655 	if (ret)
 1656 		return ret;
 1657 
 1658 	mmc = dev_get_drvdata(dev);
 1659 	host = mmc_priv(mmc);
 1660 
 1661 	/*
 1662 	 * Scan for hardware.
 1663 	 */
 1664 	ret = wbsd_scan(host);
 1665 	if (ret) {
 1666 		if (pnp && (ret == -ENODEV)) {
 1667 			pr_warn(DRIVER_NAME ": Unable to confirm device presence - you may experience lock-ups\n");
 1668 		} else {
 1669 			wbsd_free_mmc(dev);
 1670 			return ret;
 1671 		}
 1672 	}
 1673 
 1674 	/*
 1675 	 * Request resources.
 1676 	 */
 1677 	ret = wbsd_request_resources(host, base, irq, dma);
 1678 	if (ret) {
 1679 		wbsd_release_resources(host);
 1680 		wbsd_free_mmc(dev);
 1681 		return ret;
 1682 	}
 1683 
 1684 	/*
 1685 	 * See if chip needs to be configured.
 1686 	 */
 1687 	if (pnp) {
 1688 		if ((host->config != 0) && !wbsd_chip_validate(host)) {
 1689 			pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
 1690 			wbsd_chip_config(host);
 1691 		}
 1692 	} else
 1693 		wbsd_chip_config(host);
 1694 
 1695 	/*
 1696 	 * Power Management stuff. No idea how this works.
 1697 	 * Not tested.
 1698 	 */
 1699 #ifdef CONFIG_PM
 1700 	if (host->config) {
 1701 		wbsd_unlock_config(host);
 1702 		wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
 1703 		wbsd_lock_config(host);
 1704 	}
 1705 #endif
 1706 	/*
 1707 	 * Allow device to initialise itself properly.
 1708 	 */
 1709 	mdelay(5);
 1710 
 1711 	/*
 1712 	 * Reset the chip into a known state.
 1713 	 */
 1714 	wbsd_init_device(host);
 1715 
 1716 	mmc_add_host(mmc);
 1717 
 1718 	pr_info("%s: W83L51xD", mmc_hostname(mmc));
 1719 	if (host->chip_id != 0)
 1720 		printk(" id %x", (int)host->chip_id);
 1721 	printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
 1722 	if (host->dma >= 0)
 1723 		printk(" dma %d", (int)host->dma);
 1724 	else
 1725 		printk(" FIFO");
 1726 	if (pnp)
 1727 		printk(" PnP");
 1728 	printk("\n");
 1729 
 1730 	return 0;
 1731 }
 1732 
/*
 * Common teardown for both the platform and the PnP device paths.
 * @dev: device whose drvdata holds the struct mmc_host
 * @pnp: non-zero when the device was bound through the PnP layer
 */
static void wbsd_shutdown(struct device *dev, int pnp)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wbsd_host *host;

	/* Nothing to do if init never attached an mmc host. */
	if (!mmc)
		return;

	host = mmc_priv(mmc);

	/* Unregister from the MMC core before touching the hardware. */
	mmc_remove_host(mmc);

	/*
	 * Power down the SD/MMC function.
	 * NOTE(review): skipped in the PnP case — presumably the PnP
	 * layer keeps control of the chip configuration; confirm.
	 */
	if (!pnp)
		wbsd_chip_poweroff(host);

	wbsd_release_resources(host);

	wbsd_free_mmc(dev);
}
 1755 
 1756 /*
 1757  * Non-PnP
 1758  */
 1759 
/*
 * Non-PnP probe: resources come from the module parameters
 * (io/irq/dma) instead of firmware enumeration.
 */
static int wbsd_probe(struct platform_device *dev)
{
	/* Use the module parameters for resources */
	return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
}
 1765 
/* Platform-bus remove: common shutdown with pnp == 0. */
static int wbsd_remove(struct platform_device *dev)
{
	wbsd_shutdown(&dev->dev, 0);

	return 0;
}
 1772 
 1773 /*
 1774  * PnP
 1775  */
 1776 
 1777 #ifdef CONFIG_PNP
 1778 
 1779 static int
 1780 wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
 1781 {
 1782 	int io, irq, dma;
 1783 
 1784 	/*
 1785 	 * Get resources from PnP layer.
 1786 	 */
 1787 	io = pnp_port_start(pnpdev, 0);
 1788 	irq = pnp_irq(pnpdev, 0);
 1789 	if (pnp_dma_valid(pnpdev, 0))
 1790 		dma = pnp_dma(pnpdev, 0);
 1791 	else
 1792 		dma = -1;
 1793 
 1794 	DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
 1795 
 1796 	return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
 1797 }
 1798 
/* PnP remove: common shutdown with pnp == 1. */
static void wbsd_pnp_remove(struct pnp_dev *dev)
{
	wbsd_shutdown(&dev->dev, 1);
}
 1803 
 1804 #endif /* CONFIG_PNP */
 1805 
 1806 /*
 1807  * Power management
 1808  */
 1809 
 1810 #ifdef CONFIG_PM
 1811 
 1812 static int wbsd_platform_suspend(struct platform_device *dev,
 1813 				 pm_message_t state)
 1814 {
 1815 	struct mmc_host *mmc = platform_get_drvdata(dev);
 1816 	struct wbsd_host *host;
 1817 
 1818 	if (mmc == NULL)
 1819 		return 0;
 1820 
 1821 	DBGF("Suspending...\n");
 1822 
 1823 	host = mmc_priv(mmc);
 1824 
 1825 	wbsd_chip_poweroff(host);
 1826 	return 0;
 1827 }
 1828 
 1829 static int wbsd_platform_resume(struct platform_device *dev)
 1830 {
 1831 	struct mmc_host *mmc = platform_get_drvdata(dev);
 1832 	struct wbsd_host *host;
 1833 
 1834 	if (mmc == NULL)
 1835 		return 0;
 1836 
 1837 	DBGF("Resuming...\n");
 1838 
 1839 	host = mmc_priv(mmc);
 1840 
 1841 	wbsd_chip_config(host);
 1842 
 1843 	/*
 1844 	 * Allow device to initialise itself properly.
 1845 	 */
 1846 	mdelay(5);
 1847 
 1848 	wbsd_init_device(host);
 1849 	return 0;
 1850 }
 1851 
 1852 #ifdef CONFIG_PNP
 1853 
 1854 static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
 1855 {
 1856 	struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
 1857 
 1858 	if (mmc == NULL)
 1859 		return 0;
 1860 
 1861 	DBGF("Suspending...\n");
 1862 	return 0;
 1863 }
 1864 
 1865 static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
 1866 {
 1867 	struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
 1868 	struct wbsd_host *host;
 1869 
 1870 	if (mmc == NULL)
 1871 		return 0;
 1872 
 1873 	DBGF("Resuming...\n");
 1874 
 1875 	host = mmc_priv(mmc);
 1876 
 1877 	/*
 1878 	 * See if chip needs to be configured.
 1879 	 */
 1880 	if (host->config != 0) {
 1881 		if (!wbsd_chip_validate(host)) {
 1882 			pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
 1883 			wbsd_chip_config(host);
 1884 		}
 1885 	}
 1886 
 1887 	/*
 1888 	 * Allow device to initialise itself properly.
 1889 	 */
 1890 	mdelay(5);
 1891 
 1892 	wbsd_init_device(host);
 1893 	return 0;
 1894 }
 1895 
 1896 #endif /* CONFIG_PNP */
 1897 
 1898 #else /* CONFIG_PM */
 1899 
 1900 #define wbsd_platform_suspend NULL
 1901 #define wbsd_platform_resume NULL
 1902 
 1903 #define wbsd_pnp_suspend NULL
 1904 #define wbsd_pnp_resume NULL
 1905 
 1906 #endif /* CONFIG_PM */
 1907 
/* Platform device created manually when running without PnP. */
static struct platform_device *wbsd_device;

/* Driver for the non-PnP (module parameter) probe path. */
static struct platform_driver wbsd_driver = {
	.probe		= wbsd_probe,
	.remove		= wbsd_remove,

	.suspend	= wbsd_platform_suspend,
	.resume		= wbsd_platform_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};

#ifdef CONFIG_PNP

/* Driver for devices enumerated by the PnP layer. */
static struct pnp_driver wbsd_pnp_driver = {
	.name		= DRIVER_NAME,
	.id_table	= pnp_dev_table,
	.probe		= wbsd_pnp_probe,
	.remove		= wbsd_pnp_remove,

	.suspend	= wbsd_pnp_suspend,
	.resume		= wbsd_pnp_resume,
};

#endif /* CONFIG_PNP */
 1934 
 1935 /*
 1936  * Module loading/unloading
 1937  */
 1938 
/*
 * Module entry point.  Registers the PnP driver unless disabled via
 * the "nopnp" parameter; when PnP is disabled, registers a platform
 * driver and creates one platform device that uses the module
 * parameters for its resources.
 */
static int __init wbsd_drv_init(void)
{
	int result;

	pr_info(DRIVER_NAME
		": Winbond W83L51xD SD/MMC card interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

#ifdef CONFIG_PNP

	if (!param_nopnp) {
		result = pnp_register_driver(&wbsd_pnp_driver);
		if (result < 0)
			return result;
	}
#endif /* CONFIG_PNP */

	/* Without PnP, create the device ourselves. */
	if (param_nopnp) {
		result = platform_driver_register(&wbsd_driver);
		if (result < 0)
			return result;

		wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
		if (!wbsd_device) {
			platform_driver_unregister(&wbsd_driver);
			return -ENOMEM;
		}

		result = platform_device_add(wbsd_device);
		if (result) {
			/* put() drops the reference taken by alloc above */
			platform_device_put(wbsd_device);
			platform_driver_unregister(&wbsd_driver);
			return result;
		}
	}

	return 0;
}
 1977 
/*
 * Module exit point.  Mirrors wbsd_drv_init(): unregisters whichever
 * of the PnP driver or the platform driver/device was registered,
 * based on the same "nopnp" parameter.
 */
static void __exit wbsd_drv_exit(void)
{
#ifdef CONFIG_PNP

	if (!param_nopnp)
		pnp_unregister_driver(&wbsd_pnp_driver);

#endif /* CONFIG_PNP */

	if (param_nopnp) {
		platform_device_unregister(wbsd_device);

		platform_driver_unregister(&wbsd_driver);
	}

	DBG("unloaded\n");
}
 1995 
module_init(wbsd_drv_init);
module_exit(wbsd_drv_exit);
/* "nopnp" only exists when the kernel has PnP support compiled in. */
#ifdef CONFIG_PNP
module_param_named(nopnp, param_nopnp, uint, 0444);
#endif
/* Resource parameters; only used on the non-PnP probe path. */
module_param_named(io, param_io, uint, 0444);
module_param_named(irq, param_irq, uint, 0444);
module_param_named(dma, param_dma, int, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");

#ifdef CONFIG_PNP
MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
#endif
MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
 2015 
 2016 
 2017 
 2018 
 2019 
 2020 /* LDV_COMMENT_BEGIN_MAIN */
 2021 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 2022 
 2023 /*###########################################################################*/
 2024 
 2025 /*############## Driver Environment Generator 0.2 output ####################*/
 2026 
 2027 /*###########################################################################*/
 2028 
 2029 
 2030 
 2031 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
 2032 void ldv_check_final_state(void);
 2033 
 2034 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 2035 void ldv_check_return_value(int res);
 2036 
 2037 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 2038 void ldv_check_return_value_probe(int res);
 2039 
 2040 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 2041 void ldv_initialize(void);
 2042 
 2043 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 2044 void ldv_handler_precall(void);
 2045 
 2046 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
 2047 int nondet_int(void);
 2048 
 2049 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 2050 int LDV_IN_INTERRUPT;
 2051 
 2052 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 2053 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 2054 
 2055 
 2056 
 2057 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 2058 	/*============================= VARIABLE DECLARATION PART   =============================*/
 2059 	/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2060 	/* content: static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
 2061 	/* LDV_COMMENT_BEGIN_PREP */
 2062 	#define DRIVER_NAME "wbsd"
 2063 	#define DBG(x...) \
 2064 	pr_debug(DRIVER_NAME ": " x)
 2065 	#define DBGF(f, x...) \
 2066 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2067 	#ifdef CONFIG_PNP
 2068 	#endif 
 2069 	#ifdef CONFIG_PNP
 2070 	#else
 2071 	#endif
 2072 	/* LDV_COMMENT_END_PREP */
 2073 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_request" */
 2074 	struct mmc_host * var_group1;
 2075 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_request" */
 2076 	struct mmc_request * var_group2;
 2077 	/* LDV_COMMENT_BEGIN_PREP */
 2078 	#ifdef CONFIG_PM
 2079 	#endif
 2080 	#ifdef CONFIG_PNP
 2081 	#endif 
 2082 	#ifdef CONFIG_PM
 2083 	#ifdef CONFIG_PNP
 2084 	#endif 
 2085 	#else 
 2086 	#define wbsd_platform_suspend NULL
 2087 	#define wbsd_platform_resume NULL
 2088 	#define wbsd_pnp_suspend NULL
 2089 	#define wbsd_pnp_resume NULL
 2090 	#endif 
 2091 	#ifdef CONFIG_PNP
 2092 	#endif 
 2093 	#ifdef CONFIG_PNP
 2094 	#endif 
 2095 	#ifdef CONFIG_PNP
 2096 	#endif 
 2097 	#ifdef CONFIG_PNP
 2098 	#endif
 2099 	#ifdef CONFIG_PNP
 2100 	#endif
 2101 	/* LDV_COMMENT_END_PREP */
 2102 	/* content: static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
 2103 	/* LDV_COMMENT_BEGIN_PREP */
 2104 	#define DRIVER_NAME "wbsd"
 2105 	#define DBG(x...) \
 2106 	pr_debug(DRIVER_NAME ": " x)
 2107 	#define DBGF(f, x...) \
 2108 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2109 	#ifdef CONFIG_PNP
 2110 	#endif 
 2111 	#ifdef CONFIG_PNP
 2112 	#else
 2113 	#endif
 2114 	#ifdef CONFIG_MMC_DEBUG
 2115 	#endif
 2116 	/* LDV_COMMENT_END_PREP */
 2117 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_set_ios" */
 2118 	struct mmc_ios * var_group3;
 2119 	/* LDV_COMMENT_BEGIN_PREP */
 2120 	#ifdef CONFIG_PM
 2121 	#endif
 2122 	#ifdef CONFIG_PNP
 2123 	#endif 
 2124 	#ifdef CONFIG_PM
 2125 	#ifdef CONFIG_PNP
 2126 	#endif 
 2127 	#else 
 2128 	#define wbsd_platform_suspend NULL
 2129 	#define wbsd_platform_resume NULL
 2130 	#define wbsd_pnp_suspend NULL
 2131 	#define wbsd_pnp_resume NULL
 2132 	#endif 
 2133 	#ifdef CONFIG_PNP
 2134 	#endif 
 2135 	#ifdef CONFIG_PNP
 2136 	#endif 
 2137 	#ifdef CONFIG_PNP
 2138 	#endif 
 2139 	#ifdef CONFIG_PNP
 2140 	#endif
 2141 	#ifdef CONFIG_PNP
 2142 	#endif
 2143 	/* LDV_COMMENT_END_PREP */
 2144 	/* content: static int wbsd_get_ro(struct mmc_host *mmc)*/
 2145 	/* LDV_COMMENT_BEGIN_PREP */
 2146 	#define DRIVER_NAME "wbsd"
 2147 	#define DBG(x...) \
 2148 	pr_debug(DRIVER_NAME ": " x)
 2149 	#define DBGF(f, x...) \
 2150 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2151 	#ifdef CONFIG_PNP
 2152 	#endif 
 2153 	#ifdef CONFIG_PNP
 2154 	#else
 2155 	#endif
 2156 	#ifdef CONFIG_MMC_DEBUG
 2157 	#endif
 2158 	/* LDV_COMMENT_END_PREP */
 2159 	/* LDV_COMMENT_BEGIN_PREP */
 2160 	#ifdef CONFIG_PM
 2161 	#endif
 2162 	#ifdef CONFIG_PNP
 2163 	#endif 
 2164 	#ifdef CONFIG_PM
 2165 	#ifdef CONFIG_PNP
 2166 	#endif 
 2167 	#else 
 2168 	#define wbsd_platform_suspend NULL
 2169 	#define wbsd_platform_resume NULL
 2170 	#define wbsd_pnp_suspend NULL
 2171 	#define wbsd_pnp_resume NULL
 2172 	#endif 
 2173 	#ifdef CONFIG_PNP
 2174 	#endif 
 2175 	#ifdef CONFIG_PNP
 2176 	#endif 
 2177 	#ifdef CONFIG_PNP
 2178 	#endif 
 2179 	#ifdef CONFIG_PNP
 2180 	#endif
 2181 	#ifdef CONFIG_PNP
 2182 	#endif
 2183 	/* LDV_COMMENT_END_PREP */
 2184 
 2185 	/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2186 	/* content: static int wbsd_probe(struct platform_device *dev)*/
 2187 	/* LDV_COMMENT_BEGIN_PREP */
 2188 	#define DRIVER_NAME "wbsd"
 2189 	#define DBG(x...) \
 2190 	pr_debug(DRIVER_NAME ": " x)
 2191 	#define DBGF(f, x...) \
 2192 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2193 	#ifdef CONFIG_PNP
 2194 	#endif 
 2195 	#ifdef CONFIG_PNP
 2196 	#else
 2197 	#endif
 2198 	#ifdef CONFIG_MMC_DEBUG
 2199 	#endif
 2200 	#ifdef CONFIG_PM
 2201 	#endif
 2202 	/* LDV_COMMENT_END_PREP */
 2203 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_probe" */
 2204 	struct platform_device * var_group4;
 2205 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "wbsd_probe" */
 2206 	static int res_wbsd_probe_48;
 2207 	/* LDV_COMMENT_BEGIN_PREP */
 2208 	#ifdef CONFIG_PNP
 2209 	#endif 
 2210 	#ifdef CONFIG_PM
 2211 	#ifdef CONFIG_PNP
 2212 	#endif 
 2213 	#else 
 2214 	#define wbsd_platform_suspend NULL
 2215 	#define wbsd_platform_resume NULL
 2216 	#define wbsd_pnp_suspend NULL
 2217 	#define wbsd_pnp_resume NULL
 2218 	#endif 
 2219 	#ifdef CONFIG_PNP
 2220 	#endif 
 2221 	#ifdef CONFIG_PNP
 2222 	#endif 
 2223 	#ifdef CONFIG_PNP
 2224 	#endif 
 2225 	#ifdef CONFIG_PNP
 2226 	#endif
 2227 	#ifdef CONFIG_PNP
 2228 	#endif
 2229 	/* LDV_COMMENT_END_PREP */
 2230 	/* content: static int wbsd_remove(struct platform_device *dev)*/
 2231 	/* LDV_COMMENT_BEGIN_PREP */
 2232 	#define DRIVER_NAME "wbsd"
 2233 	#define DBG(x...) \
 2234 	pr_debug(DRIVER_NAME ": " x)
 2235 	#define DBGF(f, x...) \
 2236 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2237 	#ifdef CONFIG_PNP
 2238 	#endif 
 2239 	#ifdef CONFIG_PNP
 2240 	#else
 2241 	#endif
 2242 	#ifdef CONFIG_MMC_DEBUG
 2243 	#endif
 2244 	#ifdef CONFIG_PM
 2245 	#endif
 2246 	/* LDV_COMMENT_END_PREP */
 2247 	/* LDV_COMMENT_BEGIN_PREP */
 2248 	#ifdef CONFIG_PNP
 2249 	#endif 
 2250 	#ifdef CONFIG_PM
 2251 	#ifdef CONFIG_PNP
 2252 	#endif 
 2253 	#else 
 2254 	#define wbsd_platform_suspend NULL
 2255 	#define wbsd_platform_resume NULL
 2256 	#define wbsd_pnp_suspend NULL
 2257 	#define wbsd_pnp_resume NULL
 2258 	#endif 
 2259 	#ifdef CONFIG_PNP
 2260 	#endif 
 2261 	#ifdef CONFIG_PNP
 2262 	#endif 
 2263 	#ifdef CONFIG_PNP
 2264 	#endif 
 2265 	#ifdef CONFIG_PNP
 2266 	#endif
 2267 	#ifdef CONFIG_PNP
 2268 	#endif
 2269 	/* LDV_COMMENT_END_PREP */
 2270 	/* content: static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state)*/
 2271 	/* LDV_COMMENT_BEGIN_PREP */
 2272 	#define DRIVER_NAME "wbsd"
 2273 	#define DBG(x...) \
 2274 	pr_debug(DRIVER_NAME ": " x)
 2275 	#define DBGF(f, x...) \
 2276 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2277 	#ifdef CONFIG_PNP
 2278 	#endif 
 2279 	#ifdef CONFIG_PNP
 2280 	#else
 2281 	#endif
 2282 	#ifdef CONFIG_MMC_DEBUG
 2283 	#endif
 2284 	#ifdef CONFIG_PM
 2285 	#endif
 2286 	#ifdef CONFIG_PNP
 2287 	#endif 
 2288 	#ifdef CONFIG_PM
 2289 	/* LDV_COMMENT_END_PREP */
 2290 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_platform_suspend" */
 2291 	pm_message_t  var_wbsd_platform_suspend_52_p1;
 2292 	/* LDV_COMMENT_BEGIN_PREP */
 2293 	#ifdef CONFIG_PNP
 2294 	#endif 
 2295 	#else 
 2296 	#define wbsd_platform_suspend NULL
 2297 	#define wbsd_platform_resume NULL
 2298 	#define wbsd_pnp_suspend NULL
 2299 	#define wbsd_pnp_resume NULL
 2300 	#endif 
 2301 	#ifdef CONFIG_PNP
 2302 	#endif 
 2303 	#ifdef CONFIG_PNP
 2304 	#endif 
 2305 	#ifdef CONFIG_PNP
 2306 	#endif 
 2307 	#ifdef CONFIG_PNP
 2308 	#endif
 2309 	#ifdef CONFIG_PNP
 2310 	#endif
 2311 	/* LDV_COMMENT_END_PREP */
 2312 	/* content: static int wbsd_platform_resume(struct platform_device *dev)*/
 2313 	/* LDV_COMMENT_BEGIN_PREP */
 2314 	#define DRIVER_NAME "wbsd"
 2315 	#define DBG(x...) \
 2316 	pr_debug(DRIVER_NAME ": " x)
 2317 	#define DBGF(f, x...) \
 2318 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2319 	#ifdef CONFIG_PNP
 2320 	#endif 
 2321 	#ifdef CONFIG_PNP
 2322 	#else
 2323 	#endif
 2324 	#ifdef CONFIG_MMC_DEBUG
 2325 	#endif
 2326 	#ifdef CONFIG_PM
 2327 	#endif
 2328 	#ifdef CONFIG_PNP
 2329 	#endif 
 2330 	#ifdef CONFIG_PM
 2331 	/* LDV_COMMENT_END_PREP */
 2332 	/* LDV_COMMENT_BEGIN_PREP */
 2333 	#ifdef CONFIG_PNP
 2334 	#endif 
 2335 	#else 
 2336 	#define wbsd_platform_suspend NULL
 2337 	#define wbsd_platform_resume NULL
 2338 	#define wbsd_pnp_suspend NULL
 2339 	#define wbsd_pnp_resume NULL
 2340 	#endif 
 2341 	#ifdef CONFIG_PNP
 2342 	#endif 
 2343 	#ifdef CONFIG_PNP
 2344 	#endif 
 2345 	#ifdef CONFIG_PNP
 2346 	#endif 
 2347 	#ifdef CONFIG_PNP
 2348 	#endif
 2349 	#ifdef CONFIG_PNP
 2350 	#endif
 2351 	/* LDV_COMMENT_END_PREP */
 2352 
 2353 	/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 2354 	/* content: static int wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)*/
 2355 	/* LDV_COMMENT_BEGIN_PREP */
 2356 	#define DRIVER_NAME "wbsd"
 2357 	#define DBG(x...) \
 2358 	pr_debug(DRIVER_NAME ": " x)
 2359 	#define DBGF(f, x...) \
 2360 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2361 	#ifdef CONFIG_PNP
 2362 	#endif 
 2363 	#ifdef CONFIG_PNP
 2364 	#else
 2365 	#endif
 2366 	#ifdef CONFIG_MMC_DEBUG
 2367 	#endif
 2368 	#ifdef CONFIG_PM
 2369 	#endif
 2370 	#ifdef CONFIG_PNP
 2371 	/* LDV_COMMENT_END_PREP */
 2372 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_pnp_probe" */
 2373 	struct pnp_dev * var_group5;
 2374 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_pnp_probe" */
 2375 	const struct pnp_device_id * var_wbsd_pnp_probe_50_p1;
 2376 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "wbsd_pnp_probe" */
 2377 	static int res_wbsd_pnp_probe_50;
 2378 	/* LDV_COMMENT_BEGIN_PREP */
 2379 	#endif 
 2380 	#ifdef CONFIG_PM
 2381 	#ifdef CONFIG_PNP
 2382 	#endif 
 2383 	#else 
 2384 	#define wbsd_platform_suspend NULL
 2385 	#define wbsd_platform_resume NULL
 2386 	#define wbsd_pnp_suspend NULL
 2387 	#define wbsd_pnp_resume NULL
 2388 	#endif 
 2389 	#ifdef CONFIG_PNP
 2390 	#endif 
 2391 	#ifdef CONFIG_PNP
 2392 	#endif 
 2393 	#ifdef CONFIG_PNP
 2394 	#endif 
 2395 	#ifdef CONFIG_PNP
 2396 	#endif
 2397 	#ifdef CONFIG_PNP
 2398 	#endif
 2399 	/* LDV_COMMENT_END_PREP */
 2400 	/* content: static void wbsd_pnp_remove(struct pnp_dev *dev)*/
 2401 	/* LDV_COMMENT_BEGIN_PREP */
 2402 	#define DRIVER_NAME "wbsd"
 2403 	#define DBG(x...) \
 2404 	pr_debug(DRIVER_NAME ": " x)
 2405 	#define DBGF(f, x...) \
 2406 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2407 	#ifdef CONFIG_PNP
 2408 	#endif 
 2409 	#ifdef CONFIG_PNP
 2410 	#else
 2411 	#endif
 2412 	#ifdef CONFIG_MMC_DEBUG
 2413 	#endif
 2414 	#ifdef CONFIG_PM
 2415 	#endif
 2416 	#ifdef CONFIG_PNP
 2417 	/* LDV_COMMENT_END_PREP */
 2418 	/* LDV_COMMENT_BEGIN_PREP */
 2419 	#endif 
 2420 	#ifdef CONFIG_PM
 2421 	#ifdef CONFIG_PNP
 2422 	#endif 
 2423 	#else 
 2424 	#define wbsd_platform_suspend NULL
 2425 	#define wbsd_platform_resume NULL
 2426 	#define wbsd_pnp_suspend NULL
 2427 	#define wbsd_pnp_resume NULL
 2428 	#endif 
 2429 	#ifdef CONFIG_PNP
 2430 	#endif 
 2431 	#ifdef CONFIG_PNP
 2432 	#endif 
 2433 	#ifdef CONFIG_PNP
 2434 	#endif 
 2435 	#ifdef CONFIG_PNP
 2436 	#endif
 2437 	#ifdef CONFIG_PNP
 2438 	#endif
 2439 	/* LDV_COMMENT_END_PREP */
 2440 	/* content: static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)*/
 2441 	/* LDV_COMMENT_BEGIN_PREP */
 2442 	#define DRIVER_NAME "wbsd"
 2443 	#define DBG(x...) \
 2444 	pr_debug(DRIVER_NAME ": " x)
 2445 	#define DBGF(f, x...) \
 2446 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2447 	#ifdef CONFIG_PNP
 2448 	#endif 
 2449 	#ifdef CONFIG_PNP
 2450 	#else
 2451 	#endif
 2452 	#ifdef CONFIG_MMC_DEBUG
 2453 	#endif
 2454 	#ifdef CONFIG_PM
 2455 	#endif
 2456 	#ifdef CONFIG_PNP
 2457 	#endif 
 2458 	#ifdef CONFIG_PM
 2459 	#ifdef CONFIG_PNP
 2460 	/* LDV_COMMENT_END_PREP */
 2461 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_pnp_suspend" */
 2462 	pm_message_t  var_wbsd_pnp_suspend_54_p1;
 2463 	/* LDV_COMMENT_BEGIN_PREP */
 2464 	#endif 
 2465 	#else 
 2466 	#define wbsd_platform_suspend NULL
 2467 	#define wbsd_platform_resume NULL
 2468 	#define wbsd_pnp_suspend NULL
 2469 	#define wbsd_pnp_resume NULL
 2470 	#endif 
 2471 	#ifdef CONFIG_PNP
 2472 	#endif 
 2473 	#ifdef CONFIG_PNP
 2474 	#endif 
 2475 	#ifdef CONFIG_PNP
 2476 	#endif 
 2477 	#ifdef CONFIG_PNP
 2478 	#endif
 2479 	#ifdef CONFIG_PNP
 2480 	#endif
 2481 	/* LDV_COMMENT_END_PREP */
 2482 	/* content: static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)*/
 2483 	/* LDV_COMMENT_BEGIN_PREP */
 2484 	#define DRIVER_NAME "wbsd"
 2485 	#define DBG(x...) \
 2486 	pr_debug(DRIVER_NAME ": " x)
 2487 	#define DBGF(f, x...) \
 2488 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2489 	#ifdef CONFIG_PNP
 2490 	#endif 
 2491 	#ifdef CONFIG_PNP
 2492 	#else
 2493 	#endif
 2494 	#ifdef CONFIG_MMC_DEBUG
 2495 	#endif
 2496 	#ifdef CONFIG_PM
 2497 	#endif
 2498 	#ifdef CONFIG_PNP
 2499 	#endif 
 2500 	#ifdef CONFIG_PM
 2501 	#ifdef CONFIG_PNP
 2502 	/* LDV_COMMENT_END_PREP */
 2503 	/* LDV_COMMENT_BEGIN_PREP */
 2504 	#endif 
 2505 	#else 
 2506 	#define wbsd_platform_suspend NULL
 2507 	#define wbsd_platform_resume NULL
 2508 	#define wbsd_pnp_suspend NULL
 2509 	#define wbsd_pnp_resume NULL
 2510 	#endif 
 2511 	#ifdef CONFIG_PNP
 2512 	#endif 
 2513 	#ifdef CONFIG_PNP
 2514 	#endif 
 2515 	#ifdef CONFIG_PNP
 2516 	#endif 
 2517 	#ifdef CONFIG_PNP
 2518 	#endif
 2519 	#ifdef CONFIG_PNP
 2520 	#endif
 2521 	/* LDV_COMMENT_END_PREP */
 2522 
 2523 	/** CALLBACK SECTION request_irq **/
 2524 	/* content: static irqreturn_t wbsd_irq(int irq, void *dev_id)*/
 2525 	/* LDV_COMMENT_BEGIN_PREP */
 2526 	#define DRIVER_NAME "wbsd"
 2527 	#define DBG(x...) \
 2528 	pr_debug(DRIVER_NAME ": " x)
 2529 	#define DBGF(f, x...) \
 2530 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2531 	#ifdef CONFIG_PNP
 2532 	#endif 
 2533 	#ifdef CONFIG_PNP
 2534 	#else
 2535 	#endif
 2536 	#ifdef CONFIG_MMC_DEBUG
 2537 	#endif
 2538 	/* LDV_COMMENT_END_PREP */
 2539 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_irq" */
 2540 	int  var_wbsd_irq_31_p0;
 2541 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_irq" */
 2542 	void * var_wbsd_irq_31_p1;
 2543 	/* LDV_COMMENT_BEGIN_PREP */
 2544 	#ifdef CONFIG_PM
 2545 	#endif
 2546 	#ifdef CONFIG_PNP
 2547 	#endif 
 2548 	#ifdef CONFIG_PM
 2549 	#ifdef CONFIG_PNP
 2550 	#endif 
 2551 	#else 
 2552 	#define wbsd_platform_suspend NULL
 2553 	#define wbsd_platform_resume NULL
 2554 	#define wbsd_pnp_suspend NULL
 2555 	#define wbsd_pnp_resume NULL
 2556 	#endif 
 2557 	#ifdef CONFIG_PNP
 2558 	#endif 
 2559 	#ifdef CONFIG_PNP
 2560 	#endif 
 2561 	#ifdef CONFIG_PNP
 2562 	#endif 
 2563 	#ifdef CONFIG_PNP
 2564 	#endif
 2565 	#ifdef CONFIG_PNP
 2566 	#endif
 2567 	/* LDV_COMMENT_END_PREP */
 2568 
 2569 	/** TIMER SECTION timer **/
 2570 	/* content: static void wbsd_reset_ignore(unsigned long data)*/
 2571 	/* LDV_COMMENT_BEGIN_PREP */
 2572 	#define DRIVER_NAME "wbsd"
 2573 	#define DBG(x...) \
 2574 	pr_debug(DRIVER_NAME ": " x)
 2575 	#define DBGF(f, x...) \
 2576 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2577 	#ifdef CONFIG_PNP
 2578 	#endif 
 2579 	#ifdef CONFIG_PNP
 2580 	#else
 2581 	#endif
 2582 	#ifdef CONFIG_MMC_DEBUG
 2583 	#endif
 2584 	/* LDV_COMMENT_END_PREP */
 2585 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_reset_ignore" */
 2586 	unsigned long  var_wbsd_reset_ignore_24_p0;
 2587 	/* LDV_COMMENT_BEGIN_PREP */
 2588 	#ifdef CONFIG_PM
 2589 	#endif
 2590 	#ifdef CONFIG_PNP
 2591 	#endif 
 2592 	#ifdef CONFIG_PM
 2593 	#ifdef CONFIG_PNP
 2594 	#endif 
 2595 	#else 
 2596 	#define wbsd_platform_suspend NULL
 2597 	#define wbsd_platform_resume NULL
 2598 	#define wbsd_pnp_suspend NULL
 2599 	#define wbsd_pnp_resume NULL
 2600 	#endif 
 2601 	#ifdef CONFIG_PNP
 2602 	#endif 
 2603 	#ifdef CONFIG_PNP
 2604 	#endif 
 2605 	#ifdef CONFIG_PNP
 2606 	#endif 
 2607 	#ifdef CONFIG_PNP
 2608 	#endif
 2609 	#ifdef CONFIG_PNP
 2610 	#endif
 2611 	/* LDV_COMMENT_END_PREP */
 2612 
 2613 
 2614 
 2615 
 2616 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 2617 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 2618 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 2619 	LDV_IN_INTERRUPT=1;
 2620 
 2621 
 2622 
 2623 
 2624 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 2625 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 2626 	/*============================= FUNCTION CALL SECTION       =============================*/
 2627 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 2628 	ldv_initialize();
 2629 
 2630 	/** INIT: init_type: ST_MODULE_INIT **/
 2631 	/* content: static int __init wbsd_drv_init(void)*/
 2632 	/* LDV_COMMENT_BEGIN_PREP */
 2633 	#define DRIVER_NAME "wbsd"
 2634 	#define DBG(x...) \
 2635 	pr_debug(DRIVER_NAME ": " x)
 2636 	#define DBGF(f, x...) \
 2637 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2638 	#ifdef CONFIG_PNP
 2639 	#endif 
 2640 	#ifdef CONFIG_PNP
 2641 	#else
 2642 	#endif
 2643 	#ifdef CONFIG_MMC_DEBUG
 2644 	#endif
 2645 	#ifdef CONFIG_PM
 2646 	#endif
 2647 	#ifdef CONFIG_PNP
 2648 	#endif 
 2649 	#ifdef CONFIG_PM
 2650 	#ifdef CONFIG_PNP
 2651 	#endif 
 2652 	#else 
 2653 	#define wbsd_platform_suspend NULL
 2654 	#define wbsd_platform_resume NULL
 2655 	#define wbsd_pnp_suspend NULL
 2656 	#define wbsd_pnp_resume NULL
 2657 	#endif 
 2658 	#ifdef CONFIG_PNP
 2659 	#endif 
 2660 	/* LDV_COMMENT_END_PREP */
 2661 	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
 2662 	ldv_handler_precall();
 2663 	 if(wbsd_drv_init()) 
 2664 		goto ldv_final;
 2665 	/* LDV_COMMENT_BEGIN_PREP */
 2666 	#ifdef CONFIG_PNP
 2667 	#endif 
 2668 	#ifdef CONFIG_PNP
 2669 	#endif
 2670 	#ifdef CONFIG_PNP
 2671 	#endif
 2672 	/* LDV_COMMENT_END_PREP */
 2673 	
 2674 
 2675 	int ldv_s_wbsd_driver_platform_driver = 0;
 2676 
 2677 	int ldv_s_wbsd_pnp_driver_pnp_driver = 0;
 2678 	
 2679 
 2680 	
 2681 
 2682 	
 2683 
 2684 
 2685 	while(  nondet_int()
 2686 		|| !(ldv_s_wbsd_driver_platform_driver == 0)
 2687 		|| !(ldv_s_wbsd_pnp_driver_pnp_driver == 0)
 2688 	) {
 2689 
 2690 		switch(nondet_int()) {
 2691 
 2692 			case 0: {
 2693 
 2694 				/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2695 				
 2696 
 2697 				/* content: static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
 2698 				/* LDV_COMMENT_BEGIN_PREP */
 2699 				#define DRIVER_NAME "wbsd"
 2700 				#define DBG(x...) \
 2701 	pr_debug(DRIVER_NAME ": " x)
 2702 				#define DBGF(f, x...) \
 2703 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2704 				#ifdef CONFIG_PNP
 2705 				#endif 
 2706 				#ifdef CONFIG_PNP
 2707 				#else
 2708 				#endif
 2709 				/* LDV_COMMENT_END_PREP */
 2710 				/* LDV_COMMENT_FUNCTION_CALL Function from field "request" from driver structure with callbacks "wbsd_ops" */
 2711 				ldv_handler_precall();
 2712 				wbsd_request( var_group1, var_group2);
 2713 				/* LDV_COMMENT_BEGIN_PREP */
 2714 				#ifdef CONFIG_PM
 2715 				#endif
 2716 				#ifdef CONFIG_PNP
 2717 				#endif 
 2718 				#ifdef CONFIG_PM
 2719 				#ifdef CONFIG_PNP
 2720 				#endif 
 2721 				#else 
 2722 				#define wbsd_platform_suspend NULL
 2723 				#define wbsd_platform_resume NULL
 2724 				#define wbsd_pnp_suspend NULL
 2725 				#define wbsd_pnp_resume NULL
 2726 				#endif 
 2727 				#ifdef CONFIG_PNP
 2728 				#endif 
 2729 				#ifdef CONFIG_PNP
 2730 				#endif 
 2731 				#ifdef CONFIG_PNP
 2732 				#endif 
 2733 				#ifdef CONFIG_PNP
 2734 				#endif
 2735 				#ifdef CONFIG_PNP
 2736 				#endif
 2737 				/* LDV_COMMENT_END_PREP */
 2738 				
 2739 
 2740 				
 2741 
 2742 			}
 2743 
 2744 			break;
 2745 			case 1: {
 2746 
 2747 				/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2748 				
 2749 
 2750 				/* content: static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
 2751 				/* LDV_COMMENT_BEGIN_PREP */
 2752 				#define DRIVER_NAME "wbsd"
 2753 				#define DBG(x...) \
 2754 	pr_debug(DRIVER_NAME ": " x)
 2755 				#define DBGF(f, x...) \
 2756 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2757 				#ifdef CONFIG_PNP
 2758 				#endif 
 2759 				#ifdef CONFIG_PNP
 2760 				#else
 2761 				#endif
 2762 				#ifdef CONFIG_MMC_DEBUG
 2763 				#endif
 2764 				/* LDV_COMMENT_END_PREP */
 2765 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_ios" from driver structure with callbacks "wbsd_ops" */
 2766 				ldv_handler_precall();
 2767 				wbsd_set_ios( var_group1, var_group3);
 2768 				/* LDV_COMMENT_BEGIN_PREP */
 2769 				#ifdef CONFIG_PM
 2770 				#endif
 2771 				#ifdef CONFIG_PNP
 2772 				#endif 
 2773 				#ifdef CONFIG_PM
 2774 				#ifdef CONFIG_PNP
 2775 				#endif 
 2776 				#else 
 2777 				#define wbsd_platform_suspend NULL
 2778 				#define wbsd_platform_resume NULL
 2779 				#define wbsd_pnp_suspend NULL
 2780 				#define wbsd_pnp_resume NULL
 2781 				#endif 
 2782 				#ifdef CONFIG_PNP
 2783 				#endif 
 2784 				#ifdef CONFIG_PNP
 2785 				#endif 
 2786 				#ifdef CONFIG_PNP
 2787 				#endif 
 2788 				#ifdef CONFIG_PNP
 2789 				#endif
 2790 				#ifdef CONFIG_PNP
 2791 				#endif
 2792 				/* LDV_COMMENT_END_PREP */
 2793 				
 2794 
 2795 				
 2796 
 2797 			}
 2798 
 2799 			break;
 2800 			case 2: {
 2801 
 2802 				/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2803 				
 2804 
 2805 				/* content: static int wbsd_get_ro(struct mmc_host *mmc)*/
 2806 				/* LDV_COMMENT_BEGIN_PREP */
 2807 				#define DRIVER_NAME "wbsd"
 2808 				#define DBG(x...) \
 2809 	pr_debug(DRIVER_NAME ": " x)
 2810 				#define DBGF(f, x...) \
 2811 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2812 				#ifdef CONFIG_PNP
 2813 				#endif 
 2814 				#ifdef CONFIG_PNP
 2815 				#else
 2816 				#endif
 2817 				#ifdef CONFIG_MMC_DEBUG
 2818 				#endif
 2819 				/* LDV_COMMENT_END_PREP */
 2820 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_ro" from driver structure with callbacks "wbsd_ops" */
 2821 				ldv_handler_precall();
 2822 				wbsd_get_ro( var_group1);
 2823 				/* LDV_COMMENT_BEGIN_PREP */
 2824 				#ifdef CONFIG_PM
 2825 				#endif
 2826 				#ifdef CONFIG_PNP
 2827 				#endif 
 2828 				#ifdef CONFIG_PM
 2829 				#ifdef CONFIG_PNP
 2830 				#endif 
 2831 				#else 
 2832 				#define wbsd_platform_suspend NULL
 2833 				#define wbsd_platform_resume NULL
 2834 				#define wbsd_pnp_suspend NULL
 2835 				#define wbsd_pnp_resume NULL
 2836 				#endif 
 2837 				#ifdef CONFIG_PNP
 2838 				#endif 
 2839 				#ifdef CONFIG_PNP
 2840 				#endif 
 2841 				#ifdef CONFIG_PNP
 2842 				#endif 
 2843 				#ifdef CONFIG_PNP
 2844 				#endif
 2845 				#ifdef CONFIG_PNP
 2846 				#endif
 2847 				/* LDV_COMMENT_END_PREP */
 2848 				
 2849 
 2850 				
 2851 
 2852 			}
 2853 
 2854 			break;
 2855 			case 3: {
 2856 
 2857 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2858 				if(ldv_s_wbsd_driver_platform_driver==0) {
 2859 
 2860 				/* content: static int wbsd_probe(struct platform_device *dev)*/
 2861 				/* LDV_COMMENT_BEGIN_PREP */
 2862 				#define DRIVER_NAME "wbsd"
 2863 				#define DBG(x...) \
 2864 	pr_debug(DRIVER_NAME ": " x)
 2865 				#define DBGF(f, x...) \
 2866 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2867 				#ifdef CONFIG_PNP
 2868 				#endif 
 2869 				#ifdef CONFIG_PNP
 2870 				#else
 2871 				#endif
 2872 				#ifdef CONFIG_MMC_DEBUG
 2873 				#endif
 2874 				#ifdef CONFIG_PM
 2875 				#endif
 2876 				/* LDV_COMMENT_END_PREP */
 2877 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "wbsd_driver". Standart function test for correct return result. */
 2878 				res_wbsd_probe_48 = wbsd_probe( var_group4);
 2879 				 ldv_check_return_value(res_wbsd_probe_48);
 2880 				 ldv_check_return_value_probe(res_wbsd_probe_48);
 2881 				 if(res_wbsd_probe_48) 
 2882 					goto ldv_module_exit;
 2883 				/* LDV_COMMENT_BEGIN_PREP */
 2884 				#ifdef CONFIG_PNP
 2885 				#endif 
 2886 				#ifdef CONFIG_PM
 2887 				#ifdef CONFIG_PNP
 2888 				#endif 
 2889 				#else 
 2890 				#define wbsd_platform_suspend NULL
 2891 				#define wbsd_platform_resume NULL
 2892 				#define wbsd_pnp_suspend NULL
 2893 				#define wbsd_pnp_resume NULL
 2894 				#endif 
 2895 				#ifdef CONFIG_PNP
 2896 				#endif 
 2897 				#ifdef CONFIG_PNP
 2898 				#endif 
 2899 				#ifdef CONFIG_PNP
 2900 				#endif 
 2901 				#ifdef CONFIG_PNP
 2902 				#endif
 2903 				#ifdef CONFIG_PNP
 2904 				#endif
 2905 				/* LDV_COMMENT_END_PREP */
 2906 				ldv_s_wbsd_driver_platform_driver++;
 2907 
 2908 				}
 2909 
 2910 			}
 2911 
 2912 			break;
 2913 			case 4: {
 2914 
 2915 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2916 				if(ldv_s_wbsd_driver_platform_driver==1) {
 2917 
 2918 				/* content: static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state)*/
 2919 				/* LDV_COMMENT_BEGIN_PREP */
 2920 				#define DRIVER_NAME "wbsd"
 2921 				#define DBG(x...) \
 2922 	pr_debug(DRIVER_NAME ": " x)
 2923 				#define DBGF(f, x...) \
 2924 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2925 				#ifdef CONFIG_PNP
 2926 				#endif 
 2927 				#ifdef CONFIG_PNP
 2928 				#else
 2929 				#endif
 2930 				#ifdef CONFIG_MMC_DEBUG
 2931 				#endif
 2932 				#ifdef CONFIG_PM
 2933 				#endif
 2934 				#ifdef CONFIG_PNP
 2935 				#endif 
 2936 				#ifdef CONFIG_PM
 2937 				/* LDV_COMMENT_END_PREP */
 2938 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "wbsd_driver" */
 2939 				ldv_handler_precall();
 2940 				wbsd_platform_suspend( var_group4, var_wbsd_platform_suspend_52_p1);
 2941 				/* LDV_COMMENT_BEGIN_PREP */
 2942 				#ifdef CONFIG_PNP
 2943 				#endif 
 2944 				#else 
 2945 				#define wbsd_platform_suspend NULL
 2946 				#define wbsd_platform_resume NULL
 2947 				#define wbsd_pnp_suspend NULL
 2948 				#define wbsd_pnp_resume NULL
 2949 				#endif 
 2950 				#ifdef CONFIG_PNP
 2951 				#endif 
 2952 				#ifdef CONFIG_PNP
 2953 				#endif 
 2954 				#ifdef CONFIG_PNP
 2955 				#endif 
 2956 				#ifdef CONFIG_PNP
 2957 				#endif
 2958 				#ifdef CONFIG_PNP
 2959 				#endif
 2960 				/* LDV_COMMENT_END_PREP */
 2961 				ldv_s_wbsd_driver_platform_driver++;
 2962 
 2963 				}
 2964 
 2965 			}
 2966 
 2967 			break;
 2968 			case 5: {
 2969 
 2970 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2971 				if(ldv_s_wbsd_driver_platform_driver==2) {
 2972 
 2973 				/* content: static int wbsd_platform_resume(struct platform_device *dev)*/
 2974 				/* LDV_COMMENT_BEGIN_PREP */
 2975 				#define DRIVER_NAME "wbsd"
 2976 				#define DBG(x...) \
 2977 	pr_debug(DRIVER_NAME ": " x)
 2978 				#define DBGF(f, x...) \
 2979 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2980 				#ifdef CONFIG_PNP
 2981 				#endif 
 2982 				#ifdef CONFIG_PNP
 2983 				#else
 2984 				#endif
 2985 				#ifdef CONFIG_MMC_DEBUG
 2986 				#endif
 2987 				#ifdef CONFIG_PM
 2988 				#endif
 2989 				#ifdef CONFIG_PNP
 2990 				#endif 
 2991 				#ifdef CONFIG_PM
 2992 				/* LDV_COMMENT_END_PREP */
 2993 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "wbsd_driver" */
 2994 				ldv_handler_precall();
 2995 				wbsd_platform_resume( var_group4);
 2996 				/* LDV_COMMENT_BEGIN_PREP */
 2997 				#ifdef CONFIG_PNP
 2998 				#endif 
 2999 				#else 
 3000 				#define wbsd_platform_suspend NULL
 3001 				#define wbsd_platform_resume NULL
 3002 				#define wbsd_pnp_suspend NULL
 3003 				#define wbsd_pnp_resume NULL
 3004 				#endif 
 3005 				#ifdef CONFIG_PNP
 3006 				#endif 
 3007 				#ifdef CONFIG_PNP
 3008 				#endif 
 3009 				#ifdef CONFIG_PNP
 3010 				#endif 
 3011 				#ifdef CONFIG_PNP
 3012 				#endif
 3013 				#ifdef CONFIG_PNP
 3014 				#endif
 3015 				/* LDV_COMMENT_END_PREP */
 3016 				ldv_s_wbsd_driver_platform_driver++;
 3017 
 3018 				}
 3019 
 3020 			}
 3021 
 3022 			break;
 3023 			case 6: {
 3024 
 3025 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 3026 				if(ldv_s_wbsd_driver_platform_driver==3) {
 3027 
 3028 				/* content: static int wbsd_remove(struct platform_device *dev)*/
 3029 				/* LDV_COMMENT_BEGIN_PREP */
 3030 				#define DRIVER_NAME "wbsd"
 3031 				#define DBG(x...) \
 3032 	pr_debug(DRIVER_NAME ": " x)
 3033 				#define DBGF(f, x...) \
 3034 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3035 				#ifdef CONFIG_PNP
 3036 				#endif 
 3037 				#ifdef CONFIG_PNP
 3038 				#else
 3039 				#endif
 3040 				#ifdef CONFIG_MMC_DEBUG
 3041 				#endif
 3042 				#ifdef CONFIG_PM
 3043 				#endif
 3044 				/* LDV_COMMENT_END_PREP */
 3045 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "wbsd_driver" */
 3046 				ldv_handler_precall();
 3047 				wbsd_remove( var_group4);
 3048 				/* LDV_COMMENT_BEGIN_PREP */
 3049 				#ifdef CONFIG_PNP
 3050 				#endif 
 3051 				#ifdef CONFIG_PM
 3052 				#ifdef CONFIG_PNP
 3053 				#endif 
 3054 				#else 
 3055 				#define wbsd_platform_suspend NULL
 3056 				#define wbsd_platform_resume NULL
 3057 				#define wbsd_pnp_suspend NULL
 3058 				#define wbsd_pnp_resume NULL
 3059 				#endif 
 3060 				#ifdef CONFIG_PNP
 3061 				#endif 
 3062 				#ifdef CONFIG_PNP
 3063 				#endif 
 3064 				#ifdef CONFIG_PNP
 3065 				#endif 
 3066 				#ifdef CONFIG_PNP
 3067 				#endif
 3068 				#ifdef CONFIG_PNP
 3069 				#endif
 3070 				/* LDV_COMMENT_END_PREP */
 3071 				ldv_s_wbsd_driver_platform_driver=0;
 3072 
 3073 				}
 3074 
 3075 			}
 3076 
 3077 			break;
 3078 			case 7: {
 3079 
 3080 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3081 				if(ldv_s_wbsd_pnp_driver_pnp_driver==0) {
 3082 
 3083 				/* content: static int wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)*/
 3084 				/* LDV_COMMENT_BEGIN_PREP */
 3085 				#define DRIVER_NAME "wbsd"
 3086 				#define DBG(x...) \
 3087 	pr_debug(DRIVER_NAME ": " x)
 3088 				#define DBGF(f, x...) \
 3089 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3090 				#ifdef CONFIG_PNP
 3091 				#endif 
 3092 				#ifdef CONFIG_PNP
 3093 				#else
 3094 				#endif
 3095 				#ifdef CONFIG_MMC_DEBUG
 3096 				#endif
 3097 				#ifdef CONFIG_PM
 3098 				#endif
 3099 				#ifdef CONFIG_PNP
 3100 				/* LDV_COMMENT_END_PREP */
 3101 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "wbsd_pnp_driver". Standart function test for correct return result. */
 3102 				res_wbsd_pnp_probe_50 = wbsd_pnp_probe( var_group5, var_wbsd_pnp_probe_50_p1);
 3103 				 ldv_check_return_value(res_wbsd_pnp_probe_50);
 3104 				 ldv_check_return_value_probe(res_wbsd_pnp_probe_50);
 3105 				 if(res_wbsd_pnp_probe_50) 
 3106 					goto ldv_module_exit;
 3107 				/* LDV_COMMENT_BEGIN_PREP */
 3108 				#endif 
 3109 				#ifdef CONFIG_PM
 3110 				#ifdef CONFIG_PNP
 3111 				#endif 
 3112 				#else 
 3113 				#define wbsd_platform_suspend NULL
 3114 				#define wbsd_platform_resume NULL
 3115 				#define wbsd_pnp_suspend NULL
 3116 				#define wbsd_pnp_resume NULL
 3117 				#endif 
 3118 				#ifdef CONFIG_PNP
 3119 				#endif 
 3120 				#ifdef CONFIG_PNP
 3121 				#endif 
 3122 				#ifdef CONFIG_PNP
 3123 				#endif 
 3124 				#ifdef CONFIG_PNP
 3125 				#endif
 3126 				#ifdef CONFIG_PNP
 3127 				#endif
 3128 				/* LDV_COMMENT_END_PREP */
 3129 				ldv_s_wbsd_pnp_driver_pnp_driver++;
 3130 
 3131 				}
 3132 
 3133 			}
 3134 
 3135 			break;
 3136 			case 8: {
 3137 
 3138 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3139 				if(ldv_s_wbsd_pnp_driver_pnp_driver==1) {
 3140 
 3141 				/* content: static void wbsd_pnp_remove(struct pnp_dev *dev)*/
 3142 				/* LDV_COMMENT_BEGIN_PREP */
 3143 				#define DRIVER_NAME "wbsd"
 3144 				#define DBG(x...) \
 3145 	pr_debug(DRIVER_NAME ": " x)
 3146 				#define DBGF(f, x...) \
 3147 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3148 				#ifdef CONFIG_PNP
 3149 				#endif 
 3150 				#ifdef CONFIG_PNP
 3151 				#else
 3152 				#endif
 3153 				#ifdef CONFIG_MMC_DEBUG
 3154 				#endif
 3155 				#ifdef CONFIG_PM
 3156 				#endif
 3157 				#ifdef CONFIG_PNP
 3158 				/* LDV_COMMENT_END_PREP */
 3159 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "wbsd_pnp_driver" */
 3160 				ldv_handler_precall();
 3161 				wbsd_pnp_remove( var_group5);
 3162 				/* LDV_COMMENT_BEGIN_PREP */
 3163 				#endif 
 3164 				#ifdef CONFIG_PM
 3165 				#ifdef CONFIG_PNP
 3166 				#endif 
 3167 				#else 
 3168 				#define wbsd_platform_suspend NULL
 3169 				#define wbsd_platform_resume NULL
 3170 				#define wbsd_pnp_suspend NULL
 3171 				#define wbsd_pnp_resume NULL
 3172 				#endif 
 3173 				#ifdef CONFIG_PNP
 3174 				#endif 
 3175 				#ifdef CONFIG_PNP
 3176 				#endif 
 3177 				#ifdef CONFIG_PNP
 3178 				#endif 
 3179 				#ifdef CONFIG_PNP
 3180 				#endif
 3181 				#ifdef CONFIG_PNP
 3182 				#endif
 3183 				/* LDV_COMMENT_END_PREP */
 3184 				ldv_s_wbsd_pnp_driver_pnp_driver=0;
 3185 
 3186 				}
 3187 
 3188 			}
 3189 
 3190 			break;
 3191 			case 9: {
 3192 
 3193 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3194 				
 3195 
 3196 				/* content: static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)*/
 3197 				/* LDV_COMMENT_BEGIN_PREP */
 3198 				#define DRIVER_NAME "wbsd"
 3199 				#define DBG(x...) \
 3200 	pr_debug(DRIVER_NAME ": " x)
 3201 				#define DBGF(f, x...) \
 3202 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3203 				#ifdef CONFIG_PNP
 3204 				#endif 
 3205 				#ifdef CONFIG_PNP
 3206 				#else
 3207 				#endif
 3208 				#ifdef CONFIG_MMC_DEBUG
 3209 				#endif
 3210 				#ifdef CONFIG_PM
 3211 				#endif
 3212 				#ifdef CONFIG_PNP
 3213 				#endif 
 3214 				#ifdef CONFIG_PM
 3215 				#ifdef CONFIG_PNP
 3216 				/* LDV_COMMENT_END_PREP */
 3217 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "wbsd_pnp_driver" */
 3218 				ldv_handler_precall();
 3219 				wbsd_pnp_suspend( var_group5, var_wbsd_pnp_suspend_54_p1);
 3220 				/* LDV_COMMENT_BEGIN_PREP */
 3221 				#endif 
 3222 				#else 
 3223 				#define wbsd_platform_suspend NULL
 3224 				#define wbsd_platform_resume NULL
 3225 				#define wbsd_pnp_suspend NULL
 3226 				#define wbsd_pnp_resume NULL
 3227 				#endif 
 3228 				#ifdef CONFIG_PNP
 3229 				#endif 
 3230 				#ifdef CONFIG_PNP
 3231 				#endif 
 3232 				#ifdef CONFIG_PNP
 3233 				#endif 
 3234 				#ifdef CONFIG_PNP
 3235 				#endif
 3236 				#ifdef CONFIG_PNP
 3237 				#endif
 3238 				/* LDV_COMMENT_END_PREP */
 3239 				
 3240 
 3241 				
 3242 
 3243 			}
 3244 
 3245 			break;
 3246 			case 10: {
 3247 
 3248 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3249 				
 3250 
 3251 				/* content: static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)*/
 3252 				/* LDV_COMMENT_BEGIN_PREP */
 3253 				#define DRIVER_NAME "wbsd"
 3254 				#define DBG(x...) \
 3255 	pr_debug(DRIVER_NAME ": " x)
 3256 				#define DBGF(f, x...) \
 3257 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3258 				#ifdef CONFIG_PNP
 3259 				#endif 
 3260 				#ifdef CONFIG_PNP
 3261 				#else
 3262 				#endif
 3263 				#ifdef CONFIG_MMC_DEBUG
 3264 				#endif
 3265 				#ifdef CONFIG_PM
 3266 				#endif
 3267 				#ifdef CONFIG_PNP
 3268 				#endif 
 3269 				#ifdef CONFIG_PM
 3270 				#ifdef CONFIG_PNP
 3271 				/* LDV_COMMENT_END_PREP */
 3272 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "wbsd_pnp_driver" */
 3273 				ldv_handler_precall();
 3274 				wbsd_pnp_resume( var_group5);
 3275 				/* LDV_COMMENT_BEGIN_PREP */
 3276 				#endif 
 3277 				#else 
 3278 				#define wbsd_platform_suspend NULL
 3279 				#define wbsd_platform_resume NULL
 3280 				#define wbsd_pnp_suspend NULL
 3281 				#define wbsd_pnp_resume NULL
 3282 				#endif 
 3283 				#ifdef CONFIG_PNP
 3284 				#endif 
 3285 				#ifdef CONFIG_PNP
 3286 				#endif 
 3287 				#ifdef CONFIG_PNP
 3288 				#endif 
 3289 				#ifdef CONFIG_PNP
 3290 				#endif
 3291 				#ifdef CONFIG_PNP
 3292 				#endif
 3293 				/* LDV_COMMENT_END_PREP */
 3294 				
 3295 
 3296 				
 3297 
 3298 			}
 3299 
 3300 			break;
 3301 			case 11: {
 3302 
 3303 				/** CALLBACK SECTION request_irq **/
 3304 				LDV_IN_INTERRUPT=2;
 3305 
 3306 				/* content: static irqreturn_t wbsd_irq(int irq, void *dev_id)*/
 3307 				/* LDV_COMMENT_BEGIN_PREP */
 3308 				#define DRIVER_NAME "wbsd"
 3309 				#define DBG(x...) \
 3310 	pr_debug(DRIVER_NAME ": " x)
 3311 				#define DBGF(f, x...) \
 3312 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3313 				#ifdef CONFIG_PNP
 3314 				#endif 
 3315 				#ifdef CONFIG_PNP
 3316 				#else
 3317 				#endif
 3318 				#ifdef CONFIG_MMC_DEBUG
 3319 				#endif
 3320 				/* LDV_COMMENT_END_PREP */
 3321 				/* LDV_COMMENT_FUNCTION_CALL */
 3322 				ldv_handler_precall();
 3323 				wbsd_irq( var_wbsd_irq_31_p0, var_wbsd_irq_31_p1);
 3324 				/* LDV_COMMENT_BEGIN_PREP */
 3325 				#ifdef CONFIG_PM
 3326 				#endif
 3327 				#ifdef CONFIG_PNP
 3328 				#endif 
 3329 				#ifdef CONFIG_PM
 3330 				#ifdef CONFIG_PNP
 3331 				#endif 
 3332 				#else 
 3333 				#define wbsd_platform_suspend NULL
 3334 				#define wbsd_platform_resume NULL
 3335 				#define wbsd_pnp_suspend NULL
 3336 				#define wbsd_pnp_resume NULL
 3337 				#endif 
 3338 				#ifdef CONFIG_PNP
 3339 				#endif 
 3340 				#ifdef CONFIG_PNP
 3341 				#endif 
 3342 				#ifdef CONFIG_PNP
 3343 				#endif 
 3344 				#ifdef CONFIG_PNP
 3345 				#endif
 3346 				#ifdef CONFIG_PNP
 3347 				#endif
 3348 				/* LDV_COMMENT_END_PREP */
 3349 				LDV_IN_INTERRUPT=1;
 3350 
 3351 				
 3352 
 3353 			}
 3354 
 3355 			break;
 3356 			case 12: {
 3357 
 3358 				/** TIMER SECTION timer **/
 3359 				
 3360 
 3361 				/* content: static void wbsd_reset_ignore(unsigned long data)*/
 3362 				/* LDV_COMMENT_BEGIN_PREP */
 3363 				#define DRIVER_NAME "wbsd"
 3364 				#define DBG(x...) \
 3365 	pr_debug(DRIVER_NAME ": " x)
 3366 				#define DBGF(f, x...) \
 3367 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3368 				#ifdef CONFIG_PNP
 3369 				#endif 
 3370 				#ifdef CONFIG_PNP
 3371 				#else
 3372 				#endif
 3373 				#ifdef CONFIG_MMC_DEBUG
 3374 				#endif
 3375 				/* LDV_COMMENT_END_PREP */
 3376 				/* LDV_COMMENT_FUNCTION_CALL */
 3377 				ldv_handler_precall();
 3378 				wbsd_reset_ignore( var_wbsd_reset_ignore_24_p0);
 3379 				/* LDV_COMMENT_BEGIN_PREP */
 3380 				#ifdef CONFIG_PM
 3381 				#endif
 3382 				#ifdef CONFIG_PNP
 3383 				#endif 
 3384 				#ifdef CONFIG_PM
 3385 				#ifdef CONFIG_PNP
 3386 				#endif 
 3387 				#else 
 3388 				#define wbsd_platform_suspend NULL
 3389 				#define wbsd_platform_resume NULL
 3390 				#define wbsd_pnp_suspend NULL
 3391 				#define wbsd_pnp_resume NULL
 3392 				#endif 
 3393 				#ifdef CONFIG_PNP
 3394 				#endif 
 3395 				#ifdef CONFIG_PNP
 3396 				#endif 
 3397 				#ifdef CONFIG_PNP
 3398 				#endif 
 3399 				#ifdef CONFIG_PNP
 3400 				#endif
 3401 				#ifdef CONFIG_PNP
 3402 				#endif
 3403 				/* LDV_COMMENT_END_PREP */
 3404 				
 3405 
 3406 				
 3407 
 3408 			}
 3409 
 3410 			break;
 3411 			default: break;
 3412 
 3413 		}
 3414 
 3415 	}
 3416 
 3417 	ldv_module_exit: 
 3418 
 3419 	/** INIT: init_type: ST_MODULE_EXIT **/
 3420 	/* content: static void __exit wbsd_drv_exit(void)*/
 3421 	/* LDV_COMMENT_BEGIN_PREP */
 3422 	#define DRIVER_NAME "wbsd"
 3423 	#define DBG(x...) \
 3424 	pr_debug(DRIVER_NAME ": " x)
 3425 	#define DBGF(f, x...) \
 3426 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3427 	#ifdef CONFIG_PNP
 3428 	#endif 
 3429 	#ifdef CONFIG_PNP
 3430 	#else
 3431 	#endif
 3432 	#ifdef CONFIG_MMC_DEBUG
 3433 	#endif
 3434 	#ifdef CONFIG_PM
 3435 	#endif
 3436 	#ifdef CONFIG_PNP
 3437 	#endif 
 3438 	#ifdef CONFIG_PM
 3439 	#ifdef CONFIG_PNP
 3440 	#endif 
 3441 	#else 
 3442 	#define wbsd_platform_suspend NULL
 3443 	#define wbsd_platform_resume NULL
 3444 	#define wbsd_pnp_suspend NULL
 3445 	#define wbsd_pnp_resume NULL
 3446 	#endif 
 3447 	#ifdef CONFIG_PNP
 3448 	#endif 
 3449 	#ifdef CONFIG_PNP
 3450 	#endif 
 3451 	/* LDV_COMMENT_END_PREP */
 3452 	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
 3453 	ldv_handler_precall();
 3454 	wbsd_drv_exit();
 3455 	/* LDV_COMMENT_BEGIN_PREP */
 3456 	#ifdef CONFIG_PNP
 3457 	#endif
 3458 	#ifdef CONFIG_PNP
 3459 	#endif
 3460 	/* LDV_COMMENT_END_PREP */
 3461 
 3462 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
 3463 	ldv_final: ldv_check_final_state();
 3464 
 3465 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 3466 	return;
 3467 
 3468 }
 3469 #endif
 3470 
 3471 /* LDV_COMMENT_END_MAIN */                 1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 extern void ldv_dma_map_page(void);
    9 extern void ldv_dma_mapping_error(void);
   10 #line 1 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/2264/dscv_tempdir/dscv/ri/331_1a/drivers/mmc/host/wbsd.c"
   11 
   12 /*
   13  *  linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver
   14  *
   15  *  Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
   16  *
   17  * This program is free software; you can redistribute it and/or modify
   18  * it under the terms of the GNU General Public License as published by
   19  * the Free Software Foundation; either version 2 of the License, or (at
   20  * your option) any later version.
   21  *
   22  *
   23  * Warning!
   24  *
   25  * Changes to the FIFO system should be done with extreme care since
   26  * the hardware is full of bugs related to the FIFO. Known issues are:
   27  *
   28  * - FIFO size field in FSR is always zero.
   29  *
   30  * - FIFO interrupts tend not to work as they should. Interrupts are
   31  *   triggered only for full/empty events, not for threshold values.
   32  *
   33  * - On APIC systems the FIFO empty interrupt is sometimes lost.
   34  */
   35 
   36 #include <linux/module.h>
   37 #include <linux/moduleparam.h>
   38 #include <linux/init.h>
   39 #include <linux/ioport.h>
   40 #include <linux/platform_device.h>
   41 #include <linux/interrupt.h>
   42 #include <linux/dma-mapping.h>
   43 #include <linux/delay.h>
   44 #include <linux/pnp.h>
   45 #include <linux/highmem.h>
   46 #include <linux/mmc/host.h>
   47 #include <linux/scatterlist.h>
   48 #include <linux/slab.h>
   49 
   50 #include <asm/io.h>
   51 #include <asm/dma.h>
   52 
   53 #include "wbsd.h"
   54 
   55 #define DRIVER_NAME "wbsd"
   56 
   57 #define DBG(x...) \
   58 	pr_debug(DRIVER_NAME ": " x)
   59 #define DBGF(f, x...) \
   60 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
   61 
   62 /*
   63  * Device resources
   64  */
   65 
#ifdef CONFIG_PNP

/* PnP IDs this driver binds to (see MODULE_DEVICE_TABLE below). */
static const struct pnp_device_id pnp_dev_table[] = {
	{ "WEC0517", 0 },
	{ "WEC0518", 0 },
	{ "", 0 },	/* terminator */
};

MODULE_DEVICE_TABLE(pnp, pnp_dev_table);

#endif /* CONFIG_PNP */
   77 
/* Candidate SuperIO configuration ports and the matching unlock codes. */
static const int config_ports[] = { 0x2E, 0x4E };
static const int unlock_codes[] = { 0x83, 0x87 };

/* Chip IDs accepted by this driver. */
static const int valid_ids[] = {
	0x7112,
};

/*
 * param_nopnp selects between PnP probing (0) and the hard-coded
 * resources below (1). Without CONFIG_PNP it is forced to 1.
 */
#ifdef CONFIG_PNP
static unsigned int param_nopnp = 0;
#else
static const unsigned int param_nopnp = 1;
#endif
/* Fallback resources used when not probing via PnP. */
static unsigned int param_io = 0x248;
static unsigned int param_irq = 6;
static int param_dma = 2;
   93 
   94 /*
   95  * Basic functions
   96  */
   97 
   98 static inline void wbsd_unlock_config(struct wbsd_host *host)
   99 {
  100 	BUG_ON(host->config == 0);
  101 
  102 	outb(host->unlock_code, host->config);
  103 	outb(host->unlock_code, host->config);
  104 }
  105 
/* Re-lock the SuperIO configuration registers after wbsd_unlock_config(). */
static inline void wbsd_lock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);

	outb(LOCK_CODE, host->config);
}
  112 
/*
 * Write a SuperIO configuration register: the index goes to the
 * config port, the value to the port one above it. Requires the
 * config space to be unlocked (see wbsd_unlock_config()).
 */
static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
{
	BUG_ON(host->config == 0);

	outb(reg, host->config);
	outb(value, host->config + 1);
}
  120 
/*
 * Read a SuperIO configuration register: select the index via the
 * config port, then read the value from the port one above it.
 */
static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
{
	BUG_ON(host->config == 0);

	outb(reg, host->config);
	return inb(host->config + 1);
}
  128 
/*
 * Write an indexed SD/MMC controller register via the IDXR/DATAR
 * port pair at the device's I/O base.
 */
static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
{
	outb(index, host->base + WBSD_IDXR);
	outb(value, host->base + WBSD_DATAR);
}
  134 
/*
 * Read an indexed SD/MMC controller register via the IDXR/DATAR
 * port pair at the device's I/O base.
 */
static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
{
	outb(index, host->base + WBSD_IDXR);
	return inb(host->base + WBSD_DATAR);
}
  140 
  141 /*
  142  * Common routines
  143  */
  144 
/*
 * Bring the SD/MMC part of the chip into a known idle state:
 * reset core and FIFO, power the port down, set the maximum
 * timeout, latch the current card-detect state and enable the
 * interrupt sources the driver handles.
 */
static void wbsd_init_device(struct wbsd_host *host)
{
	u8 setup, ier;

	/*
	 * Reset chip (SD/MMC part) and fifo.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Set DAT3 to input
	 */
	setup &= ~WBSD_DAT3_H;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
	/* Card-detect events are valid again from this point on. */
	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Read back default clock.
	 */
	host->clk = wbsd_read_index(host, WBSD_IDX_CLK);

	/*
	 * Power down port.
	 */
	outb(WBSD_POWER_N, host->base + WBSD_CSR);

	/*
	 * Set maximum timeout.
	 */
	wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);

	/*
	 * Test for card presence
	 */
	if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
		host->flags |= WBSD_FCARD_PRESENT;
	else
		host->flags &= ~WBSD_FCARD_PRESENT;

	/*
	 * Enable interesting interrupts: card detect, FIFO threshold,
	 * CRC error, timeout and transfer complete.
	 */
	ier = 0;
	ier |= WBSD_EINT_CARD;
	ier |= WBSD_EINT_FIFO_THRE;
	ier |= WBSD_EINT_CRC;
	ier |= WBSD_EINT_TIMEOUT;
	ier |= WBSD_EINT_TC;

	outb(ier, host->base + WBSD_EIR);

	/*
	 * Clear interrupts (reading ISR acknowledges pending events).
	 */
	inb(host->base + WBSD_ISR);
}
  203 
/*
 * Soft-reset the SD/MMC part of the chip. Logged at error level
 * since it is only invoked as an error-recovery measure.
 */
static void wbsd_reset(struct wbsd_host *host)
{
	u8 setup;

	pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc));

	/*
	 * Soft reset of chip (SD/MMC part).
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
}
  217 
/*
 * Complete a request: release the ISA DMA channel if one was used,
 * clear the in-flight request and report completion to the MMC core.
 * Called with host->lock held; the lock is dropped around
 * mmc_request_done() because the core may call back into the driver.
 */
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
  246 
  247 /*
  248  * Scatter/gather functions
  249  */
  250 
  251 static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
  252 {
  253 	/*
  254 	 * Get info. about SG list from data structure.
  255 	 */
  256 	host->cur_sg = data->sg;
  257 	host->num_sg = data->sg_len;
  258 
  259 	host->offset = 0;
  260 	host->remain = host->cur_sg->length;
  261 }
  262 
  263 static inline int wbsd_next_sg(struct wbsd_host *host)
  264 {
  265 	/*
  266 	 * Skip to next SG entry.
  267 	 */
  268 	host->cur_sg++;
  269 	host->num_sg--;
  270 
  271 	/*
  272 	 * Any entries left?
  273 	 */
  274 	if (host->num_sg > 0) {
  275 		host->offset = 0;
  276 		host->remain = host->cur_sg->length;
  277 	}
  278 
  279 	return host->num_sg;
  280 }
  281 
/* Kernel virtual address of the current scatter/gather entry. */
static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
{
	return sg_virt(host->cur_sg);
}
  286 
  287 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
  288 {
  289 	unsigned int len, i;
  290 	struct scatterlist *sg;
  291 	char *dmabuf = host->dma_buffer;
  292 	char *sgbuf;
  293 
  294 	sg = data->sg;
  295 	len = data->sg_len;
  296 
  297 	for (i = 0; i < len; i++) {
  298 		sgbuf = sg_virt(&sg[i]);
  299 		memcpy(dmabuf, sgbuf, sg[i].length);
  300 		dmabuf += sg[i].length;
  301 	}
  302 }
  303 
  304 static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
  305 {
  306 	unsigned int len, i;
  307 	struct scatterlist *sg;
  308 	char *dmabuf = host->dma_buffer;
  309 	char *sgbuf;
  310 
  311 	sg = data->sg;
  312 	len = data->sg_len;
  313 
  314 	for (i = 0; i < len; i++) {
  315 		sgbuf = sg_virt(&sg[i]);
  316 		memcpy(sgbuf, dmabuf, sg[i].length);
  317 		dmabuf += sg[i].length;
  318 	}
  319 }
  320 
  321 /*
  322  * Command handling
  323  */
  324 
/*
 * Read back a short (48-bit) command response from the controller's
 * response registers. Sets cmd->error to -EILSEQ if the controller
 * latched a response of a different length than expected.
 */
static inline void wbsd_get_short_reply(struct wbsd_host *host,
					struct mmc_command *cmd)
{
	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
		cmd->error = -EILSEQ;
		return;
	}

	/* Assemble resp[0] big-endian from four consecutive registers. */
	cmd->resp[0]  = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
	cmd->resp[1]  = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
}
  342 
/*
 * Read back a long (136-bit) command response, assembling each of
 * the four 32-bit resp words big-endian from four registers. Sets
 * cmd->error to -EILSEQ if the latched response length differs.
 */
static inline void wbsd_get_long_reply(struct wbsd_host *host,
	struct mmc_command *cmd)
{
	int i;

	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
		cmd->error = -EILSEQ;
		return;
	}

	for (i = 0; i < 4; i++) {
		cmd->resp[i] =
			wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
	}
}
  367 
/*
 * Send one MMC command to the card and collect its response.
 * Busy-waits until the controller reports no card traffic, then
 * maps interrupt events accumulated in host->isr (by the IRQ
 * handler) to an error code, or decodes the response registers.
 */
static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
{
	int i;
	u8 status, isr;

	/*
	 * Clear accumulated ISR. The interrupt routine
	 * will fill this one with events that occur during
	 * transfer.
	 */
	host->isr = 0;

	/*
	 * Send the command (CRC calculated by host).
	 * Opcode first, then the 32-bit argument MSB first.
	 */
	outb(cmd->opcode, host->base + WBSD_CMDR);
	for (i = 3; i >= 0; i--)
		outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);

	cmd->error = 0;

	/*
	 * Wait for the request to complete (busy-poll the status
	 * register until the card-traffic bit clears).
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & WBSD_CARDTRAFFIC);

	/*
	 * Do we expect a reply?
	 */
	if (cmd->flags & MMC_RSP_PRESENT) {
		/*
		 * Read back status.
		 */
		isr = host->isr;

		/* Card removed? */
		if (isr & WBSD_INT_CARD)
			cmd->error = -ENOMEDIUM;
		/* Timeout? */
		else if (isr & WBSD_INT_TIMEOUT)
			cmd->error = -ETIMEDOUT;
		/* CRC? (only an error if the response carries a CRC) */
		else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
			cmd->error = -EILSEQ;
		/* All ok */
		else {
			if (cmd->flags & MMC_RSP_136)
				wbsd_get_long_reply(host, cmd);
			else
				wbsd_get_short_reply(host, cmd);
		}
	}
}
  423 
  424 /*
  425  * Data functions
  426  */
  427 
/*
 * Drain the controller's data FIFO into the current scatter-gather
 * buffer (PIO read path).  Advances host->offset/host->remain and moves
 * to the next sg entry as each one fills up.
 *
 * NOTE(review): called with host->lock held (from wbsd_tasklet_fifo()).
 */
static void wbsd_empty_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Handle excessive data.
	 * (Nothing left in the sg list -> discard by returning early.)
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Drain the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing.
		 */
		if (fsr & WBSD_FIFO_FULL)
			fifo = 16;
		else if (fsr & WBSD_FIFO_FUTHRE)
			fifo = 8;
		else
			fifo = 1;

		for (i = 0; i < fifo; i++) {
			*buffer = inb(host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * This is a very dirty hack to solve a
	 * hardware problem. The chip doesn't trigger
	 * FIFO threshold interrupts properly.
	 */
	if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
		tasklet_schedule(&host->fifo_tasklet);
}
  489 
/*
 * Push data from the current scatter-gather buffer into the controller's
 * FIFO (PIO write path).  Mirror image of wbsd_empty_fifo().
 *
 * NOTE(review): called with host->lock held (from wbsd_tasklet_fifo()
 * and wbsd_prepare_data()).
 */
static void wbsd_fill_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Check that we aren't being called after the
	 * entire buffer has been transferred.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Fill the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing.
		 * ("fifo" here estimates bytes already in the FIFO.)
		 */
		if (fsr & WBSD_FIFO_EMPTY)
			fifo = 0;
		else if (fsr & WBSD_FIFO_EMTHRE)
			fifo = 8;
		else
			fifo = 15;

		/* Top the FIFO up to its 16-byte capacity. */
		for (i = 16; i > fifo; i--) {
			outb(*buffer, host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * The controller stops sending interrupts for
	 * 'FIFO empty' under certain conditions. So we
	 * need to be a bit more pro-active.
	 */
	tasklet_schedule(&host->fifo_tasklet);
}
  551 
  552 static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
  553 {
  554 	u16 blksize;
  555 	u8 setup;
  556 	unsigned long dmaflags;
  557 	unsigned int size;
  558 
  559 	/*
  560 	 * Calculate size.
  561 	 */
  562 	size = data->blocks * data->blksz;
  563 
  564 	/*
  565 	 * Check timeout values for overflow.
  566 	 * (Yes, some cards cause this value to overflow).
  567 	 */
  568 	if (data->timeout_ns > 127000000)
  569 		wbsd_write_index(host, WBSD_IDX_TAAC, 127);
  570 	else {
  571 		wbsd_write_index(host, WBSD_IDX_TAAC,
  572 			data->timeout_ns / 1000000);
  573 	}
  574 
  575 	if (data->timeout_clks > 255)
  576 		wbsd_write_index(host, WBSD_IDX_NSAC, 255);
  577 	else
  578 		wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
  579 
  580 	/*
  581 	 * Inform the chip of how large blocks will be
  582 	 * sent. It needs this to determine when to
  583 	 * calculate CRC.
  584 	 *
  585 	 * Space for CRC must be included in the size.
  586 	 * Two bytes are needed for each data line.
  587 	 */
  588 	if (host->bus_width == MMC_BUS_WIDTH_1) {
  589 		blksize = data->blksz + 2;
  590 
  591 		wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
  592 		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
  593 	} else if (host->bus_width == MMC_BUS_WIDTH_4) {
  594 		blksize = data->blksz + 2 * 4;
  595 
  596 		wbsd_write_index(host, WBSD_IDX_PBSMSB,
  597 			((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
  598 		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
  599 	} else {
  600 		data->error = -EINVAL;
  601 		return;
  602 	}
  603 
  604 	/*
  605 	 * Clear the FIFO. This is needed even for DMA
  606 	 * transfers since the chip still uses the FIFO
  607 	 * internally.
  608 	 */
  609 	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
  610 	setup |= WBSD_FIFO_RESET;
  611 	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
  612 
  613 	/*
  614 	 * DMA transfer?
  615 	 */
  616 	if (host->dma >= 0) {
  617 		/*
  618 		 * The buffer for DMA is only 64 kB.
  619 		 */
  620 		BUG_ON(size > 0x10000);
  621 		if (size > 0x10000) {
  622 			data->error = -EINVAL;
  623 			return;
  624 		}
  625 
  626 		/*
  627 		 * Transfer data from the SG list to
  628 		 * the DMA buffer.
  629 		 */
  630 		if (data->flags & MMC_DATA_WRITE)
  631 			wbsd_sg_to_dma(host, data);
  632 
  633 		/*
  634 		 * Initialise the ISA DMA controller.
  635 		 */
  636 		dmaflags = claim_dma_lock();
  637 		disable_dma(host->dma);
  638 		clear_dma_ff(host->dma);
  639 		if (data->flags & MMC_DATA_READ)
  640 			set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
  641 		else
  642 			set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
  643 		set_dma_addr(host->dma, host->dma_addr);
  644 		set_dma_count(host->dma, size);
  645 
  646 		enable_dma(host->dma);
  647 		release_dma_lock(dmaflags);
  648 
  649 		/*
  650 		 * Enable DMA on the host.
  651 		 */
  652 		wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
  653 	} else {
  654 		/*
  655 		 * This flag is used to keep printk
  656 		 * output to a minimum.
  657 		 */
  658 		host->firsterr = 1;
  659 
  660 		/*
  661 		 * Initialise the SG list.
  662 		 */
  663 		wbsd_init_sg(host, data);
  664 
  665 		/*
  666 		 * Turn off DMA.
  667 		 */
  668 		wbsd_write_index(host, WBSD_IDX_DMA, 0);
  669 
  670 		/*
  671 		 * Set up FIFO threshold levels (and fill
  672 		 * buffer if doing a write).
  673 		 */
  674 		if (data->flags & MMC_DATA_READ) {
  675 			wbsd_write_index(host, WBSD_IDX_FIFOEN,
  676 				WBSD_FIFOEN_FULL | 8);
  677 		} else {
  678 			wbsd_write_index(host, WBSD_IDX_FIFOEN,
  679 				WBSD_FIFOEN_EMPTY | 8);
  680 			wbsd_fill_fifo(host);
  681 		}
  682 	}
  683 
  684 	data->error = 0;
  685 }
  686 
/*
 * Complete a data transfer: send the stop command (if any), wait for the
 * controller to leave the block-transfer state, tear down DMA and account
 * the number of bytes actually transferred, then end the request.
 *
 * NOTE(review): called with host->lock held (from wbsd_tasklet_finish()).
 */
static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
{
	unsigned long dmaflags;
	int count;
	u8 status;

	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);

	/*
	 * Wait for the controller to leave data
	 * transfer state.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Turn off ISA DMA controller and read back how many
		 * bytes it did NOT transfer.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);

		/* Round the transferred count down to whole blocks. */
		data->bytes_xfered = host->mrq->data->blocks *
			host->mrq->data->blksz - count;
		data->bytes_xfered -= data->bytes_xfered % data->blksz;

		/*
		 * Any leftover data?
		 */
		if (count) {
			pr_err("%s: Incomplete DMA transfer. "
				"%d bytes left.\n",
				mmc_hostname(host->mmc), count);

			if (!data->error)
				data->error = -EIO;
		} else {
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);
		}

		/* On error, don't trust the last (possibly partial) block. */
		if (data->error) {
			if (data->bytes_xfered)
				data->bytes_xfered -= data->blksz;
		}
	}

	wbsd_request_end(host, host->mrq);
}
  758 
  759 /*****************************************************************************\
  760  *                                                                           *
  761  * MMC layer callbacks                                                       *
  762  *                                                                           *
  763 \*****************************************************************************/
  764 
/*
 * mmc_host_ops.request callback: process one MMC request.
 *
 * Validates card presence and (for data commands) that the opcode is on
 * the controller's hard-wired list of data commands, prepares the data
 * phase, and issues the command.  Command-only requests are completed
 * here; data requests are finished later from the FIFO/finish tasklets.
 */
static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct wbsd_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;

	/*
	 * Disable tasklets to avoid a deadlock.
	 * (_bh variant keeps our own tasklets from preempting us.)
	 */
	spin_lock_bh(&host->lock);

	BUG_ON(host->mrq != NULL);

	cmd = mrq->cmd;

	host->mrq = mrq;

	/*
	 * Check that there is actually a card in the slot.
	 */
	if (!(host->flags & WBSD_FCARD_PRESENT)) {
		cmd->error = -ENOMEDIUM;
		goto done;
	}

	if (cmd->data) {
		/*
		 * The hardware is so delightfully stupid that it has a list
		 * of "data" commands. If a command isn't on this list, it'll
		 * just go back to the idle state and won't send any data
		 * interrupts.
		 */
		switch (cmd->opcode) {
		case 11:
		case 17:
		case 18:
		case 20:
		case 24:
		case 25:
		case 26:
		case 27:
		case 30:
		case 42:
		case 56:
			break;

		/* ACMDs. We don't keep track of state, so we just treat them
		 * like any other command. */
		case 51:
			break;

		default:
#ifdef CONFIG_MMC_DEBUG
			pr_warn("%s: Data command %d is not supported by this controller\n",
				mmc_hostname(host->mmc), cmd->opcode);
#endif
			cmd->error = -EINVAL;

			goto done;
		}
	}

	/*
	 * Does the request include data?
	 */
	if (cmd->data) {
		wbsd_prepare_data(host, cmd->data);

		/* Preparation may have rejected the transfer. */
		if (cmd->data->error)
			goto done;
	}

	wbsd_send_command(host, cmd);

	/*
	 * If this is a data transfer the request
	 * will be finished after the data has
	 * transferred.
	 */
	if (cmd->data && !cmd->error) {
		/*
		 * Dirty fix for hardware bug.
		 * (PIO mode: kick the FIFO tasklet manually.)
		 */
		if (host->dma == -1)
			tasklet_schedule(&host->fifo_tasklet);

		spin_unlock_bh(&host->lock);

		return;
	}

done:
	wbsd_request_end(host, mrq);

	spin_unlock_bh(&host->lock);
}
  860 
/*
 * mmc_host_ops.set_ios callback: apply clock, power, chip-select and
 * bus-width settings requested by the MMC core.
 */
static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 clk, setup, pwr;

	spin_lock_bh(&host->lock);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF)
		wbsd_init_device(host);

	/* Map the requested frequency onto the chip's fixed clock steps. */
	if (ios->clock >= 24000000)
		clk = WBSD_CLK_24M;
	else if (ios->clock >= 16000000)
		clk = WBSD_CLK_16M;
	else if (ios->clock >= 12000000)
		clk = WBSD_CLK_12M;
	else
		clk = WBSD_CLK_375K;

	/*
	 * Only write to the clock register when
	 * there is an actual change.
	 */
	if (clk != host->clk) {
		wbsd_write_index(host, WBSD_IDX_CLK, clk);
		host->clk = clk;
	}

	/*
	 * Power up card.
	 * (WBSD_POWER_N is active low, hence the clear.)
	 */
	if (ios->power_mode != MMC_POWER_OFF) {
		pwr = inb(host->base + WBSD_CSR);
		pwr &= ~WBSD_POWER_N;
		outb(pwr, host->base + WBSD_CSR);
	}

	/*
	 * MMC cards need to have pin 1 high during init.
	 * It wreaks havoc with the card detection though so
	 * that needs to be disabled.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	if (ios->chip_select == MMC_CS_HIGH) {
		BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
		setup |= WBSD_DAT3_H;
		host->flags |= WBSD_FIGNORE_DETECT;
	} else {
		if (setup & WBSD_DAT3_H) {
			setup &= ~WBSD_DAT3_H;

			/*
			 * We cannot resume card detection immediately
			 * because of capacitance and delays in the chip.
			 */
			mod_timer(&host->ignore_timer, jiffies + HZ / 100);
		}
	}
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Store bus width for later. Will be used when
	 * setting up the data transfer.
	 */
	host->bus_width = ios->bus_width;

	spin_unlock_bh(&host->lock);
}
  933 
/*
 * mmc_host_ops.get_ro callback: report the card's write-protect state.
 *
 * Pulses the WBSD_MSLED bit in the CSR with a 1 ms delay in between and
 * samples WBSD_WRPT from the final CSR read.  Returns 1 if the card is
 * read-only, 0 otherwise.
 */
static int wbsd_get_ro(struct mmc_host *mmc)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 csr;

	spin_lock_bh(&host->lock);

	csr = inb(host->base + WBSD_CSR);
	csr |= WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	mdelay(1);

	csr = inb(host->base + WBSD_CSR);
	csr &= ~WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	spin_unlock_bh(&host->lock);

	return !!(csr & WBSD_WRPT);
}
  955 
/* Host operations handed to the MMC core via mmc->ops. */
static const struct mmc_host_ops wbsd_ops = {
	.request	= wbsd_request,
	.set_ios	= wbsd_set_ios,
	.get_ro		= wbsd_get_ro,
};
  961 
  962 /*****************************************************************************\
  963  *                                                                           *
  964  * Interrupt handling                                                        *
  965  *                                                                           *
  966 \*****************************************************************************/
  967 
  968 /*
  969  * Helper function to reset detection ignore
  970  */
  971 
/*
 * Timer callback (host->ignore_timer): re-enable card detection after the
 * DAT3-high init window, then re-run detection in case the card status
 * changed while detection was being ignored.
 */
static void wbsd_reset_ignore(unsigned long data)
{
	struct wbsd_host *host = (struct wbsd_host *)data;

	BUG_ON(host == NULL);

	DBG("Resetting card detection ignore\n");

	spin_lock_bh(&host->lock);

	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Card status might have changed during the
	 * blackout.
	 */
	tasklet_schedule(&host->card_tasklet);

	spin_unlock_bh(&host->lock);
}
  992 
  993 /*
  994  * Tasklets
  995  */
  996 
  997 static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
  998 {
  999 	WARN_ON(!host->mrq);
 1000 	if (!host->mrq)
 1001 		return NULL;
 1002 
 1003 	WARN_ON(!host->mrq->cmd);
 1004 	if (!host->mrq->cmd)
 1005 		return NULL;
 1006 
 1007 	WARN_ON(!host->mrq->cmd->data);
 1008 	if (!host->mrq->cmd->data)
 1009 		return NULL;
 1010 
 1011 	return host->mrq->cmd->data;
 1012 }
 1013 
/*
 * Card-detect tasklet: sample the CSR and translate insert/remove edges
 * into mmc_detect_change() notifications.  A removal during an active
 * request aborts it with -ENOMEDIUM and resets the chip.
 */
static void wbsd_tasklet_card(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	u8 csr;
	int delay = -1;	/* -1 = no state change, nothing to report */

	spin_lock(&host->lock);

	/* Detection suppressed during the DAT3-high init window. */
	if (host->flags & WBSD_FIGNORE_DETECT) {
		spin_unlock(&host->lock);
		return;
	}

	csr = inb(host->base + WBSD_CSR);
	WARN_ON(csr == 0xff);	/* 0xff reads back if the chip vanished */

	if (csr & WBSD_CARDPRESENT) {
		if (!(host->flags & WBSD_FCARD_PRESENT)) {
			DBG("Card inserted\n");
			host->flags |= WBSD_FCARD_PRESENT;

			/* Debounce: let the card settle before scanning. */
			delay = 500;
		}
	} else if (host->flags & WBSD_FCARD_PRESENT) {
		DBG("Card removed\n");
		host->flags &= ~WBSD_FCARD_PRESENT;

		if (host->mrq) {
			pr_err("%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			wbsd_reset(host);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		delay = 0;
	}

	/*
	 * Unlock first since we might get a call back.
	 */

	spin_unlock(&host->lock);

	if (delay != -1)
		mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
 1062 
/*
 * FIFO tasklet: move data between the FIFO and the sg list in the proper
 * direction, and hand off to the finish tasklet once the sg list is
 * exhausted.
 */
static void wbsd_tasklet_fifo(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	if (data->flags & MMC_DATA_WRITE)
		wbsd_fill_fifo(host);
	else
		wbsd_empty_fifo(host);

	/*
	 * Done?  Disable FIFO interrupts and finish the request.
	 */
	if (host->num_sg == 0) {
		wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
		tasklet_schedule(&host->finish_tasklet);
	}

end:
	spin_unlock(&host->lock);
}
 1093 
/*
 * CRC-error tasklet: flag the in-flight data transfer with -EILSEQ and
 * schedule request completion.
 */
static void wbsd_tasklet_crc(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("CRC error\n");

	data->error = -EILSEQ;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}
 1117 
/*
 * Timeout tasklet: flag the in-flight data transfer with -ETIMEDOUT and
 * schedule request completion.
 */
static void wbsd_tasklet_timeout(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("Timeout\n");

	data->error = -ETIMEDOUT;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}
 1141 
/*
 * Finish tasklet: complete the data phase of the current request via
 * wbsd_finish_data() (which also ends the request).
 */
static void wbsd_tasklet_finish(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	WARN_ON(!host->mrq);
	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	wbsd_finish_data(host, data);

end:
	spin_unlock(&host->lock);
}
 1162 
 1163 /*
 1164  * Interrupt handling
 1165  */
 1166 
/*
 * Shared interrupt handler: accumulate ISR bits into host->isr (consumed
 * by wbsd_send_command()) and dispatch the matching tasklets.
 */
static irqreturn_t wbsd_irq(int irq, void *dev_id)
{
	struct wbsd_host *host = dev_id;
	int isr;

	isr = inb(host->base + WBSD_ISR);

	/*
	 * Was it actually our hardware that caused the interrupt?
	 * (0xff/0x00 mean "nothing here" on a shared line.)
	 */
	if (isr == 0xff || isr == 0x00)
		return IRQ_NONE;

	host->isr |= isr;

	/*
	 * Schedule tasklets as needed.
	 * (CRC and timeout are urgent — use the high-priority queue.)
	 */
	if (isr & WBSD_INT_CARD)
		tasklet_schedule(&host->card_tasklet);
	if (isr & WBSD_INT_FIFO_THRE)
		tasklet_schedule(&host->fifo_tasklet);
	if (isr & WBSD_INT_CRC)
		tasklet_hi_schedule(&host->crc_tasklet);
	if (isr & WBSD_INT_TIMEOUT)
		tasklet_hi_schedule(&host->timeout_tasklet);
	if (isr & WBSD_INT_TC)
		tasklet_schedule(&host->finish_tasklet);

	return IRQ_HANDLED;
}
 1198 
 1199 /*****************************************************************************\
 1200  *                                                                           *
 1201  * Device initialisation and shutdown                                        *
 1202  *                                                                           *
 1203 \*****************************************************************************/
 1204 
 1205 /*
 1206  * Allocate/free MMC structure.
 1207  */
 1208 
/*
 * Allocate the mmc_host/wbsd_host pair, fill in host capabilities and
 * limits, and stash the mmc_host in the device's drvdata.
 *
 * Returns 0 on success or -ENOMEM if the host could not be allocated.
 */
static int wbsd_alloc_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	/*
	 * Allocate MMC structure.
	 */
	mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -1 = no DMA channel claimed (PIO mode). */
	host->dma = -1;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &wbsd_ops;
	mmc->f_min = 375000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	spin_lock_init(&host->lock);

	/*
	 * Set up timers
	 */
	init_timer(&host->ignore_timer);
	host->ignore_timer.data = (unsigned long)host;
	host->ignore_timer.function = wbsd_reset_ignore;

	/*
	 * Maximum number of segments. Worst case is one sector per segment
	 * so this will be 64kB/512.
	 */
	mmc->max_segs = 128;

	/*
	 * Maximum request size. Also limited by 64KiB buffer.
	 */
	mmc->max_req_size = 65536;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. We have 12 bits (= 4095) but have to subtract
	 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
	 */
	mmc->max_blk_size = 4087;

	/*
	 * Maximum block count. There is no real limit so the maximum
	 * request size will be the only restriction.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	dev_set_drvdata(dev, mmc);

	return 0;
}
 1277 
/*
 * Tear down what wbsd_alloc_mmc() built: stop the ignore timer, free the
 * mmc_host and clear the device's drvdata.  Safe to call when no host
 * was ever allocated.
 */
static void wbsd_free_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	mmc = dev_get_drvdata(dev);
	if (!mmc)
		return;

	host = mmc_priv(mmc);
	BUG_ON(host == NULL);

	/* Must quiesce the timer before the host memory goes away. */
	del_timer_sync(&host->ignore_timer);

	mmc_free_host(mmc);

	dev_set_drvdata(dev, NULL);
}
 1296 
 1297 /*
 1298  * Scan for known chip id:s
 1299  */
 1300 
 1301 static int wbsd_scan(struct wbsd_host *host)
 1302 {
 1303 	int i, j, k;
 1304 	int id;
 1305 
 1306 	/*
 1307 	 * Iterate through all ports, all codes to
 1308 	 * find hardware that is in our known list.
 1309 	 */
 1310 	for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
 1311 		if (!request_region(config_ports[i], 2, DRIVER_NAME))
 1312 			continue;
 1313 
 1314 		for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
 1315 			id = 0xFFFF;
 1316 
 1317 			host->config = config_ports[i];
 1318 			host->unlock_code = unlock_codes[j];
 1319 
 1320 			wbsd_unlock_config(host);
 1321 
 1322 			outb(WBSD_CONF_ID_HI, config_ports[i]);
 1323 			id = inb(config_ports[i] + 1) << 8;
 1324 
 1325 			outb(WBSD_CONF_ID_LO, config_ports[i]);
 1326 			id |= inb(config_ports[i] + 1);
 1327 
 1328 			wbsd_lock_config(host);
 1329 
 1330 			for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
 1331 				if (id == valid_ids[k]) {
 1332 					host->chip_id = id;
 1333 
 1334 					return 0;
 1335 				}
 1336 			}
 1337 
 1338 			if (id != 0xFFFF) {
 1339 				DBG("Unknown hardware (id %x) found at %x\n",
 1340 					id, config_ports[i]);
 1341 			}
 1342 		}
 1343 
 1344 		release_region(config_ports[i], 2);
 1345 	}
 1346 
 1347 	host->config = 0;
 1348 	host->unlock_code = 0;
 1349 
 1350 	return -ENODEV;
 1351 }
 1352 
 1353 /*
 1354  * Allocate/free io port ranges
 1355  */
 1356 
 1357 static int wbsd_request_region(struct wbsd_host *host, int base)
 1358 {
 1359 	if (base & 0x7)
 1360 		return -EINVAL;
 1361 
 1362 	if (!request_region(base, 8, DRIVER_NAME))
 1363 		return -EIO;
 1364 
 1365 	host->base = base;
 1366 
 1367 	return 0;
 1368 }
 1369 
 1370 static void wbsd_release_regions(struct wbsd_host *host)
 1371 {
 1372 	if (host->base)
 1373 		release_region(host->base, 8);
 1374 
 1375 	host->base = 0;
 1376 
 1377 	if (host->config)
 1378 		release_region(host->config, 2);
 1379 
 1380 	host->config = 0;
 1381 }
 1382 
 1383 /*
 1384  * Allocate/free DMA port and buffer
 1385  */
 1386 
 1387 static void wbsd_request_dma(struct wbsd_host *host, int dma)
 1388 {
 1389 	if (dma < 0)
 1390 		return;
 1391 
 1392 	if (request_dma(dma, DRIVER_NAME))
 1393 		goto err;
 1394 
 1395 	/*
 1396 	 * We need to allocate a special buffer in
 1397 	 * order for ISA to be able to DMA to it.
 1398 	 */
 1399 	host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
 1400 		GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
 1401 	if (!host->dma_buffer)
 1402 		goto free;
 1403 
 1404 	/*
 1405 	 * Translate the address to a physical address.
 1406 	 */
 1407 	host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
 1408 		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 1409 
 1410 	/*
 1411 	 * ISA DMA must be aligned on a 64k basis.
 1412 	 */
 1413 	if ((host->dma_addr & 0xffff) != 0)
 1414 		goto kfree;
 1415 	/*
 1416 	 * ISA cannot access memory above 16 MB.
 1417 	 */
 1418 	else if (host->dma_addr >= 0x1000000)
 1419 		goto kfree;
 1420 
 1421 	host->dma = dma;
 1422 
 1423 	return;
 1424 
 1425 kfree:
 1426 	/*
 1427 	 * If we've gotten here then there is some kind of alignment bug
 1428 	 */
 1429 	BUG_ON(1);
 1430 
 1431 	dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
 1432 		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 1433 	host->dma_addr = 0;
 1434 
 1435 	kfree(host->dma_buffer);
 1436 	host->dma_buffer = NULL;
 1437 
 1438 free:
 1439 	free_dma(dma);
 1440 
 1441 err:
 1442 	pr_warn(DRIVER_NAME ": Unable to allocate DMA %d - falling back on FIFO\n",
 1443 		dma);
 1444 }
 1445 
/*
 * Undo wbsd_request_dma(): unmap the bounce buffer (if it was mapped),
 * free it, release the DMA channel and reset the host DMA state to the
 * PIO defaults.  Safe to call even if DMA setup never succeeded.
 */
static void wbsd_release_dma(struct wbsd_host *host)
{
	/* Unmap before freeing the buffer it maps. */
	if (host->dma_addr) {
		dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
			WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
	}
	kfree(host->dma_buffer);
	if (host->dma >= 0)
		free_dma(host->dma);

	host->dma = -1;
	host->dma_buffer = NULL;
	host->dma_addr = 0;
}
 1460 
 1461 /*
 1462  * Allocate/free IRQ.
 1463  */
 1464 
/*
 * Initialise all tasklets and then claim the (shared) interrupt line.
 * Tasklet setup must precede request_irq() since the handler may fire
 * and schedule them immediately.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int wbsd_request_irq(struct wbsd_host *host, int irq)
{
	int ret;

	/*
	 * Set up tasklets. Must be done before requesting interrupt.
	 */
	tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
			(unsigned long)host);
	tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
			(unsigned long)host);
	tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
			(unsigned long)host);
	tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
			(unsigned long)host);
	tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
			(unsigned long)host);

	/*
	 * Allocate interrupt.
	 */
	ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret)
		return ret;

	host->irq = irq;

	return 0;
}
 1494 
/*
 * Free the interrupt line and kill all tasklets.  The IRQ is released
 * first so no new tasklet can be scheduled while they are being killed.
 * No-op if no IRQ was ever claimed.
 */
static void  wbsd_release_irq(struct wbsd_host *host)
{
	if (!host->irq)
		return;

	free_irq(host->irq, host);

	host->irq = 0;

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->fifo_tasklet);
	tasklet_kill(&host->crc_tasklet);
	tasklet_kill(&host->timeout_tasklet);
	tasklet_kill(&host->finish_tasklet);
}
 1510 
 1511 /*
 1512  * Allocate all resources for the host.
 1513  */
 1514 
/*
 * Acquire all host resources: I/O region, IRQ (+tasklets) and, on a
 * best-effort basis, an ISA DMA channel (DMA failure is not fatal —
 * the driver falls back to PIO).
 *
 * On error, resources acquired so far are NOT released here; the caller
 * (wbsd_init()) invokes wbsd_release_resources(), which handles partial
 * acquisition.
 */
static int wbsd_request_resources(struct wbsd_host *host,
	int base, int irq, int dma)
{
	int ret;

	/*
	 * Allocate I/O ports.
	 */
	ret = wbsd_request_region(host, base);
	if (ret)
		return ret;

	/*
	 * Allocate interrupt.
	 */
	ret = wbsd_request_irq(host, irq);
	if (ret)
		return ret;

	/*
	 * Allocate DMA.
	 */
	wbsd_request_dma(host, dma);

	return 0;
}
 1541 
 1542 /*
 1543  * Release all resources for the host.
 1544  */
 1545 
/*
 * Release everything wbsd_request_resources() acquired, in reverse
 * order; each helper is a no-op for resources never acquired.
 */
static void wbsd_release_resources(struct wbsd_host *host)
{
	wbsd_release_dma(host);
	wbsd_release_irq(host);
	wbsd_release_regions(host);
}
 1552 
 1553 /*
 1554  * Configure the resources the chip should use.
 1555  */
 1556 
/*
 * Write the driver's chosen base/IRQ/DMA into the chip's configuration
 * registers after a soft reset, then enable and power up the SD/MMC
 * function.  Config space is unlocked for the duration.
 */
static void wbsd_chip_config(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	/*
	 * Reset the chip.
	 */
	wbsd_write_config(host, WBSD_CONF_SWRST, 1);
	wbsd_write_config(host, WBSD_CONF_SWRST, 0);

	/*
	 * Select SD/MMC function.
	 */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);

	/*
	 * Set up card detection.
	 */
	wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);

	/*
	 * Configure chip
	 */
	wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
	wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);

	wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);

	if (host->dma >= 0)
		wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);

	/*
	 * Enable and power up chip.
	 */
	wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
	wbsd_write_config(host, WBSD_CONF_POWER, 0x20);

	wbsd_lock_config(host);
}
 1596 
 1597 /*
 1598  * Check that configured resources are correct.
 1599  */
 1600 
/*
 * Read back the chip's configured base/IRQ/DMA and compare against the
 * driver's resources.  Returns 1 if they match (DMA is only checked when
 * the driver actually claimed a channel), 0 otherwise.
 */
static int wbsd_chip_validate(struct wbsd_host *host)
{
	int base, irq, dma;

	wbsd_unlock_config(host);

	/*
	 * Select SD/MMC function.
	 */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);

	/*
	 * Read configuration.
	 */
	base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
	base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);

	irq = wbsd_read_config(host, WBSD_CONF_IRQ);

	dma = wbsd_read_config(host, WBSD_CONF_DRQ);

	wbsd_lock_config(host);

	/*
	 * Validate against given configuration.
	 */
	if (base != host->base)
		return 0;
	if (irq != host->irq)
		return 0;
	if ((dma != host->dma) && (host->dma != -1))
		return 0;

	return 1;
}
 1636 
 1637 /*
 1638  * Powers down the SD function
 1639  */
 1640 
/*
 * Power down the SD/MMC function by clearing its enable bit in config
 * space.
 */
static void wbsd_chip_poweroff(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
	wbsd_write_config(host, WBSD_CONF_ENABLE, 0);

	wbsd_lock_config(host);
}
 1650 
 1651 /*****************************************************************************\
 1652  *                                                                           *
 1653  * Devices setup and shutdown                                                *
 1654  *                                                                           *
 1655 \*****************************************************************************/
 1656 
 1657 static int wbsd_init(struct device *dev, int base, int irq, int dma,
 1658 	int pnp)
 1659 {
 1660 	struct wbsd_host *host = NULL;
 1661 	struct mmc_host *mmc = NULL;
 1662 	int ret;
 1663 
 1664 	ret = wbsd_alloc_mmc(dev);
 1665 	if (ret)
 1666 		return ret;
 1667 
 1668 	mmc = dev_get_drvdata(dev);
 1669 	host = mmc_priv(mmc);
 1670 
 1671 	/*
 1672 	 * Scan for hardware.
 1673 	 */
 1674 	ret = wbsd_scan(host);
 1675 	if (ret) {
 1676 		if (pnp && (ret == -ENODEV)) {
 1677 			pr_warn(DRIVER_NAME ": Unable to confirm device presence - you may experience lock-ups\n");
 1678 		} else {
 1679 			wbsd_free_mmc(dev);
 1680 			return ret;
 1681 		}
 1682 	}
 1683 
 1684 	/*
 1685 	 * Request resources.
 1686 	 */
 1687 	ret = wbsd_request_resources(host, base, irq, dma);
 1688 	if (ret) {
 1689 		wbsd_release_resources(host);
 1690 		wbsd_free_mmc(dev);
 1691 		return ret;
 1692 	}
 1693 
 1694 	/*
 1695 	 * See if chip needs to be configured.
 1696 	 */
 1697 	if (pnp) {
 1698 		if ((host->config != 0) && !wbsd_chip_validate(host)) {
 1699 			pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
 1700 			wbsd_chip_config(host);
 1701 		}
 1702 	} else
 1703 		wbsd_chip_config(host);
 1704 
 1705 	/*
 1706 	 * Power Management stuff. No idea how this works.
 1707 	 * Not tested.
 1708 	 */
 1709 #ifdef CONFIG_PM
 1710 	if (host->config) {
 1711 		wbsd_unlock_config(host);
 1712 		wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
 1713 		wbsd_lock_config(host);
 1714 	}
 1715 #endif
 1716 	/*
 1717 	 * Allow device to initialise itself properly.
 1718 	 */
 1719 	mdelay(5);
 1720 
 1721 	/*
 1722 	 * Reset the chip into a known state.
 1723 	 */
 1724 	wbsd_init_device(host);
 1725 
 1726 	mmc_add_host(mmc);
 1727 
 1728 	pr_info("%s: W83L51xD", mmc_hostname(mmc));
 1729 	if (host->chip_id != 0)
 1730 		printk(" id %x", (int)host->chip_id);
 1731 	printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
 1732 	if (host->dma >= 0)
 1733 		printk(" dma %d", (int)host->dma);
 1734 	else
 1735 		printk(" FIFO");
 1736 	if (pnp)
 1737 		printk(" PnP");
 1738 	printk("\n");
 1739 
 1740 	return 0;
 1741 }
 1742 
/*
 * Common teardown for both probe paths. Safe to call when no mmc_host
 * was ever bound (returns early). Ordering matters: the host must be
 * removed from the MMC core before the hardware is powered down and the
 * resources are released.
 */
static void wbsd_shutdown(struct device *dev, int pnp)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wbsd_host *host;

	if (!mmc)
		return;

	host = mmc_priv(mmc);

	mmc_remove_host(mmc);

	/*
	 * Power down the SD/MMC function.
	 * (With PnP the firmware owns the device's power state.)
	 */
	if (!pnp)
		wbsd_chip_poweroff(host);

	wbsd_release_resources(host);

	wbsd_free_mmc(dev);
}
 1765 
 1766 /*
 1767  * Non-PnP
 1768  */
 1769 
 1770 static int wbsd_probe(struct platform_device *dev)
 1771 {
 1772 	/* Use the module parameters for resources */
 1773 	return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
 1774 }
 1775 
 1776 static int wbsd_remove(struct platform_device *dev)
 1777 {
 1778 	wbsd_shutdown(&dev->dev, 0);
 1779 
 1780 	return 0;
 1781 }
 1782 
 1783 /*
 1784  * PnP
 1785  */
 1786 
 1787 #ifdef CONFIG_PNP
 1788 
 1789 static int
 1790 wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
 1791 {
 1792 	int io, irq, dma;
 1793 
 1794 	/*
 1795 	 * Get resources from PnP layer.
 1796 	 */
 1797 	io = pnp_port_start(pnpdev, 0);
 1798 	irq = pnp_irq(pnpdev, 0);
 1799 	if (pnp_dma_valid(pnpdev, 0))
 1800 		dma = pnp_dma(pnpdev, 0);
 1801 	else
 1802 		dma = -1;
 1803 
 1804 	DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
 1805 
 1806 	return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
 1807 }
 1808 
/* PnP removal: common teardown, leaving power control to the firmware. */
static void wbsd_pnp_remove(struct pnp_dev *dev)
{
	wbsd_shutdown(&dev->dev, 1);
}
 1813 
 1814 #endif /* CONFIG_PNP */
 1815 
 1816 /*
 1817  * Power management
 1818  */
 1819 
 1820 #ifdef CONFIG_PM
 1821 
 1822 static int wbsd_platform_suspend(struct platform_device *dev,
 1823 				 pm_message_t state)
 1824 {
 1825 	struct mmc_host *mmc = platform_get_drvdata(dev);
 1826 	struct wbsd_host *host;
 1827 
 1828 	if (mmc == NULL)
 1829 		return 0;
 1830 
 1831 	DBGF("Suspending...\n");
 1832 
 1833 	host = mmc_priv(mmc);
 1834 
 1835 	wbsd_chip_poweroff(host);
 1836 	return 0;
 1837 }
 1838 
 1839 static int wbsd_platform_resume(struct platform_device *dev)
 1840 {
 1841 	struct mmc_host *mmc = platform_get_drvdata(dev);
 1842 	struct wbsd_host *host;
 1843 
 1844 	if (mmc == NULL)
 1845 		return 0;
 1846 
 1847 	DBGF("Resuming...\n");
 1848 
 1849 	host = mmc_priv(mmc);
 1850 
 1851 	wbsd_chip_config(host);
 1852 
 1853 	/*
 1854 	 * Allow device to initialise itself properly.
 1855 	 */
 1856 	mdelay(5);
 1857 
 1858 	wbsd_init_device(host);
 1859 	return 0;
 1860 }
 1861 
 1862 #ifdef CONFIG_PNP
 1863 
 1864 static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
 1865 {
 1866 	struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
 1867 
 1868 	if (mmc == NULL)
 1869 		return 0;
 1870 
 1871 	DBGF("Suspending...\n");
 1872 	return 0;
 1873 }
 1874 
 1875 static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
 1876 {
 1877 	struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
 1878 	struct wbsd_host *host;
 1879 
 1880 	if (mmc == NULL)
 1881 		return 0;
 1882 
 1883 	DBGF("Resuming...\n");
 1884 
 1885 	host = mmc_priv(mmc);
 1886 
 1887 	/*
 1888 	 * See if chip needs to be configured.
 1889 	 */
 1890 	if (host->config != 0) {
 1891 		if (!wbsd_chip_validate(host)) {
 1892 			pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
 1893 			wbsd_chip_config(host);
 1894 		}
 1895 	}
 1896 
 1897 	/*
 1898 	 * Allow device to initialise itself properly.
 1899 	 */
 1900 	mdelay(5);
 1901 
 1902 	wbsd_init_device(host);
 1903 	return 0;
 1904 }
 1905 
 1906 #endif /* CONFIG_PNP */
 1907 
 1908 #else /* CONFIG_PM */
 1909 
 1910 #define wbsd_platform_suspend NULL
 1911 #define wbsd_platform_resume NULL
 1912 
 1913 #define wbsd_pnp_suspend NULL
 1914 #define wbsd_pnp_resume NULL
 1915 
 1916 #endif /* CONFIG_PM */
 1917 
/* Platform device created by wbsd_drv_init() in the non-PnP case. */
static struct platform_device *wbsd_device;

/* Driver callbacks for the non-PnP (module-parameter) probe path. */
static struct platform_driver wbsd_driver = {
	.probe		= wbsd_probe,
	.remove		= wbsd_remove,

	.suspend	= wbsd_platform_suspend,
	.resume		= wbsd_platform_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
 1930 
 1931 #ifdef CONFIG_PNP
 1932 
/* Driver callbacks for the PnP (firmware-enumerated) probe path. */
static struct pnp_driver wbsd_pnp_driver = {
	.name		= DRIVER_NAME,
	.id_table	= pnp_dev_table,
	.probe		= wbsd_pnp_probe,
	.remove		= wbsd_pnp_remove,

	.suspend	= wbsd_pnp_suspend,
	.resume		= wbsd_pnp_resume,
};
 1942 
 1943 #endif /* CONFIG_PNP */
 1944 
 1945 /*
 1946  * Module loading/unloading
 1947  */
 1948 
 1949 static int __init wbsd_drv_init(void)
 1950 {
 1951 	int result;
 1952 
 1953 	pr_info(DRIVER_NAME
 1954 		": Winbond W83L51xD SD/MMC card interface driver\n");
 1955 	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
 1956 
 1957 #ifdef CONFIG_PNP
 1958 
 1959 	if (!param_nopnp) {
 1960 		result = pnp_register_driver(&wbsd_pnp_driver);
 1961 		if (result < 0)
 1962 			return result;
 1963 	}
 1964 #endif /* CONFIG_PNP */
 1965 
 1966 	if (param_nopnp) {
 1967 		result = platform_driver_register(&wbsd_driver);
 1968 		if (result < 0)
 1969 			return result;
 1970 
 1971 		wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
 1972 		if (!wbsd_device) {
 1973 			platform_driver_unregister(&wbsd_driver);
 1974 			return -ENOMEM;
 1975 		}
 1976 
 1977 		result = platform_device_add(wbsd_device);
 1978 		if (result) {
 1979 			platform_device_put(wbsd_device);
 1980 			platform_driver_unregister(&wbsd_driver);
 1981 			return result;
 1982 		}
 1983 	}
 1984 
 1985 	return 0;
 1986 }
 1987 
/*
 * Module exit: mirror of wbsd_drv_init() — unregister whichever driver
 * flavour (PnP or platform) was registered at load time, as selected by
 * the nopnp module parameter.
 */
static void __exit wbsd_drv_exit(void)
{
#ifdef CONFIG_PNP

	if (!param_nopnp)
		pnp_unregister_driver(&wbsd_pnp_driver);

#endif /* CONFIG_PNP */

	if (param_nopnp) {
		/* Device removal triggers wbsd_remove() -> wbsd_shutdown(). */
		platform_device_unregister(wbsd_device);

		platform_driver_unregister(&wbsd_driver);
	}

	DBG("unloaded\n");
}
 2005 
/* Module entry/exit points and user-tunable parameters. */
module_init(wbsd_drv_init);
module_exit(wbsd_drv_exit);
#ifdef CONFIG_PNP
module_param_named(nopnp, param_nopnp, uint, 0444);
#endif
module_param_named(io, param_io, uint, 0444);
module_param_named(irq, param_irq, uint, 0444);
module_param_named(dma, param_dma, int, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");

#ifdef CONFIG_PNP
MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
#endif
MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
 2025 
 2026 
 2027 
 2028 
 2029 
 2030 /* LDV_COMMENT_BEGIN_MAIN */
 2031 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 2032 
 2033 /*###########################################################################*/
 2034 
 2035 /*############## Driver Environment Generator 0.2 output ####################*/
 2036 
 2037 /*###########################################################################*/
 2038 
 2039 
 2040 
 2041 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
 2042 void ldv_check_final_state(void);
 2043 
 2044 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 2045 void ldv_check_return_value(int res);
 2046 
 2047 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 2048 void ldv_check_return_value_probe(int res);
 2049 
 2050 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 2051 void ldv_initialize(void);
 2052 
 2053 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 2054 void ldv_handler_precall(void);
 2055 
 2056 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
 2057 int nondet_int(void);
 2058 
 2059 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 2060 int LDV_IN_INTERRUPT;
 2061 
 2062 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 2063 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 2064 
 2065 
 2066 
 2067 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 2068 	/*============================= VARIABLE DECLARATION PART   =============================*/
 2069 	/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2070 	/* content: static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
 2071 	/* LDV_COMMENT_BEGIN_PREP */
 2072 	#define DRIVER_NAME "wbsd"
 2073 	#define DBG(x...) \
 2074 	pr_debug(DRIVER_NAME ": " x)
 2075 	#define DBGF(f, x...) \
 2076 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2077 	#ifdef CONFIG_PNP
 2078 	#endif 
 2079 	#ifdef CONFIG_PNP
 2080 	#else
 2081 	#endif
 2082 	/* LDV_COMMENT_END_PREP */
 2083 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_request" */
 2084 	struct mmc_host * var_group1;
 2085 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_request" */
 2086 	struct mmc_request * var_group2;
 2087 	/* LDV_COMMENT_BEGIN_PREP */
 2088 	#ifdef CONFIG_PM
 2089 	#endif
 2090 	#ifdef CONFIG_PNP
 2091 	#endif 
 2092 	#ifdef CONFIG_PM
 2093 	#ifdef CONFIG_PNP
 2094 	#endif 
 2095 	#else 
 2096 	#define wbsd_platform_suspend NULL
 2097 	#define wbsd_platform_resume NULL
 2098 	#define wbsd_pnp_suspend NULL
 2099 	#define wbsd_pnp_resume NULL
 2100 	#endif 
 2101 	#ifdef CONFIG_PNP
 2102 	#endif 
 2103 	#ifdef CONFIG_PNP
 2104 	#endif 
 2105 	#ifdef CONFIG_PNP
 2106 	#endif 
 2107 	#ifdef CONFIG_PNP
 2108 	#endif
 2109 	#ifdef CONFIG_PNP
 2110 	#endif
 2111 	/* LDV_COMMENT_END_PREP */
 2112 	/* content: static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
 2113 	/* LDV_COMMENT_BEGIN_PREP */
 2114 	#define DRIVER_NAME "wbsd"
 2115 	#define DBG(x...) \
 2116 	pr_debug(DRIVER_NAME ": " x)
 2117 	#define DBGF(f, x...) \
 2118 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2119 	#ifdef CONFIG_PNP
 2120 	#endif 
 2121 	#ifdef CONFIG_PNP
 2122 	#else
 2123 	#endif
 2124 	#ifdef CONFIG_MMC_DEBUG
 2125 	#endif
 2126 	/* LDV_COMMENT_END_PREP */
 2127 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_set_ios" */
 2128 	struct mmc_ios * var_group3;
 2129 	/* LDV_COMMENT_BEGIN_PREP */
 2130 	#ifdef CONFIG_PM
 2131 	#endif
 2132 	#ifdef CONFIG_PNP
 2133 	#endif 
 2134 	#ifdef CONFIG_PM
 2135 	#ifdef CONFIG_PNP
 2136 	#endif 
 2137 	#else 
 2138 	#define wbsd_platform_suspend NULL
 2139 	#define wbsd_platform_resume NULL
 2140 	#define wbsd_pnp_suspend NULL
 2141 	#define wbsd_pnp_resume NULL
 2142 	#endif 
 2143 	#ifdef CONFIG_PNP
 2144 	#endif 
 2145 	#ifdef CONFIG_PNP
 2146 	#endif 
 2147 	#ifdef CONFIG_PNP
 2148 	#endif 
 2149 	#ifdef CONFIG_PNP
 2150 	#endif
 2151 	#ifdef CONFIG_PNP
 2152 	#endif
 2153 	/* LDV_COMMENT_END_PREP */
 2154 	/* content: static int wbsd_get_ro(struct mmc_host *mmc)*/
 2155 	/* LDV_COMMENT_BEGIN_PREP */
 2156 	#define DRIVER_NAME "wbsd"
 2157 	#define DBG(x...) \
 2158 	pr_debug(DRIVER_NAME ": " x)
 2159 	#define DBGF(f, x...) \
 2160 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2161 	#ifdef CONFIG_PNP
 2162 	#endif 
 2163 	#ifdef CONFIG_PNP
 2164 	#else
 2165 	#endif
 2166 	#ifdef CONFIG_MMC_DEBUG
 2167 	#endif
 2168 	/* LDV_COMMENT_END_PREP */
 2169 	/* LDV_COMMENT_BEGIN_PREP */
 2170 	#ifdef CONFIG_PM
 2171 	#endif
 2172 	#ifdef CONFIG_PNP
 2173 	#endif 
 2174 	#ifdef CONFIG_PM
 2175 	#ifdef CONFIG_PNP
 2176 	#endif 
 2177 	#else 
 2178 	#define wbsd_platform_suspend NULL
 2179 	#define wbsd_platform_resume NULL
 2180 	#define wbsd_pnp_suspend NULL
 2181 	#define wbsd_pnp_resume NULL
 2182 	#endif 
 2183 	#ifdef CONFIG_PNP
 2184 	#endif 
 2185 	#ifdef CONFIG_PNP
 2186 	#endif 
 2187 	#ifdef CONFIG_PNP
 2188 	#endif 
 2189 	#ifdef CONFIG_PNP
 2190 	#endif
 2191 	#ifdef CONFIG_PNP
 2192 	#endif
 2193 	/* LDV_COMMENT_END_PREP */
 2194 
 2195 	/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2196 	/* content: static int wbsd_probe(struct platform_device *dev)*/
 2197 	/* LDV_COMMENT_BEGIN_PREP */
 2198 	#define DRIVER_NAME "wbsd"
 2199 	#define DBG(x...) \
 2200 	pr_debug(DRIVER_NAME ": " x)
 2201 	#define DBGF(f, x...) \
 2202 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2203 	#ifdef CONFIG_PNP
 2204 	#endif 
 2205 	#ifdef CONFIG_PNP
 2206 	#else
 2207 	#endif
 2208 	#ifdef CONFIG_MMC_DEBUG
 2209 	#endif
 2210 	#ifdef CONFIG_PM
 2211 	#endif
 2212 	/* LDV_COMMENT_END_PREP */
 2213 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_probe" */
 2214 	struct platform_device * var_group4;
 2215 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "wbsd_probe" */
 2216 	static int res_wbsd_probe_48;
 2217 	/* LDV_COMMENT_BEGIN_PREP */
 2218 	#ifdef CONFIG_PNP
 2219 	#endif 
 2220 	#ifdef CONFIG_PM
 2221 	#ifdef CONFIG_PNP
 2222 	#endif 
 2223 	#else 
 2224 	#define wbsd_platform_suspend NULL
 2225 	#define wbsd_platform_resume NULL
 2226 	#define wbsd_pnp_suspend NULL
 2227 	#define wbsd_pnp_resume NULL
 2228 	#endif 
 2229 	#ifdef CONFIG_PNP
 2230 	#endif 
 2231 	#ifdef CONFIG_PNP
 2232 	#endif 
 2233 	#ifdef CONFIG_PNP
 2234 	#endif 
 2235 	#ifdef CONFIG_PNP
 2236 	#endif
 2237 	#ifdef CONFIG_PNP
 2238 	#endif
 2239 	/* LDV_COMMENT_END_PREP */
 2240 	/* content: static int wbsd_remove(struct platform_device *dev)*/
 2241 	/* LDV_COMMENT_BEGIN_PREP */
 2242 	#define DRIVER_NAME "wbsd"
 2243 	#define DBG(x...) \
 2244 	pr_debug(DRIVER_NAME ": " x)
 2245 	#define DBGF(f, x...) \
 2246 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2247 	#ifdef CONFIG_PNP
 2248 	#endif 
 2249 	#ifdef CONFIG_PNP
 2250 	#else
 2251 	#endif
 2252 	#ifdef CONFIG_MMC_DEBUG
 2253 	#endif
 2254 	#ifdef CONFIG_PM
 2255 	#endif
 2256 	/* LDV_COMMENT_END_PREP */
 2257 	/* LDV_COMMENT_BEGIN_PREP */
 2258 	#ifdef CONFIG_PNP
 2259 	#endif 
 2260 	#ifdef CONFIG_PM
 2261 	#ifdef CONFIG_PNP
 2262 	#endif 
 2263 	#else 
 2264 	#define wbsd_platform_suspend NULL
 2265 	#define wbsd_platform_resume NULL
 2266 	#define wbsd_pnp_suspend NULL
 2267 	#define wbsd_pnp_resume NULL
 2268 	#endif 
 2269 	#ifdef CONFIG_PNP
 2270 	#endif 
 2271 	#ifdef CONFIG_PNP
 2272 	#endif 
 2273 	#ifdef CONFIG_PNP
 2274 	#endif 
 2275 	#ifdef CONFIG_PNP
 2276 	#endif
 2277 	#ifdef CONFIG_PNP
 2278 	#endif
 2279 	/* LDV_COMMENT_END_PREP */
 2280 	/* content: static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state)*/
 2281 	/* LDV_COMMENT_BEGIN_PREP */
 2282 	#define DRIVER_NAME "wbsd"
 2283 	#define DBG(x...) \
 2284 	pr_debug(DRIVER_NAME ": " x)
 2285 	#define DBGF(f, x...) \
 2286 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2287 	#ifdef CONFIG_PNP
 2288 	#endif 
 2289 	#ifdef CONFIG_PNP
 2290 	#else
 2291 	#endif
 2292 	#ifdef CONFIG_MMC_DEBUG
 2293 	#endif
 2294 	#ifdef CONFIG_PM
 2295 	#endif
 2296 	#ifdef CONFIG_PNP
 2297 	#endif 
 2298 	#ifdef CONFIG_PM
 2299 	/* LDV_COMMENT_END_PREP */
 2300 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_platform_suspend" */
 2301 	pm_message_t  var_wbsd_platform_suspend_52_p1;
 2302 	/* LDV_COMMENT_BEGIN_PREP */
 2303 	#ifdef CONFIG_PNP
 2304 	#endif 
 2305 	#else 
 2306 	#define wbsd_platform_suspend NULL
 2307 	#define wbsd_platform_resume NULL
 2308 	#define wbsd_pnp_suspend NULL
 2309 	#define wbsd_pnp_resume NULL
 2310 	#endif 
 2311 	#ifdef CONFIG_PNP
 2312 	#endif 
 2313 	#ifdef CONFIG_PNP
 2314 	#endif 
 2315 	#ifdef CONFIG_PNP
 2316 	#endif 
 2317 	#ifdef CONFIG_PNP
 2318 	#endif
 2319 	#ifdef CONFIG_PNP
 2320 	#endif
 2321 	/* LDV_COMMENT_END_PREP */
 2322 	/* content: static int wbsd_platform_resume(struct platform_device *dev)*/
 2323 	/* LDV_COMMENT_BEGIN_PREP */
 2324 	#define DRIVER_NAME "wbsd"
 2325 	#define DBG(x...) \
 2326 	pr_debug(DRIVER_NAME ": " x)
 2327 	#define DBGF(f, x...) \
 2328 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2329 	#ifdef CONFIG_PNP
 2330 	#endif 
 2331 	#ifdef CONFIG_PNP
 2332 	#else
 2333 	#endif
 2334 	#ifdef CONFIG_MMC_DEBUG
 2335 	#endif
 2336 	#ifdef CONFIG_PM
 2337 	#endif
 2338 	#ifdef CONFIG_PNP
 2339 	#endif 
 2340 	#ifdef CONFIG_PM
 2341 	/* LDV_COMMENT_END_PREP */
 2342 	/* LDV_COMMENT_BEGIN_PREP */
 2343 	#ifdef CONFIG_PNP
 2344 	#endif 
 2345 	#else 
 2346 	#define wbsd_platform_suspend NULL
 2347 	#define wbsd_platform_resume NULL
 2348 	#define wbsd_pnp_suspend NULL
 2349 	#define wbsd_pnp_resume NULL
 2350 	#endif 
 2351 	#ifdef CONFIG_PNP
 2352 	#endif 
 2353 	#ifdef CONFIG_PNP
 2354 	#endif 
 2355 	#ifdef CONFIG_PNP
 2356 	#endif 
 2357 	#ifdef CONFIG_PNP
 2358 	#endif
 2359 	#ifdef CONFIG_PNP
 2360 	#endif
 2361 	/* LDV_COMMENT_END_PREP */
 2362 
 2363 	/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 2364 	/* content: static int wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)*/
 2365 	/* LDV_COMMENT_BEGIN_PREP */
 2366 	#define DRIVER_NAME "wbsd"
 2367 	#define DBG(x...) \
 2368 	pr_debug(DRIVER_NAME ": " x)
 2369 	#define DBGF(f, x...) \
 2370 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2371 	#ifdef CONFIG_PNP
 2372 	#endif 
 2373 	#ifdef CONFIG_PNP
 2374 	#else
 2375 	#endif
 2376 	#ifdef CONFIG_MMC_DEBUG
 2377 	#endif
 2378 	#ifdef CONFIG_PM
 2379 	#endif
 2380 	#ifdef CONFIG_PNP
 2381 	/* LDV_COMMENT_END_PREP */
 2382 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_pnp_probe" */
 2383 	struct pnp_dev * var_group5;
 2384 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_pnp_probe" */
 2385 	const struct pnp_device_id * var_wbsd_pnp_probe_50_p1;
 2386 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "wbsd_pnp_probe" */
 2387 	static int res_wbsd_pnp_probe_50;
 2388 	/* LDV_COMMENT_BEGIN_PREP */
 2389 	#endif 
 2390 	#ifdef CONFIG_PM
 2391 	#ifdef CONFIG_PNP
 2392 	#endif 
 2393 	#else 
 2394 	#define wbsd_platform_suspend NULL
 2395 	#define wbsd_platform_resume NULL
 2396 	#define wbsd_pnp_suspend NULL
 2397 	#define wbsd_pnp_resume NULL
 2398 	#endif 
 2399 	#ifdef CONFIG_PNP
 2400 	#endif 
 2401 	#ifdef CONFIG_PNP
 2402 	#endif 
 2403 	#ifdef CONFIG_PNP
 2404 	#endif 
 2405 	#ifdef CONFIG_PNP
 2406 	#endif
 2407 	#ifdef CONFIG_PNP
 2408 	#endif
 2409 	/* LDV_COMMENT_END_PREP */
 2410 	/* content: static void wbsd_pnp_remove(struct pnp_dev *dev)*/
 2411 	/* LDV_COMMENT_BEGIN_PREP */
 2412 	#define DRIVER_NAME "wbsd"
 2413 	#define DBG(x...) \
 2414 	pr_debug(DRIVER_NAME ": " x)
 2415 	#define DBGF(f, x...) \
 2416 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2417 	#ifdef CONFIG_PNP
 2418 	#endif 
 2419 	#ifdef CONFIG_PNP
 2420 	#else
 2421 	#endif
 2422 	#ifdef CONFIG_MMC_DEBUG
 2423 	#endif
 2424 	#ifdef CONFIG_PM
 2425 	#endif
 2426 	#ifdef CONFIG_PNP
 2427 	/* LDV_COMMENT_END_PREP */
 2428 	/* LDV_COMMENT_BEGIN_PREP */
 2429 	#endif 
 2430 	#ifdef CONFIG_PM
 2431 	#ifdef CONFIG_PNP
 2432 	#endif 
 2433 	#else 
 2434 	#define wbsd_platform_suspend NULL
 2435 	#define wbsd_platform_resume NULL
 2436 	#define wbsd_pnp_suspend NULL
 2437 	#define wbsd_pnp_resume NULL
 2438 	#endif 
 2439 	#ifdef CONFIG_PNP
 2440 	#endif 
 2441 	#ifdef CONFIG_PNP
 2442 	#endif 
 2443 	#ifdef CONFIG_PNP
 2444 	#endif 
 2445 	#ifdef CONFIG_PNP
 2446 	#endif
 2447 	#ifdef CONFIG_PNP
 2448 	#endif
 2449 	/* LDV_COMMENT_END_PREP */
 2450 	/* content: static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)*/
 2451 	/* LDV_COMMENT_BEGIN_PREP */
 2452 	#define DRIVER_NAME "wbsd"
 2453 	#define DBG(x...) \
 2454 	pr_debug(DRIVER_NAME ": " x)
 2455 	#define DBGF(f, x...) \
 2456 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2457 	#ifdef CONFIG_PNP
 2458 	#endif 
 2459 	#ifdef CONFIG_PNP
 2460 	#else
 2461 	#endif
 2462 	#ifdef CONFIG_MMC_DEBUG
 2463 	#endif
 2464 	#ifdef CONFIG_PM
 2465 	#endif
 2466 	#ifdef CONFIG_PNP
 2467 	#endif 
 2468 	#ifdef CONFIG_PM
 2469 	#ifdef CONFIG_PNP
 2470 	/* LDV_COMMENT_END_PREP */
 2471 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_pnp_suspend" */
 2472 	pm_message_t  var_wbsd_pnp_suspend_54_p1;
 2473 	/* LDV_COMMENT_BEGIN_PREP */
 2474 	#endif 
 2475 	#else 
 2476 	#define wbsd_platform_suspend NULL
 2477 	#define wbsd_platform_resume NULL
 2478 	#define wbsd_pnp_suspend NULL
 2479 	#define wbsd_pnp_resume NULL
 2480 	#endif 
 2481 	#ifdef CONFIG_PNP
 2482 	#endif 
 2483 	#ifdef CONFIG_PNP
 2484 	#endif 
 2485 	#ifdef CONFIG_PNP
 2486 	#endif 
 2487 	#ifdef CONFIG_PNP
 2488 	#endif
 2489 	#ifdef CONFIG_PNP
 2490 	#endif
 2491 	/* LDV_COMMENT_END_PREP */
 2492 	/* content: static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)*/
 2493 	/* LDV_COMMENT_BEGIN_PREP */
 2494 	#define DRIVER_NAME "wbsd"
 2495 	#define DBG(x...) \
 2496 	pr_debug(DRIVER_NAME ": " x)
 2497 	#define DBGF(f, x...) \
 2498 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2499 	#ifdef CONFIG_PNP
 2500 	#endif 
 2501 	#ifdef CONFIG_PNP
 2502 	#else
 2503 	#endif
 2504 	#ifdef CONFIG_MMC_DEBUG
 2505 	#endif
 2506 	#ifdef CONFIG_PM
 2507 	#endif
 2508 	#ifdef CONFIG_PNP
 2509 	#endif 
 2510 	#ifdef CONFIG_PM
 2511 	#ifdef CONFIG_PNP
 2512 	/* LDV_COMMENT_END_PREP */
 2513 	/* LDV_COMMENT_BEGIN_PREP */
 2514 	#endif 
 2515 	#else 
 2516 	#define wbsd_platform_suspend NULL
 2517 	#define wbsd_platform_resume NULL
 2518 	#define wbsd_pnp_suspend NULL
 2519 	#define wbsd_pnp_resume NULL
 2520 	#endif 
 2521 	#ifdef CONFIG_PNP
 2522 	#endif 
 2523 	#ifdef CONFIG_PNP
 2524 	#endif 
 2525 	#ifdef CONFIG_PNP
 2526 	#endif 
 2527 	#ifdef CONFIG_PNP
 2528 	#endif
 2529 	#ifdef CONFIG_PNP
 2530 	#endif
 2531 	/* LDV_COMMENT_END_PREP */
 2532 
 2533 	/** CALLBACK SECTION request_irq **/
 2534 	/* content: static irqreturn_t wbsd_irq(int irq, void *dev_id)*/
 2535 	/* LDV_COMMENT_BEGIN_PREP */
 2536 	#define DRIVER_NAME "wbsd"
 2537 	#define DBG(x...) \
 2538 	pr_debug(DRIVER_NAME ": " x)
 2539 	#define DBGF(f, x...) \
 2540 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2541 	#ifdef CONFIG_PNP
 2542 	#endif 
 2543 	#ifdef CONFIG_PNP
 2544 	#else
 2545 	#endif
 2546 	#ifdef CONFIG_MMC_DEBUG
 2547 	#endif
 2548 	/* LDV_COMMENT_END_PREP */
 2549 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_irq" */
 2550 	int  var_wbsd_irq_31_p0;
 2551 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_irq" */
 2552 	void * var_wbsd_irq_31_p1;
 2553 	/* LDV_COMMENT_BEGIN_PREP */
 2554 	#ifdef CONFIG_PM
 2555 	#endif
 2556 	#ifdef CONFIG_PNP
 2557 	#endif 
 2558 	#ifdef CONFIG_PM
 2559 	#ifdef CONFIG_PNP
 2560 	#endif 
 2561 	#else 
 2562 	#define wbsd_platform_suspend NULL
 2563 	#define wbsd_platform_resume NULL
 2564 	#define wbsd_pnp_suspend NULL
 2565 	#define wbsd_pnp_resume NULL
 2566 	#endif 
 2567 	#ifdef CONFIG_PNP
 2568 	#endif 
 2569 	#ifdef CONFIG_PNP
 2570 	#endif 
 2571 	#ifdef CONFIG_PNP
 2572 	#endif 
 2573 	#ifdef CONFIG_PNP
 2574 	#endif
 2575 	#ifdef CONFIG_PNP
 2576 	#endif
 2577 	/* LDV_COMMENT_END_PREP */
 2578 
 2579 	/** TIMER SECTION timer **/
 2580 	/* content: static void wbsd_reset_ignore(unsigned long data)*/
 2581 	/* LDV_COMMENT_BEGIN_PREP */
 2582 	#define DRIVER_NAME "wbsd"
 2583 	#define DBG(x...) \
 2584 	pr_debug(DRIVER_NAME ": " x)
 2585 	#define DBGF(f, x...) \
 2586 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2587 	#ifdef CONFIG_PNP
 2588 	#endif 
 2589 	#ifdef CONFIG_PNP
 2590 	#else
 2591 	#endif
 2592 	#ifdef CONFIG_MMC_DEBUG
 2593 	#endif
 2594 	/* LDV_COMMENT_END_PREP */
 2595 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "wbsd_reset_ignore" */
 2596 	unsigned long  var_wbsd_reset_ignore_24_p0;
 2597 	/* LDV_COMMENT_BEGIN_PREP */
 2598 	#ifdef CONFIG_PM
 2599 	#endif
 2600 	#ifdef CONFIG_PNP
 2601 	#endif 
 2602 	#ifdef CONFIG_PM
 2603 	#ifdef CONFIG_PNP
 2604 	#endif 
 2605 	#else 
 2606 	#define wbsd_platform_suspend NULL
 2607 	#define wbsd_platform_resume NULL
 2608 	#define wbsd_pnp_suspend NULL
 2609 	#define wbsd_pnp_resume NULL
 2610 	#endif 
 2611 	#ifdef CONFIG_PNP
 2612 	#endif 
 2613 	#ifdef CONFIG_PNP
 2614 	#endif 
 2615 	#ifdef CONFIG_PNP
 2616 	#endif 
 2617 	#ifdef CONFIG_PNP
 2618 	#endif
 2619 	#ifdef CONFIG_PNP
 2620 	#endif
 2621 	/* LDV_COMMENT_END_PREP */
 2622 
 2623 
 2624 
 2625 
 2626 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 2627 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 2628 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 2629 	LDV_IN_INTERRUPT=1;
 2630 
 2631 
 2632 
 2633 
 2634 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 2635 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 2636 	/*============================= FUNCTION CALL SECTION       =============================*/
 2637 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 2638 	ldv_initialize();
 2639 
 2640 	/** INIT: init_type: ST_MODULE_INIT **/
 2641 	/* content: static int __init wbsd_drv_init(void)*/
 2642 	/* LDV_COMMENT_BEGIN_PREP */
 2643 	#define DRIVER_NAME "wbsd"
 2644 	#define DBG(x...) \
 2645 	pr_debug(DRIVER_NAME ": " x)
 2646 	#define DBGF(f, x...) \
 2647 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2648 	#ifdef CONFIG_PNP
 2649 	#endif 
 2650 	#ifdef CONFIG_PNP
 2651 	#else
 2652 	#endif
 2653 	#ifdef CONFIG_MMC_DEBUG
 2654 	#endif
 2655 	#ifdef CONFIG_PM
 2656 	#endif
 2657 	#ifdef CONFIG_PNP
 2658 	#endif 
 2659 	#ifdef CONFIG_PM
 2660 	#ifdef CONFIG_PNP
 2661 	#endif 
 2662 	#else 
 2663 	#define wbsd_platform_suspend NULL
 2664 	#define wbsd_platform_resume NULL
 2665 	#define wbsd_pnp_suspend NULL
 2666 	#define wbsd_pnp_resume NULL
 2667 	#endif 
 2668 	#ifdef CONFIG_PNP
 2669 	#endif 
 2670 	/* LDV_COMMENT_END_PREP */
 2671 	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
 2672 	ldv_handler_precall();
 2673 	 if(wbsd_drv_init()) 
 2674 		goto ldv_final;
 2675 	/* LDV_COMMENT_BEGIN_PREP */
 2676 	#ifdef CONFIG_PNP
 2677 	#endif 
 2678 	#ifdef CONFIG_PNP
 2679 	#endif
 2680 	#ifdef CONFIG_PNP
 2681 	#endif
 2682 	/* LDV_COMMENT_END_PREP */
 2683 	
 2684 
 2685 	int ldv_s_wbsd_driver_platform_driver = 0;
 2686 
 2687 	int ldv_s_wbsd_pnp_driver_pnp_driver = 0;
 2688 	
 2689 
 2690 	
 2691 
 2692 	
 2693 
 2694 
 2695 	while(  nondet_int()
 2696 		|| !(ldv_s_wbsd_driver_platform_driver == 0)
 2697 		|| !(ldv_s_wbsd_pnp_driver_pnp_driver == 0)
 2698 	) {
 2699 
 2700 		switch(nondet_int()) {
 2701 
 2702 			case 0: {
 2703 
 2704 				/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2705 				
 2706 
 2707 				/* content: static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)*/
 2708 				/* LDV_COMMENT_BEGIN_PREP */
 2709 				#define DRIVER_NAME "wbsd"
 2710 				#define DBG(x...) \
 2711 	pr_debug(DRIVER_NAME ": " x)
 2712 				#define DBGF(f, x...) \
 2713 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2714 				#ifdef CONFIG_PNP
 2715 				#endif 
 2716 				#ifdef CONFIG_PNP
 2717 				#else
 2718 				#endif
 2719 				/* LDV_COMMENT_END_PREP */
 2720 				/* LDV_COMMENT_FUNCTION_CALL Function from field "request" from driver structure with callbacks "wbsd_ops" */
 2721 				ldv_handler_precall();
 2722 				wbsd_request( var_group1, var_group2);
 2723 				/* LDV_COMMENT_BEGIN_PREP */
 2724 				#ifdef CONFIG_PM
 2725 				#endif
 2726 				#ifdef CONFIG_PNP
 2727 				#endif 
 2728 				#ifdef CONFIG_PM
 2729 				#ifdef CONFIG_PNP
 2730 				#endif 
 2731 				#else 
 2732 				#define wbsd_platform_suspend NULL
 2733 				#define wbsd_platform_resume NULL
 2734 				#define wbsd_pnp_suspend NULL
 2735 				#define wbsd_pnp_resume NULL
 2736 				#endif 
 2737 				#ifdef CONFIG_PNP
 2738 				#endif 
 2739 				#ifdef CONFIG_PNP
 2740 				#endif 
 2741 				#ifdef CONFIG_PNP
 2742 				#endif 
 2743 				#ifdef CONFIG_PNP
 2744 				#endif
 2745 				#ifdef CONFIG_PNP
 2746 				#endif
 2747 				/* LDV_COMMENT_END_PREP */
 2748 				
 2749 
 2750 				
 2751 
 2752 			}
 2753 
 2754 			break;
 2755 			case 1: {
 2756 
 2757 				/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2758 				
 2759 
 2760 				/* content: static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)*/
 2761 				/* LDV_COMMENT_BEGIN_PREP */
 2762 				#define DRIVER_NAME "wbsd"
 2763 				#define DBG(x...) \
 2764 	pr_debug(DRIVER_NAME ": " x)
 2765 				#define DBGF(f, x...) \
 2766 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2767 				#ifdef CONFIG_PNP
 2768 				#endif 
 2769 				#ifdef CONFIG_PNP
 2770 				#else
 2771 				#endif
 2772 				#ifdef CONFIG_MMC_DEBUG
 2773 				#endif
 2774 				/* LDV_COMMENT_END_PREP */
 2775 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_ios" from driver structure with callbacks "wbsd_ops" */
 2776 				ldv_handler_precall();
 2777 				wbsd_set_ios( var_group1, var_group3);
 2778 				/* LDV_COMMENT_BEGIN_PREP */
 2779 				#ifdef CONFIG_PM
 2780 				#endif
 2781 				#ifdef CONFIG_PNP
 2782 				#endif 
 2783 				#ifdef CONFIG_PM
 2784 				#ifdef CONFIG_PNP
 2785 				#endif 
 2786 				#else 
 2787 				#define wbsd_platform_suspend NULL
 2788 				#define wbsd_platform_resume NULL
 2789 				#define wbsd_pnp_suspend NULL
 2790 				#define wbsd_pnp_resume NULL
 2791 				#endif 
 2792 				#ifdef CONFIG_PNP
 2793 				#endif 
 2794 				#ifdef CONFIG_PNP
 2795 				#endif 
 2796 				#ifdef CONFIG_PNP
 2797 				#endif 
 2798 				#ifdef CONFIG_PNP
 2799 				#endif
 2800 				#ifdef CONFIG_PNP
 2801 				#endif
 2802 				/* LDV_COMMENT_END_PREP */
 2803 				
 2804 
 2805 				
 2806 
 2807 			}
 2808 
 2809 			break;
 2810 			case 2: {
 2811 
 2812 				/** STRUCT: struct type: mmc_host_ops, struct name: wbsd_ops **/
 2813 				
 2814 
 2815 				/* content: static int wbsd_get_ro(struct mmc_host *mmc)*/
 2816 				/* LDV_COMMENT_BEGIN_PREP */
 2817 				#define DRIVER_NAME "wbsd"
 2818 				#define DBG(x...) \
 2819 	pr_debug(DRIVER_NAME ": " x)
 2820 				#define DBGF(f, x...) \
 2821 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2822 				#ifdef CONFIG_PNP
 2823 				#endif 
 2824 				#ifdef CONFIG_PNP
 2825 				#else
 2826 				#endif
 2827 				#ifdef CONFIG_MMC_DEBUG
 2828 				#endif
 2829 				/* LDV_COMMENT_END_PREP */
 2830 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_ro" from driver structure with callbacks "wbsd_ops" */
 2831 				ldv_handler_precall();
 2832 				wbsd_get_ro( var_group1);
 2833 				/* LDV_COMMENT_BEGIN_PREP */
 2834 				#ifdef CONFIG_PM
 2835 				#endif
 2836 				#ifdef CONFIG_PNP
 2837 				#endif 
 2838 				#ifdef CONFIG_PM
 2839 				#ifdef CONFIG_PNP
 2840 				#endif 
 2841 				#else 
 2842 				#define wbsd_platform_suspend NULL
 2843 				#define wbsd_platform_resume NULL
 2844 				#define wbsd_pnp_suspend NULL
 2845 				#define wbsd_pnp_resume NULL
 2846 				#endif 
 2847 				#ifdef CONFIG_PNP
 2848 				#endif 
 2849 				#ifdef CONFIG_PNP
 2850 				#endif 
 2851 				#ifdef CONFIG_PNP
 2852 				#endif 
 2853 				#ifdef CONFIG_PNP
 2854 				#endif
 2855 				#ifdef CONFIG_PNP
 2856 				#endif
 2857 				/* LDV_COMMENT_END_PREP */
 2858 				
 2859 
 2860 				
 2861 
 2862 			}
 2863 
 2864 			break;
 2865 			case 3: {
 2866 
 2867 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2868 				if(ldv_s_wbsd_driver_platform_driver==0) {
 2869 
 2870 				/* content: static int wbsd_probe(struct platform_device *dev)*/
 2871 				/* LDV_COMMENT_BEGIN_PREP */
 2872 				#define DRIVER_NAME "wbsd"
 2873 				#define DBG(x...) \
 2874 	pr_debug(DRIVER_NAME ": " x)
 2875 				#define DBGF(f, x...) \
 2876 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2877 				#ifdef CONFIG_PNP
 2878 				#endif 
 2879 				#ifdef CONFIG_PNP
 2880 				#else
 2881 				#endif
 2882 				#ifdef CONFIG_MMC_DEBUG
 2883 				#endif
 2884 				#ifdef CONFIG_PM
 2885 				#endif
 2886 				/* LDV_COMMENT_END_PREP */
2887 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "wbsd_driver". Standard function test for correct return result. */
 2888 				res_wbsd_probe_48 = wbsd_probe( var_group4);
 2889 				 ldv_check_return_value(res_wbsd_probe_48);
 2890 				 ldv_check_return_value_probe(res_wbsd_probe_48);
 2891 				 if(res_wbsd_probe_48) 
 2892 					goto ldv_module_exit;
 2893 				/* LDV_COMMENT_BEGIN_PREP */
 2894 				#ifdef CONFIG_PNP
 2895 				#endif 
 2896 				#ifdef CONFIG_PM
 2897 				#ifdef CONFIG_PNP
 2898 				#endif 
 2899 				#else 
 2900 				#define wbsd_platform_suspend NULL
 2901 				#define wbsd_platform_resume NULL
 2902 				#define wbsd_pnp_suspend NULL
 2903 				#define wbsd_pnp_resume NULL
 2904 				#endif 
 2905 				#ifdef CONFIG_PNP
 2906 				#endif 
 2907 				#ifdef CONFIG_PNP
 2908 				#endif 
 2909 				#ifdef CONFIG_PNP
 2910 				#endif 
 2911 				#ifdef CONFIG_PNP
 2912 				#endif
 2913 				#ifdef CONFIG_PNP
 2914 				#endif
 2915 				/* LDV_COMMENT_END_PREP */
 2916 				ldv_s_wbsd_driver_platform_driver++;
 2917 
 2918 				}
 2919 
 2920 			}
 2921 
 2922 			break;
 2923 			case 4: {
 2924 
 2925 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2926 				if(ldv_s_wbsd_driver_platform_driver==1) {
 2927 
 2928 				/* content: static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state)*/
 2929 				/* LDV_COMMENT_BEGIN_PREP */
 2930 				#define DRIVER_NAME "wbsd"
 2931 				#define DBG(x...) \
 2932 	pr_debug(DRIVER_NAME ": " x)
 2933 				#define DBGF(f, x...) \
 2934 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2935 				#ifdef CONFIG_PNP
 2936 				#endif 
 2937 				#ifdef CONFIG_PNP
 2938 				#else
 2939 				#endif
 2940 				#ifdef CONFIG_MMC_DEBUG
 2941 				#endif
 2942 				#ifdef CONFIG_PM
 2943 				#endif
 2944 				#ifdef CONFIG_PNP
 2945 				#endif 
 2946 				#ifdef CONFIG_PM
 2947 				/* LDV_COMMENT_END_PREP */
 2948 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "wbsd_driver" */
 2949 				ldv_handler_precall();
 2950 				wbsd_platform_suspend( var_group4, var_wbsd_platform_suspend_52_p1);
 2951 				/* LDV_COMMENT_BEGIN_PREP */
 2952 				#ifdef CONFIG_PNP
 2953 				#endif 
 2954 				#else 
 2955 				#define wbsd_platform_suspend NULL
 2956 				#define wbsd_platform_resume NULL
 2957 				#define wbsd_pnp_suspend NULL
 2958 				#define wbsd_pnp_resume NULL
 2959 				#endif 
 2960 				#ifdef CONFIG_PNP
 2961 				#endif 
 2962 				#ifdef CONFIG_PNP
 2963 				#endif 
 2964 				#ifdef CONFIG_PNP
 2965 				#endif 
 2966 				#ifdef CONFIG_PNP
 2967 				#endif
 2968 				#ifdef CONFIG_PNP
 2969 				#endif
 2970 				/* LDV_COMMENT_END_PREP */
 2971 				ldv_s_wbsd_driver_platform_driver++;
 2972 
 2973 				}
 2974 
 2975 			}
 2976 
 2977 			break;
 2978 			case 5: {
 2979 
 2980 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 2981 				if(ldv_s_wbsd_driver_platform_driver==2) {
 2982 
 2983 				/* content: static int wbsd_platform_resume(struct platform_device *dev)*/
 2984 				/* LDV_COMMENT_BEGIN_PREP */
 2985 				#define DRIVER_NAME "wbsd"
 2986 				#define DBG(x...) \
 2987 	pr_debug(DRIVER_NAME ": " x)
 2988 				#define DBGF(f, x...) \
 2989 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 2990 				#ifdef CONFIG_PNP
 2991 				#endif 
 2992 				#ifdef CONFIG_PNP
 2993 				#else
 2994 				#endif
 2995 				#ifdef CONFIG_MMC_DEBUG
 2996 				#endif
 2997 				#ifdef CONFIG_PM
 2998 				#endif
 2999 				#ifdef CONFIG_PNP
 3000 				#endif 
 3001 				#ifdef CONFIG_PM
 3002 				/* LDV_COMMENT_END_PREP */
 3003 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "wbsd_driver" */
 3004 				ldv_handler_precall();
 3005 				wbsd_platform_resume( var_group4);
 3006 				/* LDV_COMMENT_BEGIN_PREP */
 3007 				#ifdef CONFIG_PNP
 3008 				#endif 
 3009 				#else 
 3010 				#define wbsd_platform_suspend NULL
 3011 				#define wbsd_platform_resume NULL
 3012 				#define wbsd_pnp_suspend NULL
 3013 				#define wbsd_pnp_resume NULL
 3014 				#endif 
 3015 				#ifdef CONFIG_PNP
 3016 				#endif 
 3017 				#ifdef CONFIG_PNP
 3018 				#endif 
 3019 				#ifdef CONFIG_PNP
 3020 				#endif 
 3021 				#ifdef CONFIG_PNP
 3022 				#endif
 3023 				#ifdef CONFIG_PNP
 3024 				#endif
 3025 				/* LDV_COMMENT_END_PREP */
 3026 				ldv_s_wbsd_driver_platform_driver++;
 3027 
 3028 				}
 3029 
 3030 			}
 3031 
 3032 			break;
 3033 			case 6: {
 3034 
 3035 				/** STRUCT: struct type: platform_driver, struct name: wbsd_driver **/
 3036 				if(ldv_s_wbsd_driver_platform_driver==3) {
 3037 
 3038 				/* content: static int wbsd_remove(struct platform_device *dev)*/
 3039 				/* LDV_COMMENT_BEGIN_PREP */
 3040 				#define DRIVER_NAME "wbsd"
 3041 				#define DBG(x...) \
 3042 	pr_debug(DRIVER_NAME ": " x)
 3043 				#define DBGF(f, x...) \
 3044 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3045 				#ifdef CONFIG_PNP
 3046 				#endif 
 3047 				#ifdef CONFIG_PNP
 3048 				#else
 3049 				#endif
 3050 				#ifdef CONFIG_MMC_DEBUG
 3051 				#endif
 3052 				#ifdef CONFIG_PM
 3053 				#endif
 3054 				/* LDV_COMMENT_END_PREP */
 3055 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "wbsd_driver" */
 3056 				ldv_handler_precall();
 3057 				wbsd_remove( var_group4);
 3058 				/* LDV_COMMENT_BEGIN_PREP */
 3059 				#ifdef CONFIG_PNP
 3060 				#endif 
 3061 				#ifdef CONFIG_PM
 3062 				#ifdef CONFIG_PNP
 3063 				#endif 
 3064 				#else 
 3065 				#define wbsd_platform_suspend NULL
 3066 				#define wbsd_platform_resume NULL
 3067 				#define wbsd_pnp_suspend NULL
 3068 				#define wbsd_pnp_resume NULL
 3069 				#endif 
 3070 				#ifdef CONFIG_PNP
 3071 				#endif 
 3072 				#ifdef CONFIG_PNP
 3073 				#endif 
 3074 				#ifdef CONFIG_PNP
 3075 				#endif 
 3076 				#ifdef CONFIG_PNP
 3077 				#endif
 3078 				#ifdef CONFIG_PNP
 3079 				#endif
 3080 				/* LDV_COMMENT_END_PREP */
 3081 				ldv_s_wbsd_driver_platform_driver=0;
 3082 
 3083 				}
 3084 
 3085 			}
 3086 
 3087 			break;
 3088 			case 7: {
 3089 
 3090 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3091 				if(ldv_s_wbsd_pnp_driver_pnp_driver==0) {
 3092 
 3093 				/* content: static int wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)*/
 3094 				/* LDV_COMMENT_BEGIN_PREP */
 3095 				#define DRIVER_NAME "wbsd"
 3096 				#define DBG(x...) \
 3097 	pr_debug(DRIVER_NAME ": " x)
 3098 				#define DBGF(f, x...) \
 3099 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3100 				#ifdef CONFIG_PNP
 3101 				#endif 
 3102 				#ifdef CONFIG_PNP
 3103 				#else
 3104 				#endif
 3105 				#ifdef CONFIG_MMC_DEBUG
 3106 				#endif
 3107 				#ifdef CONFIG_PM
 3108 				#endif
 3109 				#ifdef CONFIG_PNP
 3110 				/* LDV_COMMENT_END_PREP */
3111 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "wbsd_pnp_driver". Standard function test for correct return result. */
 3112 				res_wbsd_pnp_probe_50 = wbsd_pnp_probe( var_group5, var_wbsd_pnp_probe_50_p1);
 3113 				 ldv_check_return_value(res_wbsd_pnp_probe_50);
 3114 				 ldv_check_return_value_probe(res_wbsd_pnp_probe_50);
 3115 				 if(res_wbsd_pnp_probe_50) 
 3116 					goto ldv_module_exit;
 3117 				/* LDV_COMMENT_BEGIN_PREP */
 3118 				#endif 
 3119 				#ifdef CONFIG_PM
 3120 				#ifdef CONFIG_PNP
 3121 				#endif 
 3122 				#else 
 3123 				#define wbsd_platform_suspend NULL
 3124 				#define wbsd_platform_resume NULL
 3125 				#define wbsd_pnp_suspend NULL
 3126 				#define wbsd_pnp_resume NULL
 3127 				#endif 
 3128 				#ifdef CONFIG_PNP
 3129 				#endif 
 3130 				#ifdef CONFIG_PNP
 3131 				#endif 
 3132 				#ifdef CONFIG_PNP
 3133 				#endif 
 3134 				#ifdef CONFIG_PNP
 3135 				#endif
 3136 				#ifdef CONFIG_PNP
 3137 				#endif
 3138 				/* LDV_COMMENT_END_PREP */
 3139 				ldv_s_wbsd_pnp_driver_pnp_driver++;
 3140 
 3141 				}
 3142 
 3143 			}
 3144 
 3145 			break;
 3146 			case 8: {
 3147 
 3148 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3149 				if(ldv_s_wbsd_pnp_driver_pnp_driver==1) {
 3150 
 3151 				/* content: static void wbsd_pnp_remove(struct pnp_dev *dev)*/
 3152 				/* LDV_COMMENT_BEGIN_PREP */
 3153 				#define DRIVER_NAME "wbsd"
 3154 				#define DBG(x...) \
 3155 	pr_debug(DRIVER_NAME ": " x)
 3156 				#define DBGF(f, x...) \
 3157 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3158 				#ifdef CONFIG_PNP
 3159 				#endif 
 3160 				#ifdef CONFIG_PNP
 3161 				#else
 3162 				#endif
 3163 				#ifdef CONFIG_MMC_DEBUG
 3164 				#endif
 3165 				#ifdef CONFIG_PM
 3166 				#endif
 3167 				#ifdef CONFIG_PNP
 3168 				/* LDV_COMMENT_END_PREP */
 3169 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "wbsd_pnp_driver" */
 3170 				ldv_handler_precall();
 3171 				wbsd_pnp_remove( var_group5);
 3172 				/* LDV_COMMENT_BEGIN_PREP */
 3173 				#endif 
 3174 				#ifdef CONFIG_PM
 3175 				#ifdef CONFIG_PNP
 3176 				#endif 
 3177 				#else 
 3178 				#define wbsd_platform_suspend NULL
 3179 				#define wbsd_platform_resume NULL
 3180 				#define wbsd_pnp_suspend NULL
 3181 				#define wbsd_pnp_resume NULL
 3182 				#endif 
 3183 				#ifdef CONFIG_PNP
 3184 				#endif 
 3185 				#ifdef CONFIG_PNP
 3186 				#endif 
 3187 				#ifdef CONFIG_PNP
 3188 				#endif 
 3189 				#ifdef CONFIG_PNP
 3190 				#endif
 3191 				#ifdef CONFIG_PNP
 3192 				#endif
 3193 				/* LDV_COMMENT_END_PREP */
 3194 				ldv_s_wbsd_pnp_driver_pnp_driver=0;
 3195 
 3196 				}
 3197 
 3198 			}
 3199 
 3200 			break;
 3201 			case 9: {
 3202 
 3203 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3204 				
 3205 
 3206 				/* content: static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)*/
 3207 				/* LDV_COMMENT_BEGIN_PREP */
 3208 				#define DRIVER_NAME "wbsd"
 3209 				#define DBG(x...) \
 3210 	pr_debug(DRIVER_NAME ": " x)
 3211 				#define DBGF(f, x...) \
 3212 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3213 				#ifdef CONFIG_PNP
 3214 				#endif 
 3215 				#ifdef CONFIG_PNP
 3216 				#else
 3217 				#endif
 3218 				#ifdef CONFIG_MMC_DEBUG
 3219 				#endif
 3220 				#ifdef CONFIG_PM
 3221 				#endif
 3222 				#ifdef CONFIG_PNP
 3223 				#endif 
 3224 				#ifdef CONFIG_PM
 3225 				#ifdef CONFIG_PNP
 3226 				/* LDV_COMMENT_END_PREP */
 3227 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "wbsd_pnp_driver" */
 3228 				ldv_handler_precall();
 3229 				wbsd_pnp_suspend( var_group5, var_wbsd_pnp_suspend_54_p1);
 3230 				/* LDV_COMMENT_BEGIN_PREP */
 3231 				#endif 
 3232 				#else 
 3233 				#define wbsd_platform_suspend NULL
 3234 				#define wbsd_platform_resume NULL
 3235 				#define wbsd_pnp_suspend NULL
 3236 				#define wbsd_pnp_resume NULL
 3237 				#endif 
 3238 				#ifdef CONFIG_PNP
 3239 				#endif 
 3240 				#ifdef CONFIG_PNP
 3241 				#endif 
 3242 				#ifdef CONFIG_PNP
 3243 				#endif 
 3244 				#ifdef CONFIG_PNP
 3245 				#endif
 3246 				#ifdef CONFIG_PNP
 3247 				#endif
 3248 				/* LDV_COMMENT_END_PREP */
 3249 				
 3250 
 3251 				
 3252 
 3253 			}
 3254 
 3255 			break;
 3256 			case 10: {
 3257 
 3258 				/** STRUCT: struct type: pnp_driver, struct name: wbsd_pnp_driver **/
 3259 				
 3260 
 3261 				/* content: static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)*/
 3262 				/* LDV_COMMENT_BEGIN_PREP */
 3263 				#define DRIVER_NAME "wbsd"
 3264 				#define DBG(x...) \
 3265 	pr_debug(DRIVER_NAME ": " x)
 3266 				#define DBGF(f, x...) \
 3267 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3268 				#ifdef CONFIG_PNP
 3269 				#endif 
 3270 				#ifdef CONFIG_PNP
 3271 				#else
 3272 				#endif
 3273 				#ifdef CONFIG_MMC_DEBUG
 3274 				#endif
 3275 				#ifdef CONFIG_PM
 3276 				#endif
 3277 				#ifdef CONFIG_PNP
 3278 				#endif 
 3279 				#ifdef CONFIG_PM
 3280 				#ifdef CONFIG_PNP
 3281 				/* LDV_COMMENT_END_PREP */
 3282 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "wbsd_pnp_driver" */
 3283 				ldv_handler_precall();
 3284 				wbsd_pnp_resume( var_group5);
 3285 				/* LDV_COMMENT_BEGIN_PREP */
 3286 				#endif 
 3287 				#else 
 3288 				#define wbsd_platform_suspend NULL
 3289 				#define wbsd_platform_resume NULL
 3290 				#define wbsd_pnp_suspend NULL
 3291 				#define wbsd_pnp_resume NULL
 3292 				#endif 
 3293 				#ifdef CONFIG_PNP
 3294 				#endif 
 3295 				#ifdef CONFIG_PNP
 3296 				#endif 
 3297 				#ifdef CONFIG_PNP
 3298 				#endif 
 3299 				#ifdef CONFIG_PNP
 3300 				#endif
 3301 				#ifdef CONFIG_PNP
 3302 				#endif
 3303 				/* LDV_COMMENT_END_PREP */
 3304 				
 3305 
 3306 				
 3307 
 3308 			}
 3309 
 3310 			break;
 3311 			case 11: {
 3312 
 3313 				/** CALLBACK SECTION request_irq **/
 3314 				LDV_IN_INTERRUPT=2;
 3315 
 3316 				/* content: static irqreturn_t wbsd_irq(int irq, void *dev_id)*/
 3317 				/* LDV_COMMENT_BEGIN_PREP */
 3318 				#define DRIVER_NAME "wbsd"
 3319 				#define DBG(x...) \
 3320 	pr_debug(DRIVER_NAME ": " x)
 3321 				#define DBGF(f, x...) \
 3322 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3323 				#ifdef CONFIG_PNP
 3324 				#endif 
 3325 				#ifdef CONFIG_PNP
 3326 				#else
 3327 				#endif
 3328 				#ifdef CONFIG_MMC_DEBUG
 3329 				#endif
 3330 				/* LDV_COMMENT_END_PREP */
 3331 				/* LDV_COMMENT_FUNCTION_CALL */
 3332 				ldv_handler_precall();
 3333 				wbsd_irq( var_wbsd_irq_31_p0, var_wbsd_irq_31_p1);
 3334 				/* LDV_COMMENT_BEGIN_PREP */
 3335 				#ifdef CONFIG_PM
 3336 				#endif
 3337 				#ifdef CONFIG_PNP
 3338 				#endif 
 3339 				#ifdef CONFIG_PM
 3340 				#ifdef CONFIG_PNP
 3341 				#endif 
 3342 				#else 
 3343 				#define wbsd_platform_suspend NULL
 3344 				#define wbsd_platform_resume NULL
 3345 				#define wbsd_pnp_suspend NULL
 3346 				#define wbsd_pnp_resume NULL
 3347 				#endif 
 3348 				#ifdef CONFIG_PNP
 3349 				#endif 
 3350 				#ifdef CONFIG_PNP
 3351 				#endif 
 3352 				#ifdef CONFIG_PNP
 3353 				#endif 
 3354 				#ifdef CONFIG_PNP
 3355 				#endif
 3356 				#ifdef CONFIG_PNP
 3357 				#endif
 3358 				/* LDV_COMMENT_END_PREP */
 3359 				LDV_IN_INTERRUPT=1;
 3360 
 3361 				
 3362 
 3363 			}
 3364 
 3365 			break;
 3366 			case 12: {
 3367 
 3368 				/** TIMER SECTION timer **/
 3369 				
 3370 
 3371 				/* content: static void wbsd_reset_ignore(unsigned long data)*/
 3372 				/* LDV_COMMENT_BEGIN_PREP */
 3373 				#define DRIVER_NAME "wbsd"
 3374 				#define DBG(x...) \
 3375 	pr_debug(DRIVER_NAME ": " x)
 3376 				#define DBGF(f, x...) \
 3377 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3378 				#ifdef CONFIG_PNP
 3379 				#endif 
 3380 				#ifdef CONFIG_PNP
 3381 				#else
 3382 				#endif
 3383 				#ifdef CONFIG_MMC_DEBUG
 3384 				#endif
 3385 				/* LDV_COMMENT_END_PREP */
 3386 				/* LDV_COMMENT_FUNCTION_CALL */
 3387 				ldv_handler_precall();
 3388 				wbsd_reset_ignore( var_wbsd_reset_ignore_24_p0);
 3389 				/* LDV_COMMENT_BEGIN_PREP */
 3390 				#ifdef CONFIG_PM
 3391 				#endif
 3392 				#ifdef CONFIG_PNP
 3393 				#endif 
 3394 				#ifdef CONFIG_PM
 3395 				#ifdef CONFIG_PNP
 3396 				#endif 
 3397 				#else 
 3398 				#define wbsd_platform_suspend NULL
 3399 				#define wbsd_platform_resume NULL
 3400 				#define wbsd_pnp_suspend NULL
 3401 				#define wbsd_pnp_resume NULL
 3402 				#endif 
 3403 				#ifdef CONFIG_PNP
 3404 				#endif 
 3405 				#ifdef CONFIG_PNP
 3406 				#endif 
 3407 				#ifdef CONFIG_PNP
 3408 				#endif 
 3409 				#ifdef CONFIG_PNP
 3410 				#endif
 3411 				#ifdef CONFIG_PNP
 3412 				#endif
 3413 				/* LDV_COMMENT_END_PREP */
 3414 				
 3415 
 3416 				
 3417 
 3418 			}
 3419 
 3420 			break;
 3421 			default: break;
 3422 
 3423 		}
 3424 
 3425 	}
 3426 
 3427 	ldv_module_exit: 
 3428 
 3429 	/** INIT: init_type: ST_MODULE_EXIT **/
 3430 	/* content: static void __exit wbsd_drv_exit(void)*/
 3431 	/* LDV_COMMENT_BEGIN_PREP */
 3432 	#define DRIVER_NAME "wbsd"
 3433 	#define DBG(x...) \
 3434 	pr_debug(DRIVER_NAME ": " x)
 3435 	#define DBGF(f, x...) \
 3436 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
 3437 	#ifdef CONFIG_PNP
 3438 	#endif 
 3439 	#ifdef CONFIG_PNP
 3440 	#else
 3441 	#endif
 3442 	#ifdef CONFIG_MMC_DEBUG
 3443 	#endif
 3444 	#ifdef CONFIG_PM
 3445 	#endif
 3446 	#ifdef CONFIG_PNP
 3447 	#endif 
 3448 	#ifdef CONFIG_PM
 3449 	#ifdef CONFIG_PNP
 3450 	#endif 
 3451 	#else 
 3452 	#define wbsd_platform_suspend NULL
 3453 	#define wbsd_platform_resume NULL
 3454 	#define wbsd_pnp_suspend NULL
 3455 	#define wbsd_pnp_resume NULL
 3456 	#endif 
 3457 	#ifdef CONFIG_PNP
 3458 	#endif 
 3459 	#ifdef CONFIG_PNP
 3460 	#endif 
 3461 	/* LDV_COMMENT_END_PREP */
3462 	/* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
 3463 	ldv_handler_precall();
 3464 	wbsd_drv_exit();
 3465 	/* LDV_COMMENT_BEGIN_PREP */
 3466 	#ifdef CONFIG_PNP
 3467 	#endif
 3468 	#ifdef CONFIG_PNP
 3469 	#endif
 3470 	/* LDV_COMMENT_END_PREP */
 3471 
 3472 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
 3473 	ldv_final: ldv_check_final_state();
 3474 
 3475 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 3476 	return;
 3477 
 3478 }
 3479 #endif
 3480 
 3481 /* LDV_COMMENT_END_MAIN */
 3482 
 3483 #line 10 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/2264/dscv_tempdir/dscv/ri/331_1a/drivers/mmc/host/wbsd.o.c.prepared"                 1 
    2 #include <verifier/rcv.h>
    3 #include <kernel-model/ERR.inc>
    4 
    5 int LDV_DMA_MAP_CALLS = 0;
    6 
    7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
    8 void ldv_dma_map_page(void) {
    9  /* LDV_COMMENT_ASSERT Check that previos dma_mapping call was checked */
   10  ldv_assert(LDV_DMA_MAP_CALLS == 0);
   11  /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
   12  LDV_DMA_MAP_CALLS++;
   13 }
   14 
   15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') unmaps page */
   16 void ldv_dma_mapping_error(void) {
   17  /* LDV_COMMENT_ASSERT No dma_mapping calls to verify */
   18  ldv_assert(LDV_DMA_MAP_CALLS != 0);
   19  /* LDV_COMMENT_CHANGE_STATE Check that previos dma_mapping call was checked */
   20  LDV_DMA_MAP_CALLS--;
   21 }
   22 
   23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
   24 void ldv_check_final_state(void) {
   25  /* LDV_COMMENT_ASSERT All incremented module reference counters should be decremented before module unloading*/
   26  ldv_assert(LDV_DMA_MAP_CALLS == 0);
   27 }                 1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
 8 /* The error label wrapper. It is used because some static verifiers (like
 9    BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
     /* Intentional self-loop: reaching LDV_ERROR is the verification failure
        state reported by the tool; execution is never meant to continue. */
12   LDV_ERROR: goto LDV_ERROR;
13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
     /* Intentional non-termination: paths reaching LDV_STOP are pruned by the
        verifier; this is how ldv_assume() discards infeasible paths. */
21   LDV_STOP: goto LDV_STOP;
22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
   47 
48 /* Add explicit model for the __builtin_expect GCC function. Without the model
49    a return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
     /* The branch-prediction hint 'c' is irrelevant for verification; the
        builtin is semantically the identity function on 'exp'. */
52   return exp;
53 }
   54 
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
     /* Modelled as an unconditional verification failure: trapping is always
        an error state for the analysed program. */
62   ldv_assert(0);
63 }
   64 
   65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */                 1 /*
    2  * device.h - generic, centralized driver model
    3  *
    4  * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
    5  * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
    6  * Copyright (c) 2008-2009 Novell Inc.
    7  *
    8  * This file is released under the GPLv2
    9  *
   10  * See Documentation/driver-model/ for more information.
   11  */
   12 
   13 #ifndef _DEVICE_H_
   14 #define _DEVICE_H_
   15 
   16 #include <linux/ioport.h>
   17 #include <linux/kobject.h>
   18 #include <linux/klist.h>
   19 #include <linux/list.h>
   20 #include <linux/lockdep.h>
   21 #include <linux/compiler.h>
   22 #include <linux/types.h>
   23 #include <linux/mutex.h>
   24 #include <linux/pinctrl/devinfo.h>
   25 #include <linux/pm.h>
   26 #include <linux/atomic.h>
   27 #include <linux/ratelimit.h>
   28 #include <linux/uidgid.h>
   29 #include <linux/gfp.h>
   30 #include <asm/device.h>
   31 
   32 struct device;
   33 struct device_private;
   34 struct device_driver;
   35 struct driver_private;
   36 struct module;
   37 struct class;
   38 struct subsys_private;
   39 struct bus_type;
   40 struct device_node;
   41 struct fwnode_handle;
   42 struct iommu_ops;
   43 struct iommu_group;
   44 struct iommu_fwspec;
   45 
46 struct bus_attribute {
	/* Attribute exposed for a bus via bus_create_file()/bus_remove_file();
	   show/store implement reads and writes of the attribute value. */
47 	struct attribute	attr;
48 	ssize_t (*show)(struct bus_type *bus, char *buf);
49 	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
50 };
   51 
   52 #define BUS_ATTR(_name, _mode, _show, _store)	\
   53 	struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
   54 #define BUS_ATTR_RW(_name) \
   55 	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
   56 #define BUS_ATTR_RO(_name) \
   57 	struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
   58 
   59 extern int __must_check bus_create_file(struct bus_type *,
   60 					struct bus_attribute *);
   61 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
   62 
   63 /**
   64  * struct bus_type - The bus type of the device
   65  *
   66  * @name:	The name of the bus.
   67  * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
   68  * @dev_root:	Default device to use as the parent.
   69  * @dev_attrs:	Default attributes of the devices on the bus.
   70  * @bus_groups:	Default attributes of the bus.
   71  * @dev_groups:	Default attributes of the devices on the bus.
   72  * @drv_groups: Default attributes of the device drivers on the bus.
   73  * @match:	Called, perhaps multiple times, whenever a new device or driver
   74  *		is added for this bus. It should return a positive value if the
   75  *		given device can be handled by the given driver and zero
   76  *		otherwise. It may also return error code if determining that
   77  *		the driver supports the device is not possible. In case of
   78  *		-EPROBE_DEFER it will queue the device for deferred probing.
   79  * @uevent:	Called when a device is added, removed, or a few other things
   80  *		that generate uevents to add the environment variables.
   81  * @probe:	Called when a new device or driver add to this bus, and callback
   82  *		the specific driver's probe to initial the matched device.
   83  * @remove:	Called when a device removed from this bus.
   84  * @shutdown:	Called at shut-down time to quiesce the device.
   85  *
   86  * @online:	Called to put the device back online (after offlining it).
   87  * @offline:	Called to put the device offline for hot-removal. May fail.
   88  *
   89  * @suspend:	Called when a device on this bus wants to go to sleep mode.
   90  * @resume:	Called to bring a device on this bus out of sleep mode.
   91  * @pm:		Power management operations of this bus, callback the specific
   92  *		device driver's pm-ops.
   93  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
   94  *              driver implementations to a bus and allow the driver to do
   95  *              bus-specific setup
   96  * @p:		The private data of the driver core, only the driver core can
   97  *		touch this.
   98  * @lock_key:	Lock class key for use by the lock validator
   99  *
  100  * A bus is a channel between the processor and one or more devices. For the
  101  * purposes of the device model, all devices are connected via a bus, even if
  102  * it is an internal, virtual, "platform" bus. Buses can plug into each other.
  103  * A USB controller is usually a PCI device, for example. The device model
  104  * represents the actual connections between buses and the devices they control.
  105  * A bus is represented by the bus_type structure. It contains the name, the
  106  * default attributes, the bus' methods, PM operations, and the driver core's
  107  * private data.
  108  */
109 struct bus_type {
	/* Per-field semantics are described in the kernel-doc comment that
	   precedes this definition. */
110 	const char		*name;
111 	const char		*dev_name;
112 	struct device		*dev_root;
113 	struct device_attribute	*dev_attrs;	/* use dev_groups instead */
114 	const struct attribute_group **bus_groups;
115 	const struct attribute_group **dev_groups;
116 	const struct attribute_group **drv_groups;
117 
118 	int (*match)(struct device *dev, struct device_driver *drv);
119 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
120 	int (*probe)(struct device *dev);
121 	int (*remove)(struct device *dev);
122 	void (*shutdown)(struct device *dev);
123 
124 	int (*online)(struct device *dev);
125 	int (*offline)(struct device *dev);
126 
127 	int (*suspend)(struct device *dev, pm_message_t state);
128 	int (*resume)(struct device *dev);
129 
130 	const struct dev_pm_ops *pm;
131 
132 	const struct iommu_ops *iommu_ops;
133 
	/* Driver-core private data and lock validator key — not for drivers. */
134 	struct subsys_private *p;
135 	struct lock_class_key lock_key;
136 };
  137 
  138 extern int __must_check bus_register(struct bus_type *bus);
  139 
  140 extern void bus_unregister(struct bus_type *bus);
  141 
  142 extern int __must_check bus_rescan_devices(struct bus_type *bus);
  143 
  144 /* iterator helpers for buses */
145 struct subsys_dev_iter {
	/* Cursor state for walking a bus's device list via
	   subsys_dev_iter_init()/subsys_dev_iter_next(), optionally filtered
	   by device type (presumably NULL type matches every device — confirm
	   against subsys_dev_iter_next()'s implementation). */
146 	struct klist_iter		ki;
147 	const struct device_type	*type;
148 };
  149 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
  150 			 struct bus_type *subsys,
  151 			 struct device *start,
  152 			 const struct device_type *type);
  153 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
  154 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
  155 
  156 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
  157 		     int (*fn)(struct device *dev, void *data));
  158 struct device *bus_find_device(struct bus_type *bus, struct device *start,
  159 			       void *data,
  160 			       int (*match)(struct device *dev, void *data));
  161 struct device *bus_find_device_by_name(struct bus_type *bus,
  162 				       struct device *start,
  163 				       const char *name);
  164 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
  165 					struct device *hint);
  166 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
  167 		     void *data, int (*fn)(struct device_driver *, void *));
  168 void bus_sort_breadthfirst(struct bus_type *bus,
  169 			   int (*compare)(const struct device *a,
  170 					  const struct device *b));
  171 /*
  172  * Bus notifiers: Get notified of addition/removal of devices
  173  * and binding/unbinding of drivers to devices.
  174  * In the long run, it should be a replacement for the platform
  175  * notify hooks.
  176  */
  177 struct notifier_block;
  178 
  179 extern int bus_register_notifier(struct bus_type *bus,
  180 				 struct notifier_block *nb);
  181 extern int bus_unregister_notifier(struct bus_type *bus,
  182 				   struct notifier_block *nb);
  183 
/* All 8 notifiers below get called with the target struct device *
 * as an argument. Note that those functions are likely to be called
 * with the device lock held in the core, so be careful.
 */
  188 #define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
  189 #define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
  190 #define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
  191 #define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
  192 						      bound */
  193 #define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
  194 #define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
  195 						      unbound */
  196 #define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
  197 						      from the device */
  198 #define BUS_NOTIFY_DRIVER_NOT_BOUND	0x00000008 /* driver fails to be bound */
  199 
  200 extern struct kset *bus_get_kset(struct bus_type *bus);
  201 extern struct klist *bus_get_device_klist(struct bus_type *bus);
  202 
  203 /**
  204  * enum probe_type - device driver probe type to try
  205  *	Device drivers may opt in for special handling of their
  206  *	respective probe routines. This tells the core what to
  207  *	expect and prefer.
  208  *
  209  * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
  210  *	whether probed synchronously or asynchronously.
  211  * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which
  212  *	probing order is not essential for booting the system may
  213  *	opt into executing their probes asynchronously.
  214  * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
  215  *	their probe routines to run synchronously with driver and
  216  *	device registration (with the exception of -EPROBE_DEFER
  217  *	handling - re-probing always ends up being done asynchronously).
  218  *
  219  * Note that the end goal is to switch the kernel to use asynchronous
  220  * probing by default, so annotating drivers with
  221  * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
  222  * to speed up boot process while we are validating the rest of the
  223  * drivers.
  224  */
enum probe_type {
	PROBE_DEFAULT_STRATEGY,		/* no preference; sync or async both fine */
	PROBE_PREFER_ASYNCHRONOUS,	/* opt in to asynchronous probing */
	PROBE_FORCE_SYNCHRONOUS,	/* require synchronous probing */
};
  230 
  231 /**
  232  * struct device_driver - The basic device driver structure
  233  * @name:	Name of the device driver.
  234  * @bus:	The bus which the device of this driver belongs to.
  235  * @owner:	The module owner.
  236  * @mod_name:	Used for built-in modules.
  237  * @suppress_bind_attrs: Disables bind/unbind via sysfs.
  238  * @probe_type:	Type of the probe (synchronous or asynchronous) to use.
  239  * @of_match_table: The open firmware table.
  240  * @acpi_match_table: The ACPI match table.
  241  * @probe:	Called to query the existence of a specific device,
  242  *		whether this driver can work with it, and bind the driver
  243  *		to a specific device.
  244  * @remove:	Called when the device is removed from the system to
  245  *		unbind a device from this driver.
  246  * @shutdown:	Called at shut-down time to quiesce the device.
  247  * @suspend:	Called to put the device to sleep mode. Usually to a
  248  *		low power state.
  249  * @resume:	Called to bring a device from sleep mode.
  250  * @groups:	Default attributes that get created by the driver core
  251  *		automatically.
  252  * @pm:		Power management operations of the device which matched
  253  *		this driver.
  254  * @p:		Driver core's private data, no one other than the driver
  255  *		core can touch this.
  256  *
  257  * The device driver-model tracks all of the drivers known to the system.
  258  * The main reason for this tracking is to enable the driver core to match
  259  * up drivers with new devices. Once drivers are known objects within the
  260  * system, however, a number of other things become possible. Device drivers
  261  * can export information and configuration variables that are independent
  262  * of any specific device.
  263  */
struct device_driver {
	const char		*name;
	struct bus_type		*bus;

	struct module		*owner;
	const char		*mod_name;	/* used for built-in modules */

	bool suppress_bind_attrs;	/* disables bind/unbind via sysfs */
	enum probe_type probe_type;	/* sync/async probe preference */

	const struct of_device_id	*of_match_table;	/* open firmware match table */
	const struct acpi_device_id	*acpi_match_table;	/* ACPI match table */

	int (*probe) (struct device *dev);
	int (*remove) (struct device *dev);
	void (*shutdown) (struct device *dev);
	int (*suspend) (struct device *dev, pm_message_t state);
	int (*resume) (struct device *dev);
	const struct attribute_group **groups;	/* default attrs created by the core */

	const struct dev_pm_ops *pm;	/* power management operations */

	struct driver_private *p;	/* driver core private data */
};
  288 
  289 
  290 extern int __must_check driver_register(struct device_driver *drv);
  291 extern void driver_unregister(struct device_driver *drv);
  292 
  293 extern struct device_driver *driver_find(const char *name,
  294 					 struct bus_type *bus);
  295 extern int driver_probe_done(void);
  296 extern void wait_for_device_probe(void);
  297 
  298 
  299 /* sysfs interface for exporting driver attributes */
  300 
struct driver_attribute {
	struct attribute attr;
	/* read handler for the driver's sysfs attribute file */
	ssize_t (*show)(struct device_driver *driver, char *buf);
	/* write handler for the driver's sysfs attribute file */
	ssize_t (*store)(struct device_driver *driver, const char *buf,
			 size_t count);
};
  307 
  308 #define DRIVER_ATTR(_name, _mode, _show, _store) \
  309 	struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
  310 #define DRIVER_ATTR_RW(_name) \
  311 	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
  312 #define DRIVER_ATTR_RO(_name) \
  313 	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
  314 #define DRIVER_ATTR_WO(_name) \
  315 	struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
  316 
  317 extern int __must_check driver_create_file(struct device_driver *driver,
  318 					const struct driver_attribute *attr);
  319 extern void driver_remove_file(struct device_driver *driver,
  320 			       const struct driver_attribute *attr);
  321 
  322 extern int __must_check driver_for_each_device(struct device_driver *drv,
  323 					       struct device *start,
  324 					       void *data,
  325 					       int (*fn)(struct device *dev,
  326 							 void *));
  327 struct device *driver_find_device(struct device_driver *drv,
  328 				  struct device *start, void *data,
  329 				  int (*match)(struct device *dev, void *data));
  330 
  331 /**
  332  * struct subsys_interface - interfaces to device functions
  333  * @name:       name of the device function
  334  * @subsys:     subsytem of the devices to attach to
  335  * @node:       the list of functions registered at the subsystem
  336  * @add_dev:    device hookup to device function handler
  337  * @remove_dev: device hookup to device function handler
  338  *
  339  * Simple interfaces attached to a subsystem. Multiple interfaces can
  340  * attach to a subsystem and its devices. Unlike drivers, they do not
  341  * exclusively claim or control devices. Interfaces usually represent
  342  * a specific functionality of a subsystem/class of devices.
  343  */
  344 struct subsys_interface {
  345 	const char *name;
  346 	struct bus_type *subsys;
  347 	struct list_head node;
  348 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
  349 	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
  350 };
  351 
  352 int subsys_interface_register(struct subsys_interface *sif);
  353 void subsys_interface_unregister(struct subsys_interface *sif);
  354 
  355 int subsys_system_register(struct bus_type *subsys,
  356 			   const struct attribute_group **groups);
  357 int subsys_virtual_register(struct bus_type *subsys,
  358 			    const struct attribute_group **groups);
  359 
  360 /**
  361  * struct class - device classes
  362  * @name:	Name of the class.
  363  * @owner:	The module owner.
  364  * @class_attrs: Default attributes of this class.
  365  * @dev_groups:	Default attributes of the devices that belong to the class.
  366  * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
  367  * @dev_uevent:	Called when a device is added, removed from this class, or a
  368  *		few other things that generate uevents to add the environment
  369  *		variables.
  370  * @devnode:	Callback to provide the devtmpfs.
  371  * @class_release: Called to release this class.
  372  * @dev_release: Called to release the device.
  373  * @suspend:	Used to put the device to sleep mode, usually to a low power
  374  *		state.
  375  * @resume:	Used to bring the device from the sleep mode.
  376  * @ns_type:	Callbacks so sysfs can detemine namespaces.
  377  * @namespace:	Namespace of the device belongs to this class.
  378  * @pm:		The default device power management operations of this class.
  379  * @p:		The private data of the driver core, no one other than the
  380  *		driver core can touch this.
  381  *
  382  * A class is a higher-level view of a device that abstracts out low-level
  383  * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
  384  * at the class level, they are all simply disks. Classes allow user space
  385  * to work with devices based on what they do, rather than how they are
  386  * connected or how they work.
  387  */
  388 struct class {
  389 	const char		*name;
  390 	struct module		*owner;
  391 
  392 	struct class_attribute		*class_attrs;
  393 	const struct attribute_group	**dev_groups;
  394 	struct kobject			*dev_kobj;
  395 
  396 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
  397 	char *(*devnode)(struct device *dev, umode_t *mode);
  398 
  399 	void (*class_release)(struct class *class);
  400 	void (*dev_release)(struct device *dev);
  401 
  402 	int (*suspend)(struct device *dev, pm_message_t state);
  403 	int (*resume)(struct device *dev);
  404 
  405 	const struct kobj_ns_type_operations *ns_type;
  406 	const void *(*namespace)(struct device *dev);
  407 
  408 	const struct dev_pm_ops *pm;
  409 
  410 	struct subsys_private *p;
  411 };
  412 
/* iterator over the devices of a class; see class_dev_iter_init() below */
struct class_dev_iter {
	struct klist_iter		ki;	/* cursor into the class's device klist */
	const struct device_type	*type;	/* if set, iteration skips devices of other types */
};
  417 
  418 extern struct kobject *sysfs_dev_block_kobj;
  419 extern struct kobject *sysfs_dev_char_kobj;
  420 extern int __must_check __class_register(struct class *class,
  421 					 struct lock_class_key *key);
  422 extern void class_unregister(struct class *class);
  423 
  424 /* This is a #define to keep the compiler from merging different
  425  * instances of the __key variable */
  426 #define class_register(class)			\
  427 ({						\
  428 	static struct lock_class_key __key;	\
  429 	__class_register(class, &__key);	\
  430 })
  431 
  432 struct class_compat;
  433 struct class_compat *class_compat_register(const char *name);
  434 void class_compat_unregister(struct class_compat *cls);
  435 int class_compat_create_link(struct class_compat *cls, struct device *dev,
  436 			     struct device *device_link);
  437 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
  438 			      struct device *device_link);
  439 
  440 extern void class_dev_iter_init(struct class_dev_iter *iter,
  441 				struct class *class,
  442 				struct device *start,
  443 				const struct device_type *type);
  444 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
  445 extern void class_dev_iter_exit(struct class_dev_iter *iter);
  446 
  447 extern int class_for_each_device(struct class *class, struct device *start,
  448 				 void *data,
  449 				 int (*fn)(struct device *dev, void *data));
  450 extern struct device *class_find_device(struct class *class,
  451 					struct device *start, const void *data,
  452 					int (*match)(struct device *, const void *));
  453 
/* sysfs interface for exporting class attributes */
struct class_attribute {
	struct attribute attr;
	/* read handler for the class's sysfs attribute file */
	ssize_t (*show)(struct class *class, struct class_attribute *attr,
			char *buf);
	/* write handler for the class's sysfs attribute file */
	ssize_t (*store)(struct class *class, struct class_attribute *attr,
			const char *buf, size_t count);
};
  461 
  462 #define CLASS_ATTR(_name, _mode, _show, _store) \
  463 	struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
  464 #define CLASS_ATTR_RW(_name) \
  465 	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
  466 #define CLASS_ATTR_RO(_name) \
  467 	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
  468 
  469 extern int __must_check class_create_file_ns(struct class *class,
  470 					     const struct class_attribute *attr,
  471 					     const void *ns);
  472 extern void class_remove_file_ns(struct class *class,
  473 				 const struct class_attribute *attr,
  474 				 const void *ns);
  475 
/* create a class attribute file in the default (NULL) sysfs namespace */
static inline int __must_check class_create_file(struct class *class,
					const struct class_attribute *attr)
{
	return class_create_file_ns(class, attr, NULL);
}
  481 
  482 static inline void class_remove_file(struct class *class,
  483 				     const struct class_attribute *attr)
  484 {
  485 	return class_remove_file_ns(class, attr, NULL);
  486 }
  487 
  488 /* Simple class attribute that is just a static string */
struct class_attribute_string {
	struct class_attribute attr;	/* uses show_class_attr_string() as its show handler */
	char *str;			/* the string value exposed via sysfs */
};
  493 
  494 /* Currently read-only only */
  495 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
  496 	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
  497 #define CLASS_ATTR_STRING(_name, _mode, _str) \
  498 	struct class_attribute_string class_attr_##_name = \
  499 		_CLASS_ATTR_STRING(_name, _mode, _str)
  500 
  501 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
  502                         char *buf);
  503 
struct class_interface {
	struct list_head	node;	/* entry in the class's interface list */
	struct class		*class;	/* the class this interface attaches to */

	/* device add/remove hooks */
	int (*add_dev)		(struct device *, struct class_interface *);
	void (*remove_dev)	(struct device *, struct class_interface *);
};
  511 
  512 extern int __must_check class_interface_register(struct class_interface *);
  513 extern void class_interface_unregister(struct class_interface *);
  514 
  515 extern struct class * __must_check __class_create(struct module *owner,
  516 						  const char *name,
  517 						  struct lock_class_key *key);
  518 extern void class_destroy(struct class *cls);
  519 
  520 /* This is a #define to keep the compiler from merging different
  521  * instances of the __key variable */
  522 #define class_create(owner, name)		\
  523 ({						\
  524 	static struct lock_class_key __key;	\
  525 	__class_create(owner, name, &__key);	\
  526 })
  527 
  528 /*
  529  * The type of device, "struct device" is embedded in. A class
  530  * or bus can contain devices of different types
  531  * like "partitions" and "disks", "mouse" and "event".
  532  * This identifies the device type and carries type-specific
  533  * information, equivalent to the kobj_type of a kobject.
  534  * If "name" is specified, the uevent will contain it in
  535  * the DEVTYPE variable.
  536  */
struct device_type {
	const char *name;	/* if set, reported in the DEVTYPE uevent variable */
	const struct attribute_group **groups;
	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
	/* devtmpfs callback: may override name, mode and ownership */
	char *(*devnode)(struct device *dev, umode_t *mode,
			 kuid_t *uid, kgid_t *gid);
	void (*release)(struct device *dev);

	const struct dev_pm_ops *pm;
};
  547 
  548 /* interface for exporting device attributes */
struct device_attribute {
	struct attribute	attr;
	/* read handler for the device's sysfs attribute file */
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
			char *buf);
	/* write handler for the device's sysfs attribute file */
	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
};
  556 
/* device attribute bound to a backing variable (see DEVICE_*_ATTR macros) */
struct dev_ext_attribute {
	struct device_attribute attr;
	void *var;	/* points at the variable the attribute reads/writes */
};
  561 
  562 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
  563 			  char *buf);
  564 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
  565 			   const char *buf, size_t count);
  566 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
  567 			char *buf);
  568 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
  569 			 const char *buf, size_t count);
  570 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
  571 			char *buf);
  572 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
  573 			 const char *buf, size_t count);
  574 
  575 #define DEVICE_ATTR(_name, _mode, _show, _store) \
  576 	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
  577 #define DEVICE_ATTR_RW(_name) \
  578 	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
  579 #define DEVICE_ATTR_RO(_name) \
  580 	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
  581 #define DEVICE_ATTR_WO(_name) \
  582 	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
  583 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
  584 	struct dev_ext_attribute dev_attr_##_name = \
  585 		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
  586 #define DEVICE_INT_ATTR(_name, _mode, _var) \
  587 	struct dev_ext_attribute dev_attr_##_name = \
  588 		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
  589 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
  590 	struct dev_ext_attribute dev_attr_##_name = \
  591 		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
  592 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
  593 	struct device_attribute dev_attr_##_name =		\
  594 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
  595 
  596 extern int device_create_file(struct device *device,
  597 			      const struct device_attribute *entry);
  598 extern void device_remove_file(struct device *dev,
  599 			       const struct device_attribute *attr);
  600 extern bool device_remove_file_self(struct device *dev,
  601 				    const struct device_attribute *attr);
  602 extern int __must_check device_create_bin_file(struct device *dev,
  603 					const struct bin_attribute *attr);
  604 extern void device_remove_bin_file(struct device *dev,
  605 				   const struct bin_attribute *attr);
  606 
  607 /* device resource management */
  608 typedef void (*dr_release_t)(struct device *dev, void *res);
  609 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
  610 
#ifdef CONFIG_DEBUG_DEVRES
/* debug variant additionally records a name: the stringified release fn */
extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
				 int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
	__devres_alloc_node(release, size, gfp, nid, #release)
#else
extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
			       int nid) __malloc;
/* allocate a devres entry with no NUMA node preference */
static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
}
#endif
  626 
  627 extern void devres_for_each_res(struct device *dev, dr_release_t release,
  628 				dr_match_t match, void *match_data,
  629 				void (*fn)(struct device *, void *, void *),
  630 				void *data);
  631 extern void devres_free(void *res);
  632 extern void devres_add(struct device *dev, void *res);
  633 extern void *devres_find(struct device *dev, dr_release_t release,
  634 			 dr_match_t match, void *match_data);
  635 extern void *devres_get(struct device *dev, void *new_res,
  636 			dr_match_t match, void *match_data);
  637 extern void *devres_remove(struct device *dev, dr_release_t release,
  638 			   dr_match_t match, void *match_data);
  639 extern int devres_destroy(struct device *dev, dr_release_t release,
  640 			  dr_match_t match, void *match_data);
  641 extern int devres_release(struct device *dev, dr_release_t release,
  642 			  dr_match_t match, void *match_data);
  643 
  644 /* devres group */
  645 extern void * __must_check devres_open_group(struct device *dev, void *id,
  646 					     gfp_t gfp);
  647 extern void devres_close_group(struct device *dev, void *id);
  648 extern void devres_remove_group(struct device *dev, void *id);
  649 extern int devres_release_group(struct device *dev, void *id);
  650 
  651 /* managed devm_k.alloc/kfree for device drivers */
  652 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
  653 extern __printf(3, 0)
  654 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
  655 		      va_list ap) __malloc;
  656 extern __printf(3, 4)
  657 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
/* devm_kmalloc() with the memory zero-initialized */
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
}
  662 static inline void *devm_kmalloc_array(struct device *dev,
  663 				       size_t n, size_t size, gfp_t flags)
  664 {
  665 	if (size != 0 && n > SIZE_MAX / size)
  666 		return NULL;
  667 	return devm_kmalloc(dev, n * size, flags);
  668 }
/* devm_kmalloc_array() with the memory zero-initialized */
static inline void *devm_kcalloc(struct device *dev,
				 size_t n, size_t size, gfp_t flags)
{
	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
  674 extern void devm_kfree(struct device *dev, void *p);
  675 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
  676 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
  677 			  gfp_t gfp);
  678 
  679 extern unsigned long devm_get_free_pages(struct device *dev,
  680 					 gfp_t gfp_mask, unsigned int order);
  681 extern void devm_free_pages(struct device *dev, unsigned long addr);
  682 
  683 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
  684 
  685 /* allows to add/remove a custom action to devres stack */
  686 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
  687 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
  688 
/*
 * Like devm_add_action(), but if registration fails the action is run
 * immediately, so the caller need not undo the resource by hand on the
 * error path.  Returns the devm_add_action() result.
 */
static inline int devm_add_action_or_reset(struct device *dev,
					   void (*action)(void *), void *data)
{
	int ret;

	ret = devm_add_action(dev, action, data);
	if (ret)
		action(data);

	return ret;
}
  700 
struct device_dma_parameters {
	/*
	 * a low level driver may set these to teach IOMMU code about
	 * sg limitations.
	 */
	unsigned int max_segment_size;		/* largest single sg segment */
	unsigned long segment_boundary_mask;	/* segments must not cross this boundary */
};
  709 
  710 /**
  711  * struct device - The basic device structure
  712  * @parent:	The device's "parent" device, the device to which it is attached.
  713  * 		In most cases, a parent device is some sort of bus or host
  714  * 		controller. If parent is NULL, the device, is a top-level device,
  715  * 		which is not usually what you want.
  716  * @p:		Holds the private data of the driver core portions of the device.
  717  * 		See the comment of the struct device_private for detail.
  718  * @kobj:	A top-level, abstract class from which other classes are derived.
  719  * @init_name:	Initial name of the device.
  720  * @type:	The type of device.
  721  * 		This identifies the device type and carries type-specific
  722  * 		information.
  723  * @mutex:	Mutex to synchronize calls to its driver.
  724  * @bus:	Type of bus device is on.
  725  * @driver:	Which driver has allocated this
  726  * @platform_data: Platform data specific to the device.
  727  * 		Example: For devices on custom boards, as typical of embedded
  728  * 		and SOC based hardware, Linux often uses platform_data to point
  729  * 		to board-specific structures describing devices and how they
  730  * 		are wired.  That can include what ports are available, chip
  731  * 		variants, which GPIO pins act in what additional roles, and so
  732  * 		on.  This shrinks the "Board Support Packages" (BSPs) and
  733  * 		minimizes board-specific #ifdefs in drivers.
  734  * @driver_data: Private pointer for driver specific info.
  735  * @power:	For device power management.
  736  * 		See Documentation/power/devices.txt for details.
  737  * @pm_domain:	Provide callbacks that are executed during system suspend,
  738  * 		hibernation, system resume and during runtime PM transitions
  739  * 		along with subsystem-level and driver-level callbacks.
  740  * @pins:	For device pin management.
  741  *		See Documentation/pinctrl.txt for details.
  742  * @msi_list:	Hosts MSI descriptors
  743  * @msi_domain: The generic MSI domain this device is using.
  744  * @numa_node:	NUMA node this device is close to.
  745  * @dma_mask:	Dma mask (if dma'ble device).
  746  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
  747  * 		hardware supports 64-bit addresses for consistent allocations
  748  * 		such descriptors.
  749  * @dma_pfn_offset: offset of DMA memory range relatively of RAM
  750  * @dma_parms:	A low level driver may set these to teach IOMMU code about
  751  * 		segment limitations.
  752  * @dma_pools:	Dma pools (if dma'ble device).
  753  * @dma_mem:	Internal for coherent mem override.
  754  * @cma_area:	Contiguous memory area for dma allocations
  755  * @archdata:	For arch-specific additions.
  756  * @of_node:	Associated device tree node.
  757  * @fwnode:	Associated device node supplied by platform firmware.
  758  * @devt:	For creating the sysfs "dev".
  759  * @id:		device instance
  760  * @devres_lock: Spinlock to protect the resource of the device.
  761  * @devres_head: The resources list of the device.
  762  * @knode_class: The node used to add the device to the class list.
  763  * @class:	The class of the device.
  764  * @groups:	Optional attribute groups.
  765  * @release:	Callback to free the device after all references have
  766  * 		gone away. This should be set by the allocator of the
  767  * 		device (i.e. the bus driver that discovered the device).
  768  * @iommu_group: IOMMU group the device belongs to.
  769  * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
  770  *
  771  * @offline_disabled: If set, the device is permanently online.
  772  * @offline:	Set after successful invocation of bus type's .offline().
  773  *
  774  * At the lowest level, every device in a Linux system is represented by an
  775  * instance of struct device. The device structure contains the information
  776  * that the device model core needs to model the system. Most subsystems,
  777  * however, track additional information about the devices they host. As a
  778  * result, it is rare for devices to be represented by bare device structures;
  779  * instead, that structure, like kobject structures, is usually embedded within
  780  * a higher-level representation of the device.
  781  */
struct device {
	struct device		*parent;

	struct device_private	*p;

	struct kobject kobj;
	const char		*init_name; /* initial name of the device */
	const struct device_type *type;

	struct mutex		mutex;	/* mutex to synchronize calls to
					 * its driver.
					 */

	struct bus_type	*bus;		/* type of bus device is on */
	struct device_driver *driver;	/* which driver has allocated this
					   device */
	void		*platform_data;	/* Platform specific data, device
					   core doesn't touch it */
	void		*driver_data;	/* Driver data, set and get with
					   dev_set/get_drvdata */
	struct dev_pm_info	power;		/* device power management state */
	struct dev_pm_domain	*pm_domain;	/* PM domain callbacks, if any */

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct irq_domain	*msi_domain;	/* generic MSI domain in use */
#endif
#ifdef CONFIG_PINCTRL
	struct dev_pin_info	*pins;		/* device pin management */
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct list_head	msi_list;	/* hosts MSI descriptors */
#endif

#ifdef CONFIG_NUMA
	int		numa_node;	/* NUMA node this device is close to */
#endif
	u64		*dma_mask;	/* dma mask (if dma'able device) */
	u64		coherent_dma_mask;/* Like dma_mask, but for
					     alloc_coherent mappings as
					     not all hardware supports
					     64 bit addresses for consistent
					     allocations such descriptors. */
	unsigned long	dma_pfn_offset;	/* offset of DMA range relative to RAM */

	struct device_dma_parameters *dma_parms;

	struct list_head	dma_pools;	/* dma pools (if dma'ble) */

	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
					     override */
#ifdef CONFIG_DMA_CMA
	struct cma *cma_area;		/* contiguous memory area for dma
					   allocations */
#endif
	/* arch specific additions */
	struct dev_archdata	archdata;

	struct device_node	*of_node; /* associated device tree node */
	struct fwnode_handle	*fwnode; /* firmware device node */

	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
	u32			id;	/* device instance */

	spinlock_t		devres_lock;	/* protects devres_head */
	struct list_head	devres_head;	/* managed resources of the device */

	struct klist_node	knode_class;	/* node in the class's device list */
	struct class		*class;
	const struct attribute_group **groups;	/* optional groups */

	void	(*release)(struct device *dev);	/* frees the device once all
						   references are gone */
	struct iommu_group	*iommu_group;
	struct iommu_fwspec	*iommu_fwspec;

	bool			offline_disabled:1;	/* if set, permanently online */
	bool			offline:1;	/* set after bus type's .offline() */
};
  859 
/* map an embedded kobject back to its containing struct device */
static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}
  864 
  865 /* Get the wakeup routines, which depend on struct device */
  866 #include <linux/pm_wakeup.h>
  867 
  868 static inline const char *dev_name(const struct device *dev)
  869 {
  870 	/* Use the init name until the kobject becomes available */
  871 	if (dev->init_name)
  872 		return dev->init_name;
  873 
  874 	return kobject_name(&dev->kobj);
  875 }
  876 
  877 extern __printf(2, 3)
  878 int dev_set_name(struct device *dev, const char *name, ...);
  879 
#ifdef CONFIG_NUMA
/* NUMA node this device is close to */
static inline int dev_to_node(struct device *dev)
{
	return dev->numa_node;
}
static inline void set_dev_node(struct device *dev, int node)
{
	dev->numa_node = node;
}
#else
/* !CONFIG_NUMA: no node affinity is tracked; -1 means "no node" */
static inline int dev_to_node(struct device *dev)
{
	return -1;
}
static inline void set_dev_node(struct device *dev, int node)
{
}
#endif
  898 
/* MSI IRQ domain accessors; no-ops/NULL when the domain isn't configured */
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	return dev->msi_domain;
#else
	return NULL;
#endif
}

static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	dev->msi_domain = d;
#endif
}
  914 
/* driver-private data accessors (the device's driver_data pointer) */
static inline void *dev_get_drvdata(const struct device *dev)
{
	return dev->driver_data;
}

static inline void dev_set_drvdata(struct device *dev, void *data)
{
	dev->driver_data = data;
}

/* subsystem PM data of the device; NULL-safe */
static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
{
	return dev ? dev->power.subsys_data : NULL;
}

/* get/set suppression of uevents on the device's kobject */
static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
	return dev->kobj.uevent_suppress;
}

static inline void dev_set_uevent_suppress(struct device *dev, int val)
{
	dev->kobj.uevent_suppress = val;
}
  939 
  940 static inline int device_is_registered(struct device *dev)
  941 {
  942 	return dev->kobj.state_in_sysfs;
  943 }
  944 
/*
 * Opt @dev in to asynchronous suspend/resume.
 * No-op while a system transition is in progress (power.is_prepared set).
 */
static inline void device_enable_async_suspend(struct device *dev)
{
	if (!dev->power.is_prepared)
		dev->power.async_suspend = true;
}

/*
 * Opt @dev out of asynchronous suspend/resume.
 * No-op while a system transition is in progress (power.is_prepared set).
 */
static inline void device_disable_async_suspend(struct device *dev)
{
	if (!dev->power.is_prepared)
		dev->power.async_suspend = false;
}

/* True when @dev is flagged for asynchronous suspend/resume. */
static inline bool device_async_suspend_enabled(struct device *dev)
{
	return !!dev->power.async_suspend;
}
  961 
/*
 * Mark (or unmark) @dev as a syscore device in its PM state.
 * Only has effect when CONFIG_PM_SLEEP is enabled.
 */
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
	dev->power.syscore = val;
#endif
}
  968 
/* Acquire the per-device mutex (sleeps until available). */
static inline void device_lock(struct device *dev)
{
	mutex_lock(&dev->mutex);
}

/* Like device_lock(), but interruptible; returns mutex_lock_interruptible()'s result. */
static inline int device_lock_interruptible(struct device *dev)
{
	return mutex_lock_interruptible(&dev->mutex);
}

/* Try to acquire the per-device mutex without sleeping; non-zero on success. */
static inline int device_trylock(struct device *dev)
{
	return mutex_trylock(&dev->mutex);
}

/* Release the per-device mutex. */
static inline void device_unlock(struct device *dev)
{
	mutex_unlock(&dev->mutex);
}

/* Lockdep assertion that the per-device mutex is held by the caller. */
static inline void device_lock_assert(struct device *dev)
{
	lockdep_assert_held(&dev->mutex);
}
  993 
  994 static inline struct device_node *dev_of_node(struct device *dev)
  995 {
  996 	if (!IS_ENABLED(CONFIG_OF))
  997 		return NULL;
  998 	return dev->of_node;
  999 }
 1000 
 1001 void driver_init(void);
 1002 
 1003 /*
 1004  * High level routines for use by the bus drivers
 1005  */
 1006 extern int __must_check device_register(struct device *dev);
 1007 extern void device_unregister(struct device *dev);
 1008 extern void device_initialize(struct device *dev);
 1009 extern int __must_check device_add(struct device *dev);
 1010 extern void device_del(struct device *dev);
 1011 extern int device_for_each_child(struct device *dev, void *data,
 1012 		     int (*fn)(struct device *dev, void *data));
 1013 extern int device_for_each_child_reverse(struct device *dev, void *data,
 1014 		     int (*fn)(struct device *dev, void *data));
 1015 extern struct device *device_find_child(struct device *dev, void *data,
 1016 				int (*match)(struct device *dev, void *data));
 1017 extern int device_rename(struct device *dev, const char *new_name);
 1018 extern int device_move(struct device *dev, struct device *new_parent,
 1019 		       enum dpm_order dpm_order);
 1020 extern const char *device_get_devnode(struct device *dev,
 1021 				      umode_t *mode, kuid_t *uid, kgid_t *gid,
 1022 				      const char **tmp);
 1023 
 1024 static inline bool device_supports_offline(struct device *dev)
 1025 {
 1026 	return dev->bus && dev->bus->offline && dev->bus->online;
 1027 }
 1028 
 1029 extern void lock_device_hotplug(void);
 1030 extern void unlock_device_hotplug(void);
 1031 extern int lock_device_hotplug_sysfs(void);
 1032 extern int device_offline(struct device *dev);
 1033 extern int device_online(struct device *dev);
 1034 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1035 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1036 
 1037 /*
 1038  * Root device objects for grouping under /sys/devices
 1039  */
 1040 extern struct device *__root_device_register(const char *name,
 1041 					     struct module *owner);
 1042 
 1043 /* This is a macro to avoid include problems with THIS_MODULE */
 1044 #define root_device_register(name) \
 1045 	__root_device_register(name, THIS_MODULE)
 1046 
 1047 extern void root_device_unregister(struct device *root);
 1048 
/* dev_get_platdata - return the platform data attached to @dev. */
static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}
 1053 
 1054 /*
 1055  * Manual binding of a device to driver. See drivers/base/bus.c
 1056  * for information on use.
 1057  */
 1058 extern int __must_check device_bind_driver(struct device *dev);
 1059 extern void device_release_driver(struct device *dev);
 1060 extern int  __must_check device_attach(struct device *dev);
 1061 extern int __must_check driver_attach(struct device_driver *drv);
 1062 extern void device_initial_probe(struct device *dev);
 1063 extern int __must_check device_reprobe(struct device *dev);
 1064 
 1065 extern bool device_is_bound(struct device *dev);
 1066 
 1067 /*
 1068  * Easy functions for dynamically creating devices on the fly
 1069  */
 1070 extern __printf(5, 0)
 1071 struct device *device_create_vargs(struct class *cls, struct device *parent,
 1072 				   dev_t devt, void *drvdata,
 1073 				   const char *fmt, va_list vargs);
 1074 extern __printf(5, 6)
 1075 struct device *device_create(struct class *cls, struct device *parent,
 1076 			     dev_t devt, void *drvdata,
 1077 			     const char *fmt, ...);
 1078 extern __printf(6, 7)
 1079 struct device *device_create_with_groups(struct class *cls,
 1080 			     struct device *parent, dev_t devt, void *drvdata,
 1081 			     const struct attribute_group **groups,
 1082 			     const char *fmt, ...);
 1083 extern void device_destroy(struct class *cls, dev_t devt);
 1084 
 1085 /*
 1086  * Platform "fixup" functions - allow the platform to have their say
 1087  * about devices and actions that the general device layer doesn't
 1088  * know about.
 1089  */
 1090 /* Notify platform of device discovery */
 1091 extern int (*platform_notify)(struct device *dev);
 1092 
 1093 extern int (*platform_notify_remove)(struct device *dev);
 1094 
 1095 
 1096 /*
 1097  * get_device - atomically increment the reference count for the device.
 1098  *
 1099  */
 1100 extern struct device *get_device(struct device *dev);
 1101 extern void put_device(struct device *dev);
 1102 
 1103 #ifdef CONFIG_DEVTMPFS
 1104 extern int devtmpfs_create_node(struct device *dev);
 1105 extern int devtmpfs_delete_node(struct device *dev);
 1106 extern int devtmpfs_mount(const char *mntdir);
 1107 #else
 1108 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
 1109 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
 1110 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
 1111 #endif
 1112 
 1113 /* drivers/base/power/shutdown.c */
 1114 extern void device_shutdown(void);
 1115 
 1116 /* debugging and troubleshooting/diagnostic helpers. */
 1117 extern const char *dev_driver_string(const struct device *dev);
 1118 
 1119 
 1120 #ifdef CONFIG_PRINTK
 1121 
 1122 extern __printf(3, 0)
 1123 int dev_vprintk_emit(int level, const struct device *dev,
 1124 		     const char *fmt, va_list args);
 1125 extern __printf(3, 4)
 1126 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
 1127 
 1128 extern __printf(3, 4)
 1129 void dev_printk(const char *level, const struct device *dev,
 1130 		const char *fmt, ...);
 1131 extern __printf(2, 3)
 1132 void dev_emerg(const struct device *dev, const char *fmt, ...);
 1133 extern __printf(2, 3)
 1134 void dev_alert(const struct device *dev, const char *fmt, ...);
 1135 extern __printf(2, 3)
 1136 void dev_crit(const struct device *dev, const char *fmt, ...);
 1137 extern __printf(2, 3)
 1138 void dev_err(const struct device *dev, const char *fmt, ...);
 1139 extern __printf(2, 3)
 1140 void dev_warn(const struct device *dev, const char *fmt, ...);
 1141 extern __printf(2, 3)
 1142 void dev_notice(const struct device *dev, const char *fmt, ...);
 1143 extern __printf(2, 3)
 1144 void _dev_info(const struct device *dev, const char *fmt, ...);
 1145 
 1146 #else
 1147 
 1148 static inline __printf(3, 0)
 1149 int dev_vprintk_emit(int level, const struct device *dev,
 1150 		     const char *fmt, va_list args)
 1151 { return 0; }
 1152 static inline __printf(3, 4)
 1153 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
 1154 { return 0; }
 1155 
 1156 static inline void __dev_printk(const char *level, const struct device *dev,
 1157 				struct va_format *vaf)
 1158 {}
 1159 static inline __printf(3, 4)
 1160 void dev_printk(const char *level, const struct device *dev,
 1161 		const char *fmt, ...)
 1162 {}
 1163 
 1164 static inline __printf(2, 3)
 1165 void dev_emerg(const struct device *dev, const char *fmt, ...)
 1166 {}
 1167 static inline __printf(2, 3)
 1168 void dev_crit(const struct device *dev, const char *fmt, ...)
 1169 {}
 1170 static inline __printf(2, 3)
 1171 void dev_alert(const struct device *dev, const char *fmt, ...)
 1172 {}
 1173 static inline __printf(2, 3)
 1174 void dev_err(const struct device *dev, const char *fmt, ...)
 1175 {}
 1176 static inline __printf(2, 3)
 1177 void dev_warn(const struct device *dev, const char *fmt, ...)
 1178 {}
 1179 static inline __printf(2, 3)
 1180 void dev_notice(const struct device *dev, const char *fmt, ...)
 1181 {}
 1182 static inline __printf(2, 3)
 1183 void _dev_info(const struct device *dev, const char *fmt, ...)
 1184 {}
 1185 
 1186 #endif
 1187 
 1188 /*
 1189  * Stupid hackaround for existing uses of non-printk uses dev_info
 1190  *
 1191  * Note that the definition of dev_info below is actually _dev_info
 1192  * and a macro is used to avoid redefining dev_info
 1193  */
 1194 
 1195 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 1196 
 1197 #if defined(CONFIG_DYNAMIC_DEBUG)
 1198 #define dev_dbg(dev, format, ...)		     \
 1199 do {						     \
 1200 	dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
 1201 } while (0)
 1202 #elif defined(DEBUG)
 1203 #define dev_dbg(dev, format, arg...)		\
 1204 	dev_printk(KERN_DEBUG, dev, format, ##arg)
 1205 #else
 1206 #define dev_dbg(dev, format, arg...)				\
 1207 ({								\
 1208 	if (0)							\
 1209 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1210 })
 1211 #endif
 1212 
 1213 #ifdef CONFIG_PRINTK
 1214 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1215 do {									\
 1216 	static bool __print_once __read_mostly;				\
 1217 									\
 1218 	if (!__print_once) {						\
 1219 		__print_once = true;					\
 1220 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1221 	}								\
 1222 } while (0)
 1223 #else
 1224 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1225 do {									\
 1226 	if (0)								\
 1227 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1228 } while (0)
 1229 #endif
 1230 
 1231 #define dev_emerg_once(dev, fmt, ...)					\
 1232 	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1233 #define dev_alert_once(dev, fmt, ...)					\
 1234 	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
 1235 #define dev_crit_once(dev, fmt, ...)					\
 1236 	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
 1237 #define dev_err_once(dev, fmt, ...)					\
 1238 	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
 1239 #define dev_warn_once(dev, fmt, ...)					\
 1240 	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
 1241 #define dev_notice_once(dev, fmt, ...)					\
 1242 	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
 1243 #define dev_info_once(dev, fmt, ...)					\
 1244 	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
 1245 #define dev_dbg_once(dev, fmt, ...)					\
 1246 	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
 1247 
 1248 #define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
 1249 do {									\
 1250 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1251 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1252 				      DEFAULT_RATELIMIT_BURST);		\
 1253 	if (__ratelimit(&_rs))						\
 1254 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1255 } while (0)
 1256 
 1257 #define dev_emerg_ratelimited(dev, fmt, ...)				\
 1258 	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1259 #define dev_alert_ratelimited(dev, fmt, ...)				\
 1260 	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
 1261 #define dev_crit_ratelimited(dev, fmt, ...)				\
 1262 	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
 1263 #define dev_err_ratelimited(dev, fmt, ...)				\
 1264 	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
 1265 #define dev_warn_ratelimited(dev, fmt, ...)				\
 1266 	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
 1267 #define dev_notice_ratelimited(dev, fmt, ...)				\
 1268 	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
 1269 #define dev_info_ratelimited(dev, fmt, ...)				\
 1270 	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
 1271 #if defined(CONFIG_DYNAMIC_DEBUG)
 1272 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
 1273 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1274 do {									\
 1275 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1276 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1277 				      DEFAULT_RATELIMIT_BURST);		\
 1278 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
 1279 	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
 1280 	    __ratelimit(&_rs))						\
 1281 		__dynamic_dev_dbg(&descriptor, dev, fmt,		\
 1282 				  ##__VA_ARGS__);			\
 1283 } while (0)
 1284 #elif defined(DEBUG)
 1285 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1286 do {									\
 1287 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1288 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1289 				      DEFAULT_RATELIMIT_BURST);		\
 1290 	if (__ratelimit(&_rs))						\
 1291 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1292 } while (0)
 1293 #else
 1294 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1295 do {									\
 1296 	if (0)								\
 1297 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1298 } while (0)
 1299 #endif
 1300 
 1301 #ifdef VERBOSE_DEBUG
 1302 #define dev_vdbg	dev_dbg
 1303 #else
 1304 #define dev_vdbg(dev, format, arg...)				\
 1305 ({								\
 1306 	if (0)							\
 1307 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1308 })
 1309 #endif
 1310 
 1311 /*
 1312  * dev_WARN*() acts like dev_printk(), but with the key difference of
 1313  * using WARN/WARN_ONCE to include file/line information and a backtrace.
 1314  */
 1315 #define dev_WARN(dev, format, arg...) \
 1316 	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
 1317 
 1318 #define dev_WARN_ONCE(dev, condition, format, arg...) \
 1319 	WARN_ONCE(condition, "%s %s: " format, \
 1320 			dev_driver_string(dev), dev_name(dev), ## arg)
 1321 
 1322 /* Create alias, so I can be autoloaded. */
 1323 #define MODULE_ALIAS_CHARDEV(major,minor) \
 1324 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
 1325 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
 1326 	MODULE_ALIAS("char-major-" __stringify(major) "-*")
 1327 
 1328 #ifdef CONFIG_SYSFS_DEPRECATED
 1329 extern long sysfs_deprecated;
 1330 #else
 1331 #define sysfs_deprecated 0
 1332 #endif
 1333 
 1334 /**
 1335  * module_driver() - Helper macro for drivers that don't do anything
 1336  * special in module init/exit. This eliminates a lot of boilerplate.
 1337  * Each module may only use this macro once, and calling it replaces
 1338  * module_init() and module_exit().
 1339  *
 1340  * @__driver: driver name
 1341  * @__register: register function for this driver type
 1342  * @__unregister: unregister function for this driver type
 1343  * @...: Additional arguments to be passed to __register and __unregister.
 1344  *
 1345  * Use this macro to construct bus specific macros for registering
 1346  * drivers, and do not use it on its own.
 1347  */
 1348 #define module_driver(__driver, __register, __unregister, ...) \
 1349 static int __init __driver##_init(void) \
 1350 { \
 1351 	return __register(&(__driver) , ##__VA_ARGS__); \
 1352 } \
 1353 module_init(__driver##_init); \
 1354 static void __exit __driver##_exit(void) \
 1355 { \
 1356 	__unregister(&(__driver) , ##__VA_ARGS__); \
 1357 } \
 1358 module_exit(__driver##_exit);
 1359 
 1360 /**
 1361  * builtin_driver() - Helper macro for drivers that don't do anything
 1362  * special in init and have no exit. This eliminates some boilerplate.
 1363  * Each driver may only use this macro once, and calling it replaces
 1364  * device_initcall (or in some cases, the legacy __initcall).  This is
 1365  * meant to be a direct parallel of module_driver() above but without
 1366  * the __exit stuff that is not used for builtin cases.
 1367  *
 1368  * @__driver: driver name
 1369  * @__register: register function for this driver type
 1370  * @...: Additional arguments to be passed to __register
 1371  *
 1372  * Use this macro to construct bus specific macros for registering
 1373  * drivers, and do not use it on its own.
 1374  */
 1375 #define builtin_driver(__driver, __register, ...) \
 1376 static int __init __driver##_init(void) \
 1377 { \
 1378 	return __register(&(__driver) , ##__VA_ARGS__); \
 1379 } \
 1380 device_initcall(__driver##_init);
 1381 
 1382 #endif /* _DEVICE_H_ */                 1 #ifndef _LINUX_DMA_MAPPING_H
    2 #define _LINUX_DMA_MAPPING_H
    3 
    4 #include <linux/sizes.h>
    5 #include <linux/string.h>
    6 #include <linux/device.h>
    7 #include <linux/err.h>
    8 #include <linux/dma-debug.h>
    9 #include <linux/dma-direction.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/kmemcheck.h>
   12 #include <linux/bug.h>
   13 
   14 /**
   15  * List of possible attributes associated with a DMA mapping. The semantics
   16  * of each attribute should be defined in Documentation/DMA-attributes.txt.
   17  *
   18  * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
   19  * forces all pending DMA writes to complete.
   20  */
   21 #define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
   22 /*
   23  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
   24  * may be weakly ordered, that is that reads and writes may pass each other.
   25  */
   26 #define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
   27 /*
   28  * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
   29  * buffered to improve performance.
   30  */
   31 #define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
   32 /*
   33  * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
   34  * consistent or non-consistent memory as it sees fit.
   35  */
   36 #define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
   37 /*
   38  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel
   39  * virtual mapping for the allocated buffer.
   40  */
   41 #define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
   42 /*
   43  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
   44  * the CPU cache for the given buffer assuming that it has been already
   45  * transferred to 'device' domain.
   46  */
   47 #define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
   48 /*
   49  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
   50  * in physical memory.
   51  */
   52 #define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
   53 /*
   54  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
   55  * that it's probably not worth the time to try to allocate memory to in a way
   56  * that gives better TLB efficiency.
   57  */
   58 #define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
   59 /*
   60  * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
   61  * allocation failure reports (similarly to __GFP_NOWARN).
   62  */
   63 #define DMA_ATTR_NO_WARN	(1UL << 8)
   64 
   65 /*
   66  * A dma_addr_t can hold any valid DMA or bus address for the platform.
   67  * It can be given to a device to use as a DMA source or target.  A CPU cannot
   68  * reference a dma_addr_t directly because there may be translation between
   69  * its physical address space and the bus address space.
   70  */
/*
 * dma_map_ops - per-architecture/bus implementation of the DMA mapping
 * API.  Each callback backs one of the dma_* helpers below; callbacks
 * the helpers test for NULL are optional.
 */
struct dma_map_ops {
	/* Allocate/free coherent memory for this device. */
	void* (*alloc)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp,
				unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      unsigned long attrs);
	/* Map a coherent allocation into a user vma. */
	int (*mmap)(struct device *, struct vm_area_struct *,
			  void *, dma_addr_t, size_t,
			  unsigned long attrs);

	/* Build an sg_table describing a coherent allocation. */
	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	/* Streaming mapping of a single page (+offset/size). */
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	/* Map a physical (non-RAM) resource, e.g. MMIO, for DMA. */
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/* Ownership transfer / cache maintenance for streaming mappings. */
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	/* Error check for addresses returned by map_page/map_sg. */
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	/* DMA mask negotiation. */
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	/* NOTE(review): appears to flag ops where DMA addresses are plain
	 * physical addresses - confirm against arch users. */
	int is_phys;
};
  129 
  130 extern struct dma_map_ops dma_noop_ops;
  131 
  132 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
  133 
  134 #define DMA_MASK_NONE	0x0ULL
  135 
  136 static inline int valid_dma_direction(int dma_direction)
  137 {
  138 	return ((dma_direction == DMA_BIDIRECTIONAL) ||
  139 		(dma_direction == DMA_TO_DEVICE) ||
  140 		(dma_direction == DMA_FROM_DEVICE));
  141 }
  142 
  143 static inline int is_device_dma_capable(struct device *dev)
  144 {
  145 	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
  146 }
  147 
  148 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  149 /*
  150  * These three functions are only for dma allocator.
  151  * Don't use them in device drivers.
  152  */
  153 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
  154 				       dma_addr_t *dma_handle, void **ret);
  155 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
  156 
  157 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
  158 			    void *cpu_addr, size_t size, int *ret);
  159 #else
  160 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
  161 #define dma_release_from_coherent(dev, order, vaddr) (0)
  162 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
  163 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  164 
  165 #ifdef CONFIG_HAS_DMA
  166 #include <asm/dma-mapping.h>
  167 #else
  168 /*
  169  * Define the dma api to allow compilation but not linking of
  170  * dma dependent code.  Code that depends on the dma-mapping
  171  * API needs to set 'depends on HAS_DMA' in its Kconfig
  172  */
  173 extern struct dma_map_ops bad_dma_ops;
/*
 * !CONFIG_HAS_DMA stub: returns the (never-defined) bad_dma_ops so that
 * DMA-dependent code compiles but fails to link, per the comment above.
 */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
  178 #endif
  179 
/*
 * dma_map_single_attrs - map a kernel-virtual buffer for streaming DMA,
 * implemented as a map_page on the buffer's page and in-page offset.
 * Returns the bus address for the device.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	/* Let kmemcheck consider the buffer initialized before DMA. */
	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

/* Undo dma_map_single_attrs(); unmap_page is an optional callback. */
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
  211 
  212 /*
  213  * dma_maps_sg_attrs returns 0 on error and > 0 on success.
  214  * It should never return a value < 0.
  215  */
/*
 * dma_map_sg_attrs - map a scatter/gather list for streaming DMA.
 * Returns the number of mapped entries (> 0) or 0 on error, never < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	/* Let kmemcheck consider every segment initialized before DMA. */
	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);	/* enforce the map_sg callback contract */
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

/* Undo dma_map_sg_attrs(); unmap_sg is an optional callback. */
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
  245 
/*
 * dma_map_page - map one page (+offset/size) for streaming DMA with no
 * attributes (attrs fixed to 0).  Returns the bus address.
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	/* Let kmemcheck consider the mapped range initialized before DMA. */
	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, 0);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

/* Undo dma_map_page(); unmap_page is an optional callback. */
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, 0);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
  271 
/*
 * dma_map_resource - map a physical (non-RAM) resource for DMA.
 * Falls back to the identity mapping (addr == phys_addr) when the ops
 * provide no map_resource callback.
 */
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

/* Undo dma_map_resource(); unmap_resource is an optional callback. */
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
  306 
/* Hand a streaming mapping back to the CPU before the CPU touches it. */
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

/* Hand a streaming mapping back to the device after the CPU touched it. */
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

/* Like dma_sync_single_for_cpu(), for a sub-range at @offset of a mapping. */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

/* Like dma_sync_single_for_device(), for a sub-range at @offset of a mapping. */
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
  358 
/* Hand a mapped scatter/gather list back to the CPU. */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

/* Hand a mapped scatter/gather list back to the device. */
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

}
  383 
  384 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
  385 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
  386 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
  387 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
  388 
  389 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
  390 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
  391 
  392 void *dma_common_contiguous_remap(struct page *page, size_t size,
  393 			unsigned long vm_flags,
  394 			pgprot_t prot, const void *caller);
  395 
  396 void *dma_common_pages_remap(struct page **pages, size_t size,
  397 			unsigned long vm_flags, pgprot_t prot,
  398 			const void *caller);
  399 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
  400 
  401 /**
  402  * dma_mmap_attrs - map a coherent DMA allocation into user space
  403  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  404  * @vma: vm_area_struct describing requested user mapping
  405  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
  406  * @handle: device-view address returned from dma_alloc_attrs
  407  * @size: size of memory originally requested in dma_alloc_attrs
  408  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
  409  *
  410  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
  411  * into user space.  The coherent DMA buffer must not be freed by the
  412  * driver until the user space mapping has been released.
  413  */
  414 static inline int
  415 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
  416 	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
  417 {
  418 	struct dma_map_ops *ops = get_dma_ops(dev);
  419 	BUG_ON(!ops);
  420 	if (ops->mmap)
  421 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
  422 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
  423 }
  424 
  425 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
  426 
  427 int
  428 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
  429 		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
  430 
  431 static inline int
  432 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
  433 		      dma_addr_t dma_addr, size_t size,
  434 		      unsigned long attrs)
  435 {
  436 	struct dma_map_ops *ops = get_dma_ops(dev);
  437 	BUG_ON(!ops);
  438 	if (ops->get_sgtable)
  439 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
  440 					attrs);
  441 	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
  442 }
  443 
  444 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
  445 
  446 #ifndef arch_dma_alloc_attrs
  447 #define arch_dma_alloc_attrs(dev, flag)	(true)
  448 #endif
  449 
/*
 * dma_alloc_attrs - allocate DMA-able memory with the given attributes.
 * @dev:        device to allocate for
 * @size:       requested size in bytes
 * @dma_handle: out-param, receives the device-view address
 * @flag:       GFP allocation flags
 * @attrs:      DMA_ATTR_* allocation attributes
 *
 * Returns the kernel CPU-view address, or NULL on failure.
 */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	/* A device-declared coherent region, if any, is tried first. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* Arch hook may veto the allocation; note it receives the
	 * ADDRESSES of dev and flag, so it may rewrite either. */
	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
  471 
/*
 * dma_free_attrs - free memory allocated with dma_alloc_attrs().
 * @dev:        device the buffer was allocated for
 * @size:       size passed to the matching allocation
 * @cpu_addr:   kernel address returned by dma_alloc_attrs()
 * @dma_handle: device address returned by dma_alloc_attrs()
 * @attrs:      attributes used at allocation time
 *
 * Must not be called with interrupts disabled (WARN_ON below).
 */
static inline void dma_free_attrs(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	/* Buffers carved from a device-declared coherent region are
	 * returned there, bypassing the dma_map_ops backend. */
	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	/* Freeing NULL (or having no free callback) is a silent no-op. */
	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
  490 
/* Allocate coherent DMA memory: dma_alloc_attrs() with no attributes. */
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}
  496 
  497 static inline void dma_free_coherent(struct device *dev, size_t size,
  498 		void *cpu_addr, dma_addr_t dma_handle)
  499 {
  500 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
  501 }
  502 
/* Allocate possibly-cached DMA memory (DMA_ATTR_NON_CONSISTENT).
 * NOTE(review): with this attribute the caller is presumably responsible
 * for dma_sync_* around CPU accesses — confirm against DMA-API docs. */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}
  509 
/* Free a buffer obtained from dma_alloc_noncoherent(); must pass the
 * same DMA_ATTR_NON_CONSISTENT attribute used at allocation time. */
static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}
  516 
  517 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  518 {
  519 	debug_dma_mapping_error(dev, dma_addr);
  520 
  521 	if (get_dma_ops(dev)->mapping_error)
  522 		return get_dma_ops(dev)->mapping_error(dev, dma_addr);
  523 
  524 #ifdef DMA_ERROR_CODE
  525 	return dma_addr == DMA_ERROR_CODE;
  526 #else
  527 	return 0;
  528 #endif
  529 }
  530 
  531 #ifndef HAVE_ARCH_DMA_SUPPORTED
  532 static inline int dma_supported(struct device *dev, u64 mask)
  533 {
  534 	struct dma_map_ops *ops = get_dma_ops(dev);
  535 
  536 	if (!ops)
  537 		return 0;
  538 	if (!ops->dma_supported)
  539 		return 1;
  540 	return ops->dma_supported(dev, mask);
  541 }
  542 #endif
  543 
  544 #ifndef HAVE_ARCH_DMA_SET_MASK
/*
 * dma_set_mask - set the device's streaming DMA mask.
 * Returns 0 on success, -EIO if the mask is unusable.
 */
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* Backend override takes precedence over the generic path.
	 * NOTE(review): ops is dereferenced without a NULL check here,
	 * unlike dma_supported() above — presumably get_dma_ops() cannot
	 * return NULL on configs using this generic helper; confirm. */
	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
  557 #endif
  558 
  559 static inline u64 dma_get_mask(struct device *dev)
  560 {
  561 	if (dev && dev->dma_mask && *dev->dma_mask)
  562 		return *dev->dma_mask;
  563 	return DMA_BIT_MASK(32);
  564 }
  565 
  566 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
  567 int dma_set_coherent_mask(struct device *dev, u64 mask);
  568 #else
  569 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
  570 {
  571 	if (!dma_supported(dev, mask))
  572 		return -EIO;
  573 	dev->coherent_dma_mask = mask;
  574 	return 0;
  575 }
  576 #endif
  577 
  578 /*
  579  * Set both the DMA mask and the coherent DMA mask to the same thing.
  580  * Note that we don't check the return value from dma_set_coherent_mask()
  581  * as the DMA API guarantees that the coherent DMA mask can be set to
  582  * the same or smaller than the streaming DMA mask.
  583  */
  584 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
  585 {
  586 	int rc = dma_set_mask(dev, mask);
  587 	if (rc == 0)
  588 		dma_set_coherent_mask(dev, mask);
  589 	return rc;
  590 }
  591 
  592 /*
  593  * Similar to the above, except it deals with the case where the device
  594  * does not have dev->dma_mask appropriately setup.
  595  */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	/* Point the streaming mask at the coherent mask's storage so both
	 * share one u64, then set both through the normal path. */
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
  601 
  602 extern u64 dma_get_required_mask(struct device *dev);
  603 
  604 #ifndef arch_setup_dma_ops
  605 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
  606 				      u64 size, const struct iommu_ops *iommu,
  607 				      bool coherent) { }
  608 #endif
  609 
  610 #ifndef arch_teardown_dma_ops
  611 static inline void arch_teardown_dma_ops(struct device *dev) { }
  612 #endif
  613 
  614 static inline unsigned int dma_get_max_seg_size(struct device *dev)
  615 {
  616 	if (dev->dma_parms && dev->dma_parms->max_segment_size)
  617 		return dev->dma_parms->max_segment_size;
  618 	return SZ_64K;
  619 }
  620 
  621 static inline unsigned int dma_set_max_seg_size(struct device *dev,
  622 						unsigned int size)
  623 {
  624 	if (dev->dma_parms) {
  625 		dev->dma_parms->max_segment_size = size;
  626 		return 0;
  627 	}
  628 	return -EIO;
  629 }
  630 
  631 static inline unsigned long dma_get_seg_boundary(struct device *dev)
  632 {
  633 	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
  634 		return dev->dma_parms->segment_boundary_mask;
  635 	return DMA_BIT_MASK(32);
  636 }
  637 
  638 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
  639 {
  640 	if (dev->dma_parms) {
  641 		dev->dma_parms->segment_boundary_mask = mask;
  642 		return 0;
  643 	}
  644 	return -EIO;
  645 }
  646 
  647 #ifndef dma_max_pfn
/* Highest page frame number the device can address.
 * NOTE(review): dereferences dev->dma_mask unconditionally — assumes the
 * mask pointer is set up (cf. dma_coerce_mask_and_coherent()); confirm
 * callers guarantee that. */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
  652 #endif
  653 
  654 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
  655 					dma_addr_t *dma_handle, gfp_t flag)
  656 {
  657 	void *ret = dma_alloc_coherent(dev, size, dma_handle,
  658 				       flag | __GFP_ZERO);
  659 	return ret;
  660 }
  661 
  662 #ifdef CONFIG_HAS_DMA
/* Minimum alignment (bytes) required for DMA-safe buffers: the arch's
 * ARCH_DMA_MINALIGN when defined, otherwise 1 (no constraint). */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
  670 #endif
  671 
  672 /* flags for the coherent memory api */
  673 #define	DMA_MEMORY_MAP			0x01
  674 #define DMA_MEMORY_IO			0x02
  675 #define DMA_MEMORY_INCLUDES_CHILDREN	0x04
  676 #define DMA_MEMORY_EXCLUSIVE		0x08
  677 
  678 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  679 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  680 				dma_addr_t device_addr, size_t size, int flags);
  681 void dma_release_declared_memory(struct device *dev);
  682 void *dma_mark_declared_memory_occupied(struct device *dev,
  683 					dma_addr_t device_addr, size_t size);
  684 #else
/* Stubs for !CONFIG_HAVE_GENERIC_DMA_COHERENT.
 * NOTE(review): returning 0 here (no DMA_MEMORY_* bits set) appears to
 * signal to callers that no coherent region was declared — confirm
 * against how callers test the returned flags. */
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

/* Nothing can have been declared in this configuration — no-op. */
static inline void
dma_release_declared_memory(struct device *dev)
{
}

/* No declared region exists, so occupying one always fails busy. */
static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
  703 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  704 
  705 /*
  706  * Managed DMA API
  707  */
  708 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
  709 				 dma_addr_t *dma_handle, gfp_t gfp);
  710 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
  711 			       dma_addr_t dma_handle);
  712 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
  713 				    dma_addr_t *dma_handle, gfp_t gfp);
  714 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
  715 				  dma_addr_t dma_handle);
  716 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  717 extern int dmam_declare_coherent_memory(struct device *dev,
  718 					phys_addr_t phys_addr,
  719 					dma_addr_t device_addr, size_t size,
  720 					int flags);
  721 extern void dmam_release_declared_memory(struct device *dev);
  722 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  723 static inline int dmam_declare_coherent_memory(struct device *dev,
  724 				phys_addr_t phys_addr, dma_addr_t device_addr,
  725 				size_t size, gfp_t gfp)
  726 {
  727 	return 0;
  728 }
  729 
/* No-op: nothing is ever declared in this configuration. */
static inline void dmam_release_declared_memory(struct device *dev)
{
}
  733 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  734 
/* Allocate write-combining DMA memory: dma_alloc_attrs() with
 * DMA_ATTR_WRITE_COMBINE set. */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
  741 #ifndef dma_alloc_writecombine
  742 #define dma_alloc_writecombine dma_alloc_wc
  743 #endif
  744 
  745 static inline void dma_free_wc(struct device *dev, size_t size,
  746 			       void *cpu_addr, dma_addr_t dma_addr)
  747 {
  748 	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
  749 			      DMA_ATTR_WRITE_COMBINE);
  750 }
  751 #ifndef dma_free_writecombine
  752 #define dma_free_writecombine dma_free_wc
  753 #endif
  754 
/* Map a write-combining allocation into user space: dma_mmap_attrs()
 * with DMA_ATTR_WRITE_COMBINE set. */
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
  763 #ifndef dma_mmap_writecombine
  764 #define dma_mmap_writecombine dma_mmap_wc
  765 #endif
  766 
  767 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
  768 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
  769 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
  770 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
  771 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
  772 #define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
  773 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
  774 #else
  775 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
  776 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
  777 #define dma_unmap_addr(PTR, ADDR_NAME)           (0)
  778 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
  779 #define dma_unmap_len(PTR, LEN_NAME)             (0)
  780 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
  781 #endif
  782 
  783 #endif                 1 /* interrupt.h */
    2 #ifndef _LINUX_INTERRUPT_H
    3 #define _LINUX_INTERRUPT_H
    4 
    5 #include <linux/kernel.h>
    6 #include <linux/linkage.h>
    7 #include <linux/bitops.h>
    8 #include <linux/preempt.h>
    9 #include <linux/cpumask.h>
   10 #include <linux/irqreturn.h>
   11 #include <linux/irqnr.h>
   12 #include <linux/hardirq.h>
   13 #include <linux/irqflags.h>
   14 #include <linux/hrtimer.h>
   15 #include <linux/kref.h>
   16 #include <linux/workqueue.h>
   17 
   18 #include <linux/atomic.h>
   19 #include <asm/ptrace.h>
   20 #include <asm/irq.h>
   21 
   22 /*
   23  * These correspond to the IORESOURCE_IRQ_* defines in
   24  * linux/ioport.h to select the interrupt line behaviour.  When
   25  * requesting an interrupt without specifying a IRQF_TRIGGER, the
   26  * setting should be assumed to be "as already configured", which
   27  * may be as per machine or firmware initialisation.
   28  */
   29 #define IRQF_TRIGGER_NONE	0x00000000
   30 #define IRQF_TRIGGER_RISING	0x00000001
   31 #define IRQF_TRIGGER_FALLING	0x00000002
   32 #define IRQF_TRIGGER_HIGH	0x00000004
   33 #define IRQF_TRIGGER_LOW	0x00000008
   34 #define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
   35 				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
   36 #define IRQF_TRIGGER_PROBE	0x00000010
   37 
   38 /*
   39  * These flags used only by the kernel as part of the
   40  * irq handling routines.
   41  *
   42  * IRQF_SHARED - allow sharing the irq among several devices
   43  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
   44  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
   45  * IRQF_PERCPU - Interrupt is per cpu
   46  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
   47  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
   49  *                performance reasons)
   50  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
   51  *                Used by threaded interrupts which need to keep the
   52  *                irq line disabled until the threaded handler has been run.
   53  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
   54  *                   that this interrupt will wake the system from a suspended
   55  *                   state.  See Documentation/power/suspend-and-interrupts.txt
   56  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
   57  * IRQF_NO_THREAD - Interrupt cannot be threaded
   58  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
   59  *                resume time.
   60  * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
   61  *                interrupt handler after suspending interrupts. For system
   62  *                wakeup devices users need to implement wakeup detection in
   63  *                their interrupt handlers.
   64  */
   65 #define IRQF_SHARED		0x00000080
   66 #define IRQF_PROBE_SHARED	0x00000100
   67 #define __IRQF_TIMER		0x00000200
   68 #define IRQF_PERCPU		0x00000400
   69 #define IRQF_NOBALANCING	0x00000800
   70 #define IRQF_IRQPOLL		0x00001000
   71 #define IRQF_ONESHOT		0x00002000
   72 #define IRQF_NO_SUSPEND		0x00004000
   73 #define IRQF_FORCE_RESUME	0x00008000
   74 #define IRQF_NO_THREAD		0x00010000
   75 #define IRQF_EARLY_RESUME	0x00020000
   76 #define IRQF_COND_SUSPEND	0x00040000
   77 
   78 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
   79 
   80 /*
   81  * These values can be returned by request_any_context_irq() and
   82  * describe the context the interrupt will be run in.
   83  *
   84  * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
   85  * IRQC_IS_NESTED - interrupt runs in a nested threaded context
   86  */
   87 enum {
   88 	IRQC_IS_HARDIRQ	= 0,
   89 	IRQC_IS_NESTED,
   90 };
   91 
   92 typedef irqreturn_t (*irq_handler_t)(int, void *);
   93 
   94 /**
   95  * struct irqaction - per interrupt action descriptor
   96  * @handler:	interrupt handler function
   97  * @name:	name of the device
   98  * @dev_id:	cookie to identify the device
   99  * @percpu_dev_id:	cookie to identify the device
  100  * @next:	pointer to the next irqaction for shared interrupts
  101  * @irq:	interrupt number
  102  * @flags:	flags (see IRQF_* above)
  103  * @thread_fn:	interrupt handler function for threaded interrupts
  104  * @thread:	thread pointer for threaded interrupts
  105  * @secondary:	pointer to secondary irqaction (force threading)
  106  * @thread_flags:	flags related to @thread
  107  * @thread_mask:	bitmask for keeping track of @thread activity
  108  * @dir:	pointer to the proc/irq/NN/name entry
  109  */
/* See the kerneldoc block above for per-field documentation.  The hot
 * fields read on every interrupt (handler, dev_id) come first; the
 * struct is internode-cacheline aligned to keep each IRQ's action data
 * from sharing lines with another IRQ's. */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;
  125 
  126 extern irqreturn_t no_action(int cpl, void *dev_id);
  127 
  128 /*
  129  * If a (PCI) device interrupt is not connected we set dev->irq to
  130  * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
  132  *
  133  * 0x80000000 is guaranteed to be outside the available range of interrupts
  134  * and easy to distinguish from other possible incorrect values.
  135  */
  136 #define IRQ_NOTCONNECTED	(1U << 31)
  137 
  138 extern int __must_check
  139 request_threaded_irq(unsigned int irq, irq_handler_t handler,
  140 		     irq_handler_t thread_fn,
  141 		     unsigned long flags, const char *name, void *dev);
  142 
/*
 * request_irq - allocate an interrupt line with a primary handler only.
 * Thin wrapper around request_threaded_irq() passing a NULL thread_fn,
 * i.e. no threaded handler is set up.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
  149 
  150 extern int __must_check
  151 request_any_context_irq(unsigned int irq, irq_handler_t handler,
  152 			unsigned long flags, const char *name, void *dev_id);
  153 
  154 extern int __must_check
  155 request_percpu_irq(unsigned int irq, irq_handler_t handler,
  156 		   const char *devname, void __percpu *percpu_dev_id);
  157 
  158 extern void free_irq(unsigned int, void *);
  159 extern void free_percpu_irq(unsigned int, void __percpu *);
  160 
  161 struct device;
  162 
  163 extern int __must_check
  164 devm_request_threaded_irq(struct device *dev, unsigned int irq,
  165 			  irq_handler_t handler, irq_handler_t thread_fn,
  166 			  unsigned long irqflags, const char *devname,
  167 			  void *dev_id);
  168 
/*
 * devm_request_irq - device-managed request_irq().
 * Wrapper around devm_request_threaded_irq() with a NULL thread_fn;
 * the IRQ is released automatically when @dev is unbound.
 */
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
  176 
  177 extern int __must_check
  178 devm_request_any_context_irq(struct device *dev, unsigned int irq,
  179 		 irq_handler_t handler, unsigned long irqflags,
  180 		 const char *devname, void *dev_id);
  181 
  182 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
  183 
  184 /*
 * On lockdep we don't want to enable hardirqs in hardirq
  186  * context. Use local_irq_enable_in_hardirq() to annotate
  187  * kernel code that has to do this nevertheless (pretty much
  188  * the only valid case is for old/broken hardware that is
  189  * insanely slow).
  190  *
  191  * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
  193  * places left. So the only effect should be slightly increased
  194  * irqs-off latencies.
  195  */
  196 #ifdef CONFIG_LOCKDEP
  197 # define local_irq_enable_in_hardirq()	do { } while (0)
  198 #else
  199 # define local_irq_enable_in_hardirq()	local_irq_enable()
  200 #endif
  201 
  202 extern void disable_irq_nosync(unsigned int irq);
  203 extern bool disable_hardirq(unsigned int irq);
  204 extern void disable_irq(unsigned int irq);
  205 extern void disable_percpu_irq(unsigned int irq);
  206 extern void enable_irq(unsigned int irq);
  207 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
  208 extern bool irq_percpu_is_enabled(unsigned int irq);
  209 extern void irq_wake_thread(unsigned int irq, void *dev_id);
  210 
  211 /* The following three functions are for the core kernel use only. */
  212 extern void suspend_device_irqs(void);
  213 extern void resume_device_irqs(void);
  214 
  215 /**
  216  * struct irq_affinity_notify - context for notification of IRQ affinity changes
  217  * @irq:		Interrupt to which notification applies
  218  * @kref:		Reference count, for internal use
  219  * @work:		Work item, for internal use
  220  * @notify:		Function to be called on change.  This will be
  221  *			called in process context.
  222  * @release:		Function to be called on release.  This will be
  223  *			called in process context.  Once registered, the
  224  *			structure must only be freed when this function is
  225  *			called or later.
  226  */
/* Lifetime is kref-managed: per the kerneldoc above, release() runs when
 * the last reference drops and the struct must not be freed before then. */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};
  234 
  235 #if defined(CONFIG_SMP)
  236 
  237 extern cpumask_var_t irq_default_affinity;
  238 
  239 /* Internal implementation. Use the helpers below */
  240 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
  241 			      bool force);
  242 
  243 /**
  244  * irq_set_affinity - Set the irq affinity of a given irq
  245  * @irq:	Interrupt to set affinity
  246  * @cpumask:	cpumask
  247  *
  248  * Fails if cpumask does not contain an online CPU
  249  */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	/* force == false: the mask is checked against online CPUs. */
	return __irq_set_affinity(irq, cpumask, false);
}
  255 
  256 /**
  257  * irq_force_affinity - Force the irq affinity of a given irq
  258  * @irq:	Interrupt to set affinity
  259  * @cpumask:	cpumask
  260  *
  261  * Same as irq_set_affinity, but without checking the mask against
  262  * online cpus.
  263  *
  264  * Solely for low level cpu hotplug code, where we need to make per
  265  * cpu interrupts affine before the cpu becomes online.
  266  */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	/* force == true: skip the online-CPU check (hotplug-only use). */
	return __irq_set_affinity(irq, cpumask, true);
}
  272 
  273 extern int irq_can_set_affinity(unsigned int irq);
  274 extern int irq_select_affinity(unsigned int irq);
  275 
  276 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
  277 
  278 extern int
  279 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
  280 
  281 struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
  282 int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
  283 
  284 #else /* CONFIG_SMP */
  285 
/* Uniprocessor (!CONFIG_SMP) stubs: there is only one CPU, so affinity
 * is meaningless.  Setters that could mislead callers return -EINVAL;
 * the rest succeed trivially. */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

/* Nothing to select on UP. */
static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

/* No per-vector spreading possible: no masks to build. */
static inline struct cpumask *
irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
{
	return NULL;
}

/* Every requested vector is "usable" on UP. */
static inline int
irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
{
	return maxvec;
}
  326 
  327 #endif /* CONFIG_SMP */
  328 
  329 /*
  330  * Special lockdep variants of irq disabling/enabling.
  331  * These should be used for locking constructs that
  332  * know that a particular irq context which is disabled,
  333  * and which is the only irq-context user of a lock,
  334  * that it's safe to take the lock in the irq-disabled
  335  * section without disabling hardirqs.
  336  *
  337  * On !CONFIG_LOCKDEP they are equivalent to the normal
  338  * irq disable/enable methods.
  339  */
/* Lockdep-aware disable_irq_nosync(): under CONFIG_LOCKDEP also disables
 * local hardirqs so lockdep treats the section as irq-safe (see the
 * explanatory comment block above). */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}
  347 
/* As disable_irq_nosync_lockdep(), but saves the irq flags into *flags
 * (only under CONFIG_LOCKDEP) for a later enable_irq_lockdep_irqrestore(). */
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}
  355 
/* Lockdep-aware disable_irq() (synchronous variant): also disables local
 * hardirqs under CONFIG_LOCKDEP. */
static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}
  363 
/* Lockdep-aware enable_irq(): re-enables local hardirqs first (under
 * CONFIG_LOCKDEP), mirroring the disable_*_lockdep() helpers above. */
static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}
  371 
/* Counterpart of disable_irq_nosync_lockdep_irqsave(): restores the saved
 * irq flags (under CONFIG_LOCKDEP) before re-enabling the irq line. */
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
  379 
  380 /* IRQ wakeup (PM) control: */
  381 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
  382 
/* Mark @irq as a system-wakeup source; wrapper for irq_set_irq_wake(irq, 1). */
static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}
  387 
/* Clear @irq's wakeup-source status; wrapper for irq_set_irq_wake(irq, 0). */
static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
  392 
  393 /*
  394  * irq_get_irqchip_state/irq_set_irqchip_state specific flags
  395  */
/* Values queried/forced via irq_get_irqchip_state()/irq_set_irqchip_state()
 * declared below. */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};
  402 
  403 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  404 				 bool *state);
  405 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  406 				 bool state);
  407 
  408 #ifdef CONFIG_IRQ_FORCED_THREADING
  409 extern bool force_irqthreads;
  410 #else
  411 #define force_irqthreads	(0)
  412 #endif
  413 
  414 #ifndef __ARCH_SET_SOFTIRQ_PENDING
  415 #define set_softirq_pending(x) (local_softirq_pending() = (x))
  416 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
  417 #endif
  418 
  419 /* Some architectures might implement lazy enabling/disabling of
  420  * interrupts. In some cases, such as stop_machine, we might want
  421  * to ensure that after a local_irq_disable(), interrupts have
  422  * really been disabled in hardware. Such architectures need to
  423  * implement the following hook.
  424  */
  425 #ifndef hard_irq_disable
  426 #define hard_irq_disable()	do { } while(0)
  427 #endif
  428 
  429 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
  430    frequency threaded job scheduling. For almost all the purposes
  431    tasklets are more than enough. F.e. all serial device BHs et
  432    al. should be converted to tasklets, not to softirqs.
  433  */
  434 
/* Softirq vector numbers.  NOTE(review): the numbering appears to be the
 * service order (RCU deliberately last, per its comment) and tools depend
 * on the exact values — do not renumber. */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
			    numbering. Sigh! */
	RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};
  451 
  452 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
  453 
  454 /* map softirq index to softirq name. update 'softirq_to_name' in
  455  * kernel/softirq.c when adding a new softirq.
  456  */
  457 extern const char * const softirq_to_name[NR_SOFTIRQS];
  458 
  459 /* softirq mask and active fields moved to irq_cpustat_t in
  460  * asm/hardirq.h to get better cache usage.  KAO
  461  */
  462 
/* A softirq's handler; registered per-vector via open_softirq() below. */
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
  467 
  468 asmlinkage void do_softirq(void);
  469 asmlinkage void __do_softirq(void);
  470 
  471 #ifdef __ARCH_HAS_DO_SOFTIRQ
  472 void do_softirq_own_stack(void);
  473 #else
/* Fallback for arches without __ARCH_HAS_DO_SOFTIRQ: run softirqs on the
 * current stack instead of switching to a dedicated one. */
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
  478 #endif
  479 
  480 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  481 extern void softirq_init(void);
  482 extern void __raise_softirq_irqoff(unsigned int nr);
  483 
  484 extern void raise_softirq_irqoff(unsigned int nr);
  485 extern void raise_softirq(unsigned int nr);
  486 
  487 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
  488 
/* Return the current CPU's ksoftirqd task (per-CPU variable read). */
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
  493 
  494 /* Tasklets --- multithreaded analogue of BHs.
  495 
   Main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.
  501 
  502    Properties:
  503    * If tasklet_schedule() is called, then tasklet is guaranteed
  504      to be executed on some cpu at least once after this.
  505    * If the tasklet is already scheduled, but its execution is still not
  506      started, it will be executed only once.
  507    * If this tasklet is already running on another CPU (or schedule is called
  508      from tasklet itself), it is rescheduled for later.
  509    * Tasklet is strictly serialized wrt itself, but not
  510      wrt another tasklets. If client needs some intertask synchronization,
  511      he makes it with spinlocks.
  512  */
  513 
struct tasklet_struct
{
	struct tasklet_struct *next;	/* next tasklet in the pending list */
	unsigned long state;		/* TASKLET_STATE_* bit flags */
	atomic_t count;			/* disable count (see tasklet_disable()) */
	void (*func)(unsigned long);	/* handler to run */
	unsigned long data;		/* opaque argument passed to func */
};
  522 
/* Define and statically initialize a tasklet.  The _DISABLED variant
 * starts with a disable count of 1, i.e. it will not run until enabled. */
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
  528 
  529 
  530 enum
  531 {
  532 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
  533 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
  534 };
  535 
#ifdef CONFIG_SMP
/* Try to take the per-tasklet RUN "lock"; returns non-zero on success. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Release the RUN bit; the barrier orders prior stores before the release. */
static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Busy-wait until the tasklet is no longer running on any CPU. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
/* UP: a tasklet can never run concurrently with us, so these are no-ops. */
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
  557 
  558 extern void __tasklet_schedule(struct tasklet_struct *t);
  559 
  560 static inline void tasklet_schedule(struct tasklet_struct *t)
  561 {
  562 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  563 		__tasklet_schedule(t);
  564 }
  565 
  566 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
  567 
  568 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
  569 {
  570 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  571 		__tasklet_hi_schedule(t);
  572 }
  573 
  574 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
  575 
  576 /*
  577  * This version avoids touching any other tasklets. Needed for kmemcheck
  578  * in order not to take any page faults while enqueueing this tasklet;
  579  * consider VERY carefully whether you really need this or
  580  * tasklet_hi_schedule()...
  581  */
  582 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
  583 {
  584 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  585 		__tasklet_hi_schedule_first(t);
  586 }
  587 
  588 
/*
 * Bump the disable count without waiting for a running instance to
 * finish.  The barrier orders the increment before subsequent accesses.
 */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}
  594 
/*
 * Disable the tasklet and wait until any instance currently running on
 * another CPU has completed (RUN bit cleared) before returning.
 */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}
  601 
/*
 * Undo one tasklet_disable*() call; the barrier orders prior accesses
 * before the decrement becomes visible.
 */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}
  607 
  608 extern void tasklet_kill(struct tasklet_struct *t);
  609 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
  610 extern void tasklet_init(struct tasklet_struct *t,
  611 			 void (*func)(unsigned long), unsigned long data);
  612 
/* Pairs an hrtimer with a tasklet; the user callback is kept in @function. */
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};
  618 
  619 extern void
  620 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
  621 		     enum hrtimer_restart (*function)(struct hrtimer *),
  622 		     clockid_t which_clock, enum hrtimer_mode mode);
  623 
/* Arm the underlying hrtimer of @ttimer for @time in the given mode. */
static inline
void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			   const enum hrtimer_mode mode)
{
	hrtimer_start(&ttimer->timer, time, mode);
}
  630 
/* Cancel the hrtimer first, then kill the associated tasklet. */
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
  637 
  638 /*
  639  * Autoprobing for irqs:
  640  *
  641  * probe_irq_on() and probe_irq_off() provide robust primitives
  642  * for accurate IRQ probing during kernel initialization.  They are
  643  * reasonably simple to use, are not "fooled" by spurious interrupts,
  644  * and, unlike other attempts at IRQ probing, they do not get hung on
  645  * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
  646  *
  647  * For reasonably foolproof probing, use them as follows:
  648  *
  649  * 1. clear and/or mask the device's internal interrupt.
  650  * 2. sti();
  651  * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
  652  * 4. enable the device and cause it to trigger an interrupt.
  653  * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
  654  * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
  655  * 7. service the device to clear its pending interrupt.
  656  * 8. loop again if paranoia is required.
  657  *
  658  * probe_irq_on() returns a mask of allocated irq's.
  659  *
  660  * probe_irq_off() takes the mask as a parameter,
  661  * and returns the irq number which occurred,
  662  * or zero if none occurred, or a negative irq number
  663  * if more than one irq occurred.
  664  */
  665 
#if !defined(CONFIG_GENERIC_IRQ_PROBE) 
/* IRQ probing not configured: all probe helpers report "nothing found". */
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
  684 
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
/* No procfs: nothing to set up. */
static inline void init_irq_proc(void)
{
}
#endif
  693 
  694 struct seq_file;
  695 int show_interrupts(struct seq_file *p, void *v);
  696 int arch_show_interrupts(struct seq_file *p, int prec);
  697 
  698 extern int early_irq_init(void);
  699 extern int arch_probe_nr_irqs(void);
  700 extern int arch_early_irq_init(void);
  701 
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))
#define __softirq_entry  \
	__attribute__((__section__(".softirqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
/* Limits of softirq entrypoints */
extern char __softirqentry_text_start[];
extern char __softirqentry_text_end[];

#else
/* Tracer/KASAN disabled: the section annotations expand to nothing. */
#define __irq_entry
#define __softirq_entry
#endif
  721 
  722 #endif                 1 #ifndef LINUX_KMEMCHECK_H
    2 #define LINUX_KMEMCHECK_H
    3 
    4 #include <linux/mm_types.h>
    5 #include <linux/types.h>
    6 
    7 #ifdef CONFIG_KMEMCHECK
    8 extern int kmemcheck_enabled;
    9 
   10 /* The slab-related functions. */
   11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
   12 void kmemcheck_free_shadow(struct page *page, int order);
   13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
   14 			  size_t size);
   15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
   16 
   17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
   18 			       gfp_t gfpflags);
   19 
   20 void kmemcheck_show_pages(struct page *p, unsigned int n);
   21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
   22 
   23 bool kmemcheck_page_is_tracked(struct page *p);
   24 
   25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
   26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
   27 void kmemcheck_mark_initialized(void *address, unsigned int n);
   28 void kmemcheck_mark_freed(void *address, unsigned int n);
   29 
   30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
   31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
   32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
   33 
   34 int kmemcheck_show_addr(unsigned long address);
   35 int kmemcheck_hide_addr(unsigned long address);
   36 
   37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
   38 
   39 /*
   40  * Bitfield annotations
   41  *
   42  * How to use: If you have a struct using bitfields, for example
   43  *
   44  *     struct a {
   45  *             int x:8, y:8;
   46  *     };
   47  *
   48  * then this should be rewritten as
   49  *
   50  *     struct a {
   51  *             kmemcheck_bitfield_begin(flags);
   52  *             int x:8, y:8;
   53  *             kmemcheck_bitfield_end(flags);
   54  *     };
   55  *
   56  * Now the "flags_begin" and "flags_end" members may be used to refer to the
   57  * beginning and end, respectively, of the bitfield (and things like
   58  * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
   59  * fields should be annotated:
   60  *
   61  *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
   62  *     kmemcheck_annotate_bitfield(a, flags);
   63  */
/* Zero-length array markers that bracket a run of bitfields (usage above). */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

/*
 * Mark the bytes between @ptr's name##_begin and name##_end markers as
 * initialized; silently does nothing when @ptr is NULL.
 */
#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!ptr)						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

/* Mark an entire variable's storage as initialized. */
#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)							\

   89 #else
/* CONFIG_KMEMCHECK disabled: every hook compiles away to a no-op. */
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
	unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

/* Without the checker everything is treated as initialized. */
static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name)	\
	do {					\
	} while (0)

#define kmemcheck_annotate_variable(var)	\
	do {					\
	} while (0)
  168 
  169 #endif /* CONFIG_KMEMCHECK */
  170 
  171 #endif /* LINUX_KMEMCHECK_H */                 1 /*
    2  * kobject.h - generic kernel object infrastructure.
    3  *
    4  * Copyright (c) 2002-2003 Patrick Mochel
    5  * Copyright (c) 2002-2003 Open Source Development Labs
    6  * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
    7  * Copyright (c) 2006-2008 Novell Inc.
    8  *
    9  * This file is released under the GPLv2.
   10  *
   11  * Please read Documentation/kobject.txt before using the kobject
   12  * interface, ESPECIALLY the parts about reference counts and object
   13  * destructors.
   14  */
   15 
   16 #ifndef _KOBJECT_H_
   17 #define _KOBJECT_H_
   18 
   19 #include <linux/types.h>
   20 #include <linux/list.h>
   21 #include <linux/sysfs.h>
   22 #include <linux/compiler.h>
   23 #include <linux/spinlock.h>
   24 #include <linux/kref.h>
   25 #include <linux/kobject_ns.h>
   26 #include <linux/kernel.h>
   27 #include <linux/wait.h>
   28 #include <linux/atomic.h>
   29 #include <linux/workqueue.h>
   30 
   31 #define UEVENT_HELPER_PATH_LEN		256
   32 #define UEVENT_NUM_ENVP			32	/* number of env pointers */
   33 #define UEVENT_BUFFER_SIZE		2048	/* buffer for the variables */
   34 
   35 #ifdef CONFIG_UEVENT_HELPER
   36 /* path to the userspace helper executed on an event */
   37 extern char uevent_helper[];
   38 #endif
   39 
   40 /* counter to tag the uevent, read only except for the kobject core */
   41 extern u64 uevent_seqnum;
   42 
   43 /*
   44  * The actions here must match the index to the string array
   45  * in lib/kobject_uevent.c
   46  *
   47  * Do not add new actions here without checking with the driver-core
   48  * maintainers. Action strings are not meant to express subsystem
   49  * or device specific properties. In most cases you want to send a
   50  * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event
   51  * specific variables added to the event environment.
   52  */
/* Keep this order in sync with the action string array in lib/kobject_uevent.c. */
enum kobject_action {
	KOBJ_ADD,
	KOBJ_REMOVE,
	KOBJ_CHANGE,
	KOBJ_MOVE,
	KOBJ_ONLINE,
	KOBJ_OFFLINE,
	KOBJ_MAX	/* sentinel, not a real action */
};
   62 
/*
 * The base kernel object, reference-counted via @kref.  See
 * Documentation/kobject.txt for lifetime and release rules.
 */
struct kobject {
	const char		*name;
	struct list_head	entry;
	struct kobject		*parent;
	struct kset		*kset;
	struct kobj_type	*ktype;
	struct kernfs_node	*sd; /* sysfs directory entry */
	struct kref		kref;	/* reference count */
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
	struct delayed_work	release;
#endif
	unsigned int state_initialized:1;
	unsigned int state_in_sysfs:1;
	unsigned int state_add_uevent_sent:1;
	unsigned int state_remove_uevent_sent:1;
	unsigned int uevent_suppress:1;	/* set to suppress uevent emission */
};
   80 
   81 extern __printf(2, 3)
   82 int kobject_set_name(struct kobject *kobj, const char *name, ...);
   83 extern __printf(2, 0)
   84 int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
   85 			   va_list vargs);
   86 
/* Return the kobject's name string (borrowed pointer, not a copy). */
static inline const char *kobject_name(const struct kobject *kobj)
{
	return kobj->name;
}
   91 
   92 extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
   93 extern __printf(3, 4) __must_check
   94 int kobject_add(struct kobject *kobj, struct kobject *parent,
   95 		const char *fmt, ...);
   96 extern __printf(4, 5) __must_check
   97 int kobject_init_and_add(struct kobject *kobj,
   98 			 struct kobj_type *ktype, struct kobject *parent,
   99 			 const char *fmt, ...);
  100 
  101 extern void kobject_del(struct kobject *kobj);
  102 
  103 extern struct kobject * __must_check kobject_create(void);
  104 extern struct kobject * __must_check kobject_create_and_add(const char *name,
  105 						struct kobject *parent);
  106 
  107 extern int __must_check kobject_rename(struct kobject *, const char *new_name);
  108 extern int __must_check kobject_move(struct kobject *, struct kobject *);
  109 
  110 extern struct kobject *kobject_get(struct kobject *kobj);
  111 extern void kobject_put(struct kobject *kobj);
  112 
  113 extern const void *kobject_namespace(struct kobject *kobj);
  114 extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
  115 
/* Per-type operations and attributes shared by all kobjects of one kind. */
struct kobj_type {
	void (*release)(struct kobject *kobj);	/* destructor; see Documentation/kobject.txt */
	const struct sysfs_ops *sysfs_ops;
	struct attribute **default_attrs;
	const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
	const void *(*namespace)(struct kobject *kobj);
};
  123 
/* Environment block assembled for a single uevent broadcast. */
struct kobj_uevent_env {
	char *argv[3];
	char *envp[UEVENT_NUM_ENVP];	/* pointers into buf */
	int envp_idx;			/* next free envp slot */
	char buf[UEVENT_BUFFER_SIZE];	/* storage for the variables */
	int buflen;			/* bytes consumed in buf */
};
  131 
/* Per-kset hooks invoked when a member kobject generates a uevent. */
struct kset_uevent_ops {
	int (* const filter)(struct kset *kset, struct kobject *kobj);	/* 0 = drop event */
	const char *(* const name)(struct kset *kset, struct kobject *kobj);
	int (* const uevent)(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env);	/* may add env vars */
};
  138 
/* A sysfs attribute whose show/store handlers take the owning kobject. */
struct kobj_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count);
};
  146 
  147 extern const struct sysfs_ops kobj_sysfs_ops;
  148 
  149 struct sock;
  150 
  151 /**
  152  * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
  153  *
  154  * A kset defines a group of kobjects.  They can be individually
  155  * different "types" but overall these kobjects all want to be grouped
  156  * together and operated on in the same manner.  ksets are used to
  157  * define the attribute callbacks and other common events that happen to
  158  * a kobject.
  159  *
  160  * @list: the list of all kobjects for this kset
  161  * @list_lock: a lock for iterating over the kobjects
  162  * @kobj: the embedded kobject for this kset (recursion, isn't it fun...)
  163  * @uevent_ops: the set of uevent operations for this kset.  These are
  164  * called whenever a kobject has something happen to it so that the kset
  165  * can add new environment variables, or filter out the uevents if so
  166  * desired.
  167  */
/* See the kernel-doc comment above for the semantics of each member. */
struct kset {
	struct list_head list;
	spinlock_t list_lock;	/* guards iteration over @list */
	struct kobject kobj;
	const struct kset_uevent_ops *uevent_ops;
};
  174 
  175 extern void kset_init(struct kset *kset);
  176 extern int __must_check kset_register(struct kset *kset);
  177 extern void kset_unregister(struct kset *kset);
  178 extern struct kset * __must_check kset_create_and_add(const char *name,
  179 						const struct kset_uevent_ops *u,
  180 						struct kobject *parent_kobj);
  181 
  182 static inline struct kset *to_kset(struct kobject *kobj)
  183 {
  184 	return kobj ? container_of(kobj, struct kset, kobj) : NULL;
  185 }
  186 
  187 static inline struct kset *kset_get(struct kset *k)
  188 {
  189 	return k ? to_kset(kobject_get(&k->kobj)) : NULL;
  190 }
  191 
/* Drop a reference on the kset via its embedded kobject. */
static inline void kset_put(struct kset *k)
{
	kobject_put(&k->kobj);
}
  196 
/* Return the kobj_type operations attached to @kobj. */
static inline struct kobj_type *get_ktype(struct kobject *kobj)
{
	return kobj->ktype;
}
  201 
  202 extern struct kobject *kset_find_obj(struct kset *, const char *);
  203 
  204 /* The global /sys/kernel/ kobject for people to chain off of */
  205 extern struct kobject *kernel_kobj;
  206 /* The global /sys/kernel/mm/ kobject for people to chain off of */
  207 extern struct kobject *mm_kobj;
  208 /* The global /sys/hypervisor/ kobject for people to chain off of */
  209 extern struct kobject *hypervisor_kobj;
  210 /* The global /sys/power/ kobject for people to chain off of */
  211 extern struct kobject *power_kobj;
  212 /* The global /sys/firmware/ kobject for people to chain off of */
  213 extern struct kobject *firmware_kobj;
  214 
  215 int kobject_uevent(struct kobject *kobj, enum kobject_action action);
  216 int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
  217 			char *envp[]);
  218 
  219 __printf(2, 3)
  220 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...);
  221 
  222 int kobject_action_type(const char *buf, size_t count,
  223 			enum kobject_action *type);
  224 
  225 #endif /* _KOBJECT_H_ */                 1 /*
    2  *  linux/include/linux/mmc/host.h
    3  *
    4  * This program is free software; you can redistribute it and/or modify
    5  * it under the terms of the GNU General Public License version 2 as
    6  * published by the Free Software Foundation.
    7  *
    8  *  Host driver specific definitions.
    9  */
   10 #ifndef LINUX_MMC_HOST_H
   11 #define LINUX_MMC_HOST_H
   12 
   13 #include <linux/leds.h>
   14 #include <linux/mutex.h>
   15 #include <linux/timer.h>
   16 #include <linux/sched.h>
   17 #include <linux/device.h>
   18 #include <linux/fault-inject.h>
   19 
   20 #include <linux/mmc/core.h>
   21 #include <linux/mmc/card.h>
   22 #include <linux/mmc/mmc.h>
   23 #include <linux/mmc/pm.h>
   24 
/* Bus/I-O configuration handed to the host driver through ->set_ios(). */
struct mmc_ios {
	unsigned int	clock;			/* clock rate */
	unsigned short	vdd;

/* vdd stores the bit number of the selected voltage range from below. */

	unsigned char	bus_mode;		/* command output mode */

#define MMC_BUSMODE_OPENDRAIN	1
#define MMC_BUSMODE_PUSHPULL	2

	unsigned char	chip_select;		/* SPI chip select */

#define MMC_CS_DONTCARE		0
#define MMC_CS_HIGH		1
#define MMC_CS_LOW		2

	unsigned char	power_mode;		/* power supply mode */

#define MMC_POWER_OFF		0
#define MMC_POWER_UP		1
#define MMC_POWER_ON		2
#define MMC_POWER_UNDEFINED	3

	unsigned char	bus_width;		/* data bus width */

#define MMC_BUS_WIDTH_1		0
#define MMC_BUS_WIDTH_4		2
#define MMC_BUS_WIDTH_8		3

	unsigned char	timing;			/* timing specification used */

#define MMC_TIMING_LEGACY	0
#define MMC_TIMING_MMC_HS	1
#define MMC_TIMING_SD_HS	2
#define MMC_TIMING_UHS_SDR12	3
#define MMC_TIMING_UHS_SDR25	4
#define MMC_TIMING_UHS_SDR50	5
#define MMC_TIMING_UHS_SDR104	6
#define MMC_TIMING_UHS_DDR50	7
#define MMC_TIMING_MMC_DDR52	8
#define MMC_TIMING_MMC_HS200	9
#define MMC_TIMING_MMC_HS400	10

	unsigned char	signal_voltage;		/* signalling voltage (1.8V or 3.3V) */

#define MMC_SIGNAL_VOLTAGE_330	0
#define MMC_SIGNAL_VOLTAGE_180	1
#define MMC_SIGNAL_VOLTAGE_120	2

	unsigned char	drv_type;		/* driver type (A, B, C, D) */

#define MMC_SET_DRIVER_TYPE_B	0
#define MMC_SET_DRIVER_TYPE_A	1
#define MMC_SET_DRIVER_TYPE_C	2
#define MMC_SET_DRIVER_TYPE_D	3

	bool enhanced_strobe;			/* hs400es selection */
};
   84 
/* Callback table implemented by MMC host controller drivers. */
struct mmc_host_ops {
	/*
	 * It is optional for the host to implement pre_req and post_req in
	 * order to support double buffering of requests (prepare one
	 * request while another request is active).
	 * pre_req() must always be followed by a post_req().
	 * To undo a call made to pre_req(), call post_req() with
	 * a nonzero err condition.
	 */
	void	(*post_req)(struct mmc_host *host, struct mmc_request *req,
			    int err);
	void	(*pre_req)(struct mmc_host *host, struct mmc_request *req,
			   bool is_first_req);
	void	(*request)(struct mmc_host *host, struct mmc_request *req);

	/*
	 * Avoid calling the next three functions too often or in a "fast
	 * path", since underlaying controller might implement them in an
	 * expensive and/or slow way. Also note that these functions might
	 * sleep, so don't call them in the atomic contexts!
	 */

	/*
	 * Notes to the set_ios callback:
	 * ios->clock might be 0. For some controllers, setting 0Hz
	 * as any other frequency works. However, some controllers
	 * explicitly need to disable the clock. Otherwise e.g. voltage
	 * switching might fail because the SDCLK is not really quiet.
	 */
	void	(*set_ios)(struct mmc_host *host, struct mmc_ios *ios);

	/*
	 * Return values for the get_ro callback should be:
	 *   0 for a read/write card
	 *   1 for a read-only card
	 *   -ENOSYS when not supported (equal to NULL callback)
	 *   or a negative errno value when something bad happened
	 */
	int	(*get_ro)(struct mmc_host *host);

	/*
	 * Return values for the get_cd callback should be:
	 *   0 for a absent card
	 *   1 for a present card
	 *   -ENOSYS when not supported (equal to NULL callback)
	 *   or a negative errno value when something bad happened
	 */
	int	(*get_cd)(struct mmc_host *host);

	void	(*enable_sdio_irq)(struct mmc_host *host, int enable);

	/* optional callback for HC quirks */
	void	(*init_card)(struct mmc_host *host, struct mmc_card *card);

	int	(*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);

	/* Check if the card is pulling dat[0:3] low */
	int	(*card_busy)(struct mmc_host *host);

	/* The tuning command opcode value is different for SD and eMMC cards */
	int	(*execute_tuning)(struct mmc_host *host, u32 opcode);

	/* Prepare HS400 target operating frequency depending host driver */
	int	(*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
	/* Prepare enhanced strobe depending host driver */
	void	(*hs400_enhanced_strobe)(struct mmc_host *host,
					 struct mmc_ios *ios);
	int	(*select_drive_strength)(struct mmc_card *card,
					 unsigned int max_dtr, int host_drv,
					 int card_drv, int *drv_type);
	void	(*hw_reset)(struct mmc_host *host);
	void	(*card_event)(struct mmc_host *host);

	/*
	 * Optional callback to support controllers with HW issues for multiple
	 * I/O. Returns the number of supported blocks for the request.
	 */
	int	(*multi_io_quirk)(struct mmc_card *card,
				  unsigned int direction, int blk_size);
};
  165 
  166 struct mmc_card;
  167 struct device;
  168 
/* Wrapper around a request processed asynchronously (see pre_req/post_req). */
struct mmc_async_req {
	/* active mmc request */
	struct mmc_request	*mrq;
	/*
	 * Check error status of completed mmc request.
	 * Returns 0 if success otherwise non zero.
	 */
	int (*err_check) (struct mmc_card *, struct mmc_async_req *);
};
  178 
  179 /**
  180  * struct mmc_slot - MMC slot functions
  181  *
  182  * @cd_irq:		MMC/SD-card slot hotplug detection IRQ or -EINVAL
  183  * @handler_priv:	MMC/SD-card slot context
  184  *
  185  * Some MMC/SD host controllers implement slot-functions like card and
  186  * write-protect detection natively. However, a large number of controllers
  187  * leave these functions to the CPU. This struct provides a hook to attach
  188  * such slot-function drivers.
  189  */
struct mmc_slot {
	int cd_irq;		/* hotplug-detect IRQ or -EINVAL (see kernel-doc above) */
	void *handler_priv;	/* slot-function driver context */
};
  194 
  195 /**
  196  * mmc_context_info - synchronization details for mmc context
  197  * @is_done_rcv		wake up reason was done request
  198  * @is_new_req		wake up reason was new request
  199  * @is_waiting_last_req	mmc context waiting for single running request
  200  * @wait		wait queue
  201  * @lock		lock to protect data fields
  202  */
struct mmc_context_info {
	bool			is_done_rcv;		/* wakeup: request completed */
	bool			is_new_req;		/* wakeup: new request arrived */
	bool			is_waiting_last_req;	/* waiting on single running request */
	wait_queue_head_t	wait;
	spinlock_t		lock;			/* protects the fields above */
};
  210 
  211 struct regulator;
  212 struct mmc_pwrseq;
  213 
/* External regulator supplies backing the card slot. */
struct mmc_supply {
	struct regulator *vmmc;		/* Card power supply */
	struct regulator *vqmmc;	/* Optional Vccq supply */
};
  218 
  219 struct mmc_host {
  220 	struct device		*parent;
  221 	struct device		class_dev;
  222 	int			index;
  223 	const struct mmc_host_ops *ops;
  224 	struct mmc_pwrseq	*pwrseq;
  225 	unsigned int		f_min;
  226 	unsigned int		f_max;
  227 	unsigned int		f_init;
  228 	u32			ocr_avail;
  229 	u32			ocr_avail_sdio;	/* SDIO-specific OCR */
  230 	u32			ocr_avail_sd;	/* SD-specific OCR */
  231 	u32			ocr_avail_mmc;	/* MMC-specific OCR */
  232 #ifdef CONFIG_PM_SLEEP
  233 	struct notifier_block	pm_notify;
  234 #endif
  235 	u32			max_current_330;
  236 	u32			max_current_300;
  237 	u32			max_current_180;
  238 
  239 #define MMC_VDD_165_195		0x00000080	/* VDD voltage 1.65 - 1.95 */
  240 #define MMC_VDD_20_21		0x00000100	/* VDD voltage 2.0 ~ 2.1 */
  241 #define MMC_VDD_21_22		0x00000200	/* VDD voltage 2.1 ~ 2.2 */
  242 #define MMC_VDD_22_23		0x00000400	/* VDD voltage 2.2 ~ 2.3 */
  243 #define MMC_VDD_23_24		0x00000800	/* VDD voltage 2.3 ~ 2.4 */
  244 #define MMC_VDD_24_25		0x00001000	/* VDD voltage 2.4 ~ 2.5 */
  245 #define MMC_VDD_25_26		0x00002000	/* VDD voltage 2.5 ~ 2.6 */
  246 #define MMC_VDD_26_27		0x00004000	/* VDD voltage 2.6 ~ 2.7 */
  247 #define MMC_VDD_27_28		0x00008000	/* VDD voltage 2.7 ~ 2.8 */
  248 #define MMC_VDD_28_29		0x00010000	/* VDD voltage 2.8 ~ 2.9 */
  249 #define MMC_VDD_29_30		0x00020000	/* VDD voltage 2.9 ~ 3.0 */
  250 #define MMC_VDD_30_31		0x00040000	/* VDD voltage 3.0 ~ 3.1 */
  251 #define MMC_VDD_31_32		0x00080000	/* VDD voltage 3.1 ~ 3.2 */
  252 #define MMC_VDD_32_33		0x00100000	/* VDD voltage 3.2 ~ 3.3 */
  253 #define MMC_VDD_33_34		0x00200000	/* VDD voltage 3.3 ~ 3.4 */
  254 #define MMC_VDD_34_35		0x00400000	/* VDD voltage 3.4 ~ 3.5 */
  255 #define MMC_VDD_35_36		0x00800000	/* VDD voltage 3.5 ~ 3.6 */
  256 
  257 	u32			caps;		/* Host capabilities */
  258 
  259 #define MMC_CAP_4_BIT_DATA	(1 << 0)	/* Can the host do 4 bit transfers */
  260 #define MMC_CAP_MMC_HIGHSPEED	(1 << 1)	/* Can do MMC high-speed timing */
  261 #define MMC_CAP_SD_HIGHSPEED	(1 << 2)	/* Can do SD high-speed timing */
  262 #define MMC_CAP_SDIO_IRQ	(1 << 3)	/* Can signal pending SDIO IRQs */
  263 #define MMC_CAP_SPI		(1 << 4)	/* Talks only SPI protocols */
  264 #define MMC_CAP_NEEDS_POLL	(1 << 5)	/* Needs polling for card-detection */
  265 #define MMC_CAP_8_BIT_DATA	(1 << 6)	/* Can the host do 8 bit transfers */
  266 #define MMC_CAP_AGGRESSIVE_PM	(1 << 7)	/* Suspend (e)MMC/SD at idle  */
  267 #define MMC_CAP_NONREMOVABLE	(1 << 8)	/* Nonremovable e.g. eMMC */
  268 #define MMC_CAP_WAIT_WHILE_BUSY	(1 << 9)	/* Waits while card is busy */
  269 #define MMC_CAP_ERASE		(1 << 10)	/* Allow erase/trim commands */
  270 #define MMC_CAP_1_8V_DDR	(1 << 11)	/* can support */
  271 						/* DDR mode at 1.8V */
  272 #define MMC_CAP_1_2V_DDR	(1 << 12)	/* can support */
  273 						/* DDR mode at 1.2V */
  274 #define MMC_CAP_POWER_OFF_CARD	(1 << 13)	/* Can power off after boot */
  275 #define MMC_CAP_BUS_WIDTH_TEST	(1 << 14)	/* CMD14/CMD19 bus width ok */
  276 #define MMC_CAP_UHS_SDR12	(1 << 15)	/* Host supports UHS SDR12 mode */
  277 #define MMC_CAP_UHS_SDR25	(1 << 16)	/* Host supports UHS SDR25 mode */
  278 #define MMC_CAP_UHS_SDR50	(1 << 17)	/* Host supports UHS SDR50 mode */
  279 #define MMC_CAP_UHS_SDR104	(1 << 18)	/* Host supports UHS SDR104 mode */
  280 #define MMC_CAP_UHS_DDR50	(1 << 19)	/* Host supports UHS DDR50 mode */
  281 #define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
  282 #define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
  283 #define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */
  284 #define MMC_CAP_CMD_DURING_TFR	(1 << 29)	/* Commands during data transfer */
  285 #define MMC_CAP_CMD23		(1 << 30)	/* CMD23 supported. */
  286 #define MMC_CAP_HW_RESET	(1 << 31)	/* Hardware reset */
  287 
  288 	u32			caps2;		/* More host capabilities */
  289 
  290 #define MMC_CAP2_BOOTPART_NOACC	(1 << 0)	/* Boot partition no access */
  291 #define MMC_CAP2_FULL_PWR_CYCLE	(1 << 2)	/* Can do full power cycle */
  292 #define MMC_CAP2_HS200_1_8V_SDR	(1 << 5)        /* can support */
  293 #define MMC_CAP2_HS200_1_2V_SDR	(1 << 6)        /* can support */
  294 #define MMC_CAP2_HS200		(MMC_CAP2_HS200_1_8V_SDR | \
  295 				 MMC_CAP2_HS200_1_2V_SDR)
  296 #define MMC_CAP2_HC_ERASE_SZ	(1 << 9)	/* High-capacity erase size */
  297 #define MMC_CAP2_CD_ACTIVE_HIGH	(1 << 10)	/* Card-detect signal active high */
  298 #define MMC_CAP2_RO_ACTIVE_HIGH	(1 << 11)	/* Write-protect signal active high */
  299 #define MMC_CAP2_PACKED_RD	(1 << 12)	/* Allow packed read */
  300 #define MMC_CAP2_PACKED_WR	(1 << 13)	/* Allow packed write */
  301 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
  302 				 MMC_CAP2_PACKED_WR)
  303 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14)	/* Don't power up before scan */
  304 #define MMC_CAP2_HS400_1_8V	(1 << 15)	/* Can support HS400 1.8V */
  305 #define MMC_CAP2_HS400_1_2V	(1 << 16)	/* Can support HS400 1.2V */
  306 #define MMC_CAP2_HS400		(MMC_CAP2_HS400_1_8V | \
  307 				 MMC_CAP2_HS400_1_2V)
  308 #define MMC_CAP2_HSX00_1_2V	(MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
  309 #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
  310 #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18)	/* No physical write protect pin, assume that card is always read-write */
  311 #define MMC_CAP2_NO_SDIO	(1 << 19)	/* Do not send SDIO commands during initialization */
  312 #define MMC_CAP2_HS400_ES	(1 << 20)	/* Host supports enhanced strobe */
  313 #define MMC_CAP2_NO_SD		(1 << 21)	/* Do not send SD commands during initialization */
  314 #define MMC_CAP2_NO_MMC		(1 << 22)	/* Do not send (e)MMC commands during initialization */
  315 
  316 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
  317 
  318 	/* host specific block data */
  319 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
  320 	unsigned short		max_segs;	/* see blk_queue_max_segments */
  321 	unsigned short		unused;
  322 	unsigned int		max_req_size;	/* maximum number of bytes in one req */
  323 	unsigned int		max_blk_size;	/* maximum size of one mmc block */
  324 	unsigned int		max_blk_count;	/* maximum number of blocks in one req */
  325 	unsigned int		max_busy_timeout; /* max busy timeout in ms */
  326 
  327 	/* private data */
  328 	spinlock_t		lock;		/* lock for claim and bus ops */
  329 
  330 	struct mmc_ios		ios;		/* current io bus settings */
  331 
  332 	/* group bitfields together to minimize padding */
  333 	unsigned int		use_spi_crc:1;
  334 	unsigned int		claimed:1;	/* host exclusively claimed */
  335 	unsigned int		bus_dead:1;	/* bus has been released */
  336 #ifdef CONFIG_MMC_DEBUG
  337 	unsigned int		removed:1;	/* host is being removed */
  338 #endif
  339 	unsigned int		can_retune:1;	/* re-tuning can be used */
  340 	unsigned int		doing_retune:1;	/* re-tuning in progress */
  341 	unsigned int		retune_now:1;	/* do re-tuning at next req */
  342 	unsigned int		retune_paused:1; /* re-tuning is temporarily disabled */
  343 
  344 	int			rescan_disable;	/* disable card detection */
  345 	int			rescan_entered;	/* used with nonremovable devices */
  346 
  347 	int			need_retune;	/* re-tuning is needed */
  348 	int			hold_retune;	/* hold off re-tuning */
  349 	unsigned int		retune_period;	/* re-tuning period in secs */
  350 	struct timer_list	retune_timer;	/* for periodic re-tuning */
  351 
  352 	bool			trigger_card_event; /* card_event necessary */
  353 
  354 	struct mmc_card		*card;		/* device attached to this host */
  355 
  356 	wait_queue_head_t	wq;
  357 	struct task_struct	*claimer;	/* task that has host claimed */
  358 	int			claim_cnt;	/* "claim" nesting count */
  359 
  360 	struct delayed_work	detect;
  361 	int			detect_change;	/* card detect flag */
  362 	struct mmc_slot		slot;
  363 
  364 	const struct mmc_bus_ops *bus_ops;	/* current bus driver */
  365 	unsigned int		bus_refs;	/* reference counter */
  366 
  367 	unsigned int		sdio_irqs;
  368 	struct task_struct	*sdio_irq_thread;
  369 	bool			sdio_irq_pending;
  370 	atomic_t		sdio_irq_thread_abort;
  371 
  372 	mmc_pm_flag_t		pm_flags;	/* requested pm features */
  373 
  374 	struct led_trigger	*led;		/* activity led */
  375 
  376 #ifdef CONFIG_REGULATOR
  377 	bool			regulator_enabled; /* regulator state */
  378 #endif
  379 	struct mmc_supply	supply;
  380 
  381 	struct dentry		*debugfs_root;
  382 
  383 	struct mmc_async_req	*areq;		/* active async req */
  384 	struct mmc_context_info	context_info;	/* async synchronization info */
  385 
  386 	/* Ongoing data transfer that allows commands during transfer */
  387 	struct mmc_request	*ongoing_mrq;
  388 
  389 #ifdef CONFIG_FAIL_MMC_REQUEST
  390 	struct fault_attr	fail_mmc_request;
  391 #endif
  392 
  393 	unsigned int		actual_clock;	/* Actual HC clock rate */
  394 
  395 	unsigned int		slotno;	/* used for sdio acpi binding */
  396 
  397 	int			dsr_req;	/* DSR value is valid */
  398 	u32			dsr;	/* optional driver stage (DSR) value */
  399 
  400 	unsigned long		private[0] ____cacheline_aligned;
  401 };
  402 
  403 struct mmc_host *mmc_alloc_host(int extra, struct device *);
  404 int mmc_add_host(struct mmc_host *);
  405 void mmc_remove_host(struct mmc_host *);
  406 void mmc_free_host(struct mmc_host *);
  407 int mmc_of_parse(struct mmc_host *host);
  408 
  409 static inline void *mmc_priv(struct mmc_host *host)
  410 {
  411 	return (void *)host->private;
  412 }
  413 
  414 #define mmc_host_is_spi(host)	((host)->caps & MMC_CAP_SPI)
  415 
  416 #define mmc_dev(x)	((x)->parent)
  417 #define mmc_classdev(x)	(&(x)->class_dev)
  418 #define mmc_hostname(x)	(dev_name(&(x)->class_dev))
  419 
  420 int mmc_power_save_host(struct mmc_host *host);
  421 int mmc_power_restore_host(struct mmc_host *host);
  422 
  423 void mmc_detect_change(struct mmc_host *, unsigned long delay);
  424 void mmc_request_done(struct mmc_host *, struct mmc_request *);
  425 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
  426 
/*
 * mmc_signal_sdio_irq - host drivers call this to report a pending SDIO IRQ.
 *
 * NOTE(review): enable_sdio_irq(host, 0) presumably masks further SDIO
 * interrupt signalling at the controller (second argument 0) before the
 * handler thread is woken — confirm against struct mmc_host_ops.
 */
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
	host->ops->enable_sdio_irq(host, 0);
	host->sdio_irq_pending = true;	/* record the IRQ for the thread */
	if (host->sdio_irq_thread)
		wake_up_process(host->sdio_irq_thread);	/* kick the SDIO IRQ thread */
}
  434 
  435 void sdio_run_irqs(struct mmc_host *host);
  436 
  437 #ifdef CONFIG_REGULATOR
  438 int mmc_regulator_get_ocrmask(struct regulator *supply);
  439 int mmc_regulator_set_ocr(struct mmc_host *mmc,
  440 			struct regulator *supply,
  441 			unsigned short vdd_bit);
  442 int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
  443 #else
/*
 * Stubs used when CONFIG_REGULATOR is not set: OCR queries report no
 * supported bits, OCR programming is a successful no-op, and signal
 * voltage (vqmmc) switching is refused.
 */
static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	return 0;	/* empty OCR mask without regulator support */
}

static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
				 struct regulator *supply,
				 unsigned short vdd_bit)
{
	return 0;	/* nothing to program; report success */
}

static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
					  struct mmc_ios *ios)
{
	return -EINVAL;	/* voltage switching unavailable */
}
  461 #endif
  462 
  463 int mmc_regulator_get_supply(struct mmc_host *mmc);
  464 
  465 static inline int mmc_card_is_removable(struct mmc_host *host)
  466 {
  467 	return !(host->caps & MMC_CAP_NONREMOVABLE);
  468 }
  469 
  470 static inline int mmc_card_keep_power(struct mmc_host *host)
  471 {
  472 	return host->pm_flags & MMC_PM_KEEP_POWER;
  473 }
  474 
  475 static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
  476 {
  477 	return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
  478 }
  479 
  480 static inline int mmc_host_cmd23(struct mmc_host *host)
  481 {
  482 	return host->caps & MMC_CAP_CMD23;
  483 }
  484 
  485 static inline int mmc_boot_partition_access(struct mmc_host *host)
  486 {
  487 	return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
  488 }
  489 
  490 static inline int mmc_host_uhs(struct mmc_host *host)
  491 {
  492 	return host->caps &
  493 		(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
  494 		 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
  495 		 MMC_CAP_UHS_DDR50);
  496 }
  497 
  498 static inline int mmc_host_packed_wr(struct mmc_host *host)
  499 {
  500 	return host->caps2 & MMC_CAP2_PACKED_WR;
  501 }
  502 
  503 static inline int mmc_card_hs(struct mmc_card *card)
  504 {
  505 	return card->host->ios.timing == MMC_TIMING_SD_HS ||
  506 		card->host->ios.timing == MMC_TIMING_MMC_HS;
  507 }
  508 
  509 static inline int mmc_card_uhs(struct mmc_card *card)
  510 {
  511 	return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 &&
  512 		card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
  513 }
  514 
  515 static inline bool mmc_card_hs200(struct mmc_card *card)
  516 {
  517 	return card->host->ios.timing == MMC_TIMING_MMC_HS200;
  518 }
  519 
  520 static inline bool mmc_card_ddr52(struct mmc_card *card)
  521 {
  522 	return card->host->ios.timing == MMC_TIMING_MMC_DDR52;
  523 }
  524 
  525 static inline bool mmc_card_hs400(struct mmc_card *card)
  526 {
  527 	return card->host->ios.timing == MMC_TIMING_MMC_HS400;
  528 }
  529 
  530 static inline bool mmc_card_hs400es(struct mmc_card *card)
  531 {
  532 	return card->host->ios.enhanced_strobe;
  533 }
  534 
  535 void mmc_retune_timer_stop(struct mmc_host *host);
  536 
  537 static inline void mmc_retune_needed(struct mmc_host *host)
  538 {
  539 	if (host->can_retune)
  540 		host->need_retune = 1;
  541 }
  542 
  543 static inline void mmc_retune_recheck(struct mmc_host *host)
  544 {
  545 	if (host->hold_retune <= 1)
  546 		host->retune_now = 1;
  547 }
  548 
  549 void mmc_retune_pause(struct mmc_host *host);
  550 void mmc_retune_unpause(struct mmc_host *host);
  551 
  552 #endif /* LINUX_MMC_HOST_H */                 1 /*
    2  * Linux Plug and Play Support
    3  * Copyright by Adam Belay <ambx1@neo.rr.com>
    4  * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
    5  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
    6  */
    7 
    8 #ifndef _LINUX_PNP_H
    9 #define _LINUX_PNP_H
   10 
   11 #include <linux/device.h>
   12 #include <linux/list.h>
   13 #include <linux/errno.h>
   14 #include <linux/mod_devicetable.h>
   15 #include <linux/console.h>
   16 
   17 #define PNP_NAME_LEN		50
   18 
   19 struct pnp_protocol;
   20 struct pnp_dev;
   21 
   22 /*
   23  * Resource Management
   24  */
   25 #ifdef CONFIG_PNP
   26 struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned long type,
   27 				unsigned int num);
   28 #else
/* Without CONFIG_PNP there are no PnP resources to look up. */
static inline struct resource *pnp_get_resource(struct pnp_dev *dev,
			unsigned long type, unsigned int num)
{
	return NULL;
}
   34 #endif
   35 
   36 static inline int pnp_resource_valid(struct resource *res)
   37 {
   38 	if (res)
   39 		return 1;
   40 	return 0;
   41 }
   42 
   43 static inline int pnp_resource_enabled(struct resource *res)
   44 {
   45 	if (res && !(res->flags & IORESOURCE_DISABLED))
   46 		return 1;
   47 	return 0;
   48 }
   49 
   50 static inline resource_size_t pnp_resource_len(struct resource *res)
   51 {
   52 	if (res->start == 0 && res->end == 0)
   53 		return 0;
   54 	return resource_size(res);
   55 }
   56 
   57 
   58 static inline resource_size_t pnp_port_start(struct pnp_dev *dev,
   59 					     unsigned int bar)
   60 {
   61 	struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
   62 
   63 	if (pnp_resource_valid(res))
   64 		return res->start;
   65 	return 0;
   66 }
   67 
   68 static inline resource_size_t pnp_port_end(struct pnp_dev *dev,
   69 					   unsigned int bar)
   70 {
   71 	struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
   72 
   73 	if (pnp_resource_valid(res))
   74 		return res->end;
   75 	return 0;
   76 }
   77 
   78 static inline unsigned long pnp_port_flags(struct pnp_dev *dev,
   79 					   unsigned int bar)
   80 {
   81 	struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
   82 
   83 	if (pnp_resource_valid(res))
   84 		return res->flags;
   85 	return IORESOURCE_IO | IORESOURCE_AUTO;
   86 }
   87 
   88 static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
   89 {
   90 	return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IO, bar));
   91 }
   92 
   93 static inline resource_size_t pnp_port_len(struct pnp_dev *dev,
   94 					   unsigned int bar)
   95 {
   96 	struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
   97 
   98 	if (pnp_resource_valid(res))
   99 		return pnp_resource_len(res);
  100 	return 0;
  101 }
  102 
  103 
  104 static inline resource_size_t pnp_mem_start(struct pnp_dev *dev,
  105 					    unsigned int bar)
  106 {
  107 	struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
  108 
  109 	if (pnp_resource_valid(res))
  110 		return res->start;
  111 	return 0;
  112 }
  113 
  114 static inline resource_size_t pnp_mem_end(struct pnp_dev *dev,
  115 					  unsigned int bar)
  116 {
  117 	struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
  118 
  119 	if (pnp_resource_valid(res))
  120 		return res->end;
  121 	return 0;
  122 }
  123 
  124 static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar)
  125 {
  126 	struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
  127 
  128 	if (pnp_resource_valid(res))
  129 		return res->flags;
  130 	return IORESOURCE_MEM | IORESOURCE_AUTO;
  131 }
  132 
  133 static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
  134 {
  135 	return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_MEM, bar));
  136 }
  137 
  138 static inline resource_size_t pnp_mem_len(struct pnp_dev *dev,
  139 					  unsigned int bar)
  140 {
  141 	struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
  142 
  143 	if (pnp_resource_valid(res))
  144 		return pnp_resource_len(res);
  145 	return 0;
  146 }
  147 
  148 
  149 static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar)
  150 {
  151 	struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
  152 
  153 	if (pnp_resource_valid(res))
  154 		return res->start;
  155 	return -1;
  156 }
  157 
  158 static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar)
  159 {
  160 	struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
  161 
  162 	if (pnp_resource_valid(res))
  163 		return res->flags;
  164 	return IORESOURCE_IRQ | IORESOURCE_AUTO;
  165 }
  166 
  167 static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
  168 {
  169 	return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IRQ, bar));
  170 }
  171 
  172 
  173 static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar)
  174 {
  175 	struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
  176 
  177 	if (pnp_resource_valid(res))
  178 		return res->start;
  179 	return -1;
  180 }
  181 
  182 static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar)
  183 {
  184 	struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
  185 
  186 	if (pnp_resource_valid(res))
  187 		return res->flags;
  188 	return IORESOURCE_DMA | IORESOURCE_AUTO;
  189 }
  190 
  191 static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
  192 {
  193 	return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_DMA, bar));
  194 }
  195 
  196 
  197 /*
  198  * Device Management
  199  */
  200 
/*
 * One Plug and Play card.  A card groups the pnp_dev instances attached
 * to it (the "devices" list) under a single enumerating protocol.
 */
struct pnp_card {
	struct device dev;		/* Driver Model device interface */
	unsigned char number;		/* used as an index, must be unique */
	struct list_head global_list;	/* node in global list of cards */
	struct list_head protocol_list;	/* node in protocol's list of cards */
	struct list_head devices;	/* devices attached to the card */

	struct pnp_protocol *protocol;	/* protocol this card belongs to */
	struct pnp_id *id;		/* contains supported EISA IDs */

	char name[PNP_NAME_LEN];	/* contains a human-readable name */
	unsigned char pnpver;		/* Plug & Play version */
	unsigned char productver;	/* product version */
	unsigned int serial;		/* serial number */
	unsigned char checksum;		/* if zero - checksum passed */
	struct proc_dir_entry *procdir;	/* directory entry in /proc/bus/isapnp */
};
  218 
  219 #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
  220 #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
  221 #define to_pnp_card(n) container_of(n, struct pnp_card, dev)
  222 #define pnp_for_each_card(card) \
  223 	for((card) = global_to_pnp_card(pnp_cards.next); \
  224 	(card) != global_to_pnp_card(&pnp_cards); \
  225 	(card) = global_to_pnp_card((card)->global_list.next))
  226 
/*
 * Binding between a pnp_card and the pnp_card_driver that claimed it,
 * plus the driver's private data and the last requested PM state.
 */
struct pnp_card_link {
	struct pnp_card *card;
	struct pnp_card_driver *driver;
	void *driver_data;		/* owned by the card driver */
	pm_message_t pm_state;
};
  233 
/* Fetch the card driver's private pointer stored in the link. */
static inline void *pnp_get_card_drvdata(struct pnp_card_link *pcard)
{
	return pcard->driver_data;
}

/* Stash the card driver's private pointer in the link. */
static inline void pnp_set_card_drvdata(struct pnp_card_link *pcard, void *data)
{
	pcard->driver_data = data;
}
  243 
/*
 * One logical Plug and Play device.  May live on a pnp_card (card /
 * card_link non-NULL) and is enumerated by exactly one pnp_protocol.
 */
struct pnp_dev {
	struct device dev;		/* Driver Model device interface */
	u64 dma_mask;
	unsigned int number;		/* used as an index, must be unique */
	int status;			/* PNP_READY/ATTACHED/BUSY/FAULTY */

	struct list_head global_list;	/* node in global list of devices */
	struct list_head protocol_list;	/* node in list of device's protocol */
	struct list_head card_list;	/* node in card's list of devices */
	struct list_head rdev_list;	/* node in cards list of requested devices */

	struct pnp_protocol *protocol;
	struct pnp_card *card;	/* card the device is attached to, none if NULL */
	struct pnp_driver *driver;
	struct pnp_card_link *card_link;

	struct pnp_id *id;		/* supported EISA IDs */

	int active;			/* nonzero while resources are enabled */
	int capabilities;		/* PNP_READ/WRITE/DISABLE/... bits */
	unsigned int num_dependent_sets;
	struct list_head resources;
	struct list_head options;

	char name[PNP_NAME_LEN];	/* contains a human-readable name */
	int flags;			/* used by protocols */
	struct proc_dir_entry *procent;	/* device entry in /proc/bus/isapnp */
	void *data;			/* protocol-private (e.g. acpi_device *) */
};
  273 
  274 #define global_to_pnp_dev(n) list_entry(n, struct pnp_dev, global_list)
  275 #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
  276 #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
  277 #define	to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
  278 #define pnp_for_each_dev(dev) \
  279 	for((dev) = global_to_pnp_dev(pnp_global.next); \
  280 	(dev) != global_to_pnp_dev(&pnp_global); \
  281 	(dev) = global_to_pnp_dev((dev)->global_list.next))
  282 #define card_for_each_dev(card,dev) \
  283 	for((dev) = card_to_pnp_dev((card)->devices.next); \
  284 	(dev) != card_to_pnp_dev(&(card)->devices); \
  285 	(dev) = card_to_pnp_dev((dev)->card_list.next))
  286 #define pnp_dev_name(dev) (dev)->name
  287 
/* Thin wrappers over the driver-core drvdata accessors of the device. */
static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}
  297 
/* Table entry matching a device id string to a quirk fixup routine. */
struct pnp_fixup {
	char id[7];		/* id to match (7 bytes incl. NUL terminator) */
	void (*quirk_function) (struct pnp_dev * dev);	/* fixup function */
};
  302 
  303 /* config parameters */
  304 #define PNP_CONFIG_NORMAL	0x0001
  305 #define PNP_CONFIG_FORCE	0x0002	/* disables validity checking */
  306 
  307 /* capabilities */
  308 #define PNP_READ		0x0001
  309 #define PNP_WRITE		0x0002
  310 #define PNP_DISABLE		0x0004
  311 #define PNP_CONFIGURABLE	0x0008
  312 #define PNP_REMOVABLE		0x0010
  313 #define PNP_CONSOLE		0x0020
  314 
  315 #define pnp_can_read(dev)	(((dev)->protocol->get) && \
  316 				 ((dev)->capabilities & PNP_READ))
  317 #define pnp_can_write(dev)	(((dev)->protocol->set) && \
  318 				 ((dev)->capabilities & PNP_WRITE))
  319 #define pnp_can_disable(dev)	(((dev)->protocol->disable) &&		  \
  320 				 ((dev)->capabilities & PNP_DISABLE) &&	  \
  321 				 (!((dev)->capabilities & PNP_CONSOLE) || \
  322 				  console_suspend_enabled))
  323 #define pnp_can_configure(dev)	((!(dev)->active) && \
  324 				 ((dev)->capabilities & PNP_CONFIGURABLE))
  325 #define pnp_can_suspend(dev)	(((dev)->protocol->suspend) &&		  \
  326 				 (!((dev)->capabilities & PNP_CONSOLE) || \
  327 				  console_suspend_enabled))
  328 
  329 
  330 #ifdef CONFIG_ISAPNP
  331 extern struct pnp_protocol isapnp_protocol;
  332 #define pnp_device_is_isapnp(dev) ((dev)->protocol == (&isapnp_protocol))
  333 #else
  334 #define pnp_device_is_isapnp(dev) 0
  335 #endif
  336 extern struct mutex pnp_res_mutex;
  337 
  338 #ifdef CONFIG_PNPBIOS
  339 extern struct pnp_protocol pnpbios_protocol;
  340 extern bool arch_pnpbios_disabled(void);
  341 #define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol))
  342 #else
  343 #define pnp_device_is_pnpbios(dev) 0
  344 #define arch_pnpbios_disabled()	false
  345 #endif
  346 
  347 #ifdef CONFIG_PNPACPI
  348 extern struct pnp_protocol pnpacpi_protocol;
  349 
/*
 * Map a PnP device back to its ACPI companion.  Only valid for devices
 * enumerated by pnpacpi_protocol, whose ->data holds the acpi_device.
 */
static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev)
{
	if (dev->protocol == &pnpacpi_protocol)
		return dev->data;
	return NULL;	/* not an ACPI-enumerated device */
}
  356 #else
  357 #define pnp_acpi_device(dev) 0
  358 #endif
  359 
  360 /* status */
  361 #define PNP_READY		0x0000
  362 #define PNP_ATTACHED		0x0001
  363 #define PNP_BUSY		0x0002
  364 #define PNP_FAULTY		0x0004
  365 
  366 /* isapnp specific macros */
  367 
  368 #define isapnp_card_number(dev)	((dev)->card ? (dev)->card->number : -1)
  369 #define isapnp_csn_number(dev)  ((dev)->number)
  370 
  371 /*
  372  * Driver Management
  373  */
  374 
/* Singly-linked list node holding one PnP/EISA id string of a device. */
struct pnp_id {
	char id[PNP_ID_LEN];
	struct pnp_id *next;
};
  379 
/*
 * A PnP device driver.  Devices are matched against id_table (see
 * compare_pnp_id); wraps a struct device_driver for the driver core.
 */
struct pnp_driver {
	char *name;
	const struct pnp_device_id *id_table;
	unsigned int flags;		/* PNP_DRIVER_RES_* */
	int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
	void (*remove) (struct pnp_dev *dev);
	void (*shutdown) (struct pnp_dev *dev);
	int (*suspend) (struct pnp_dev *dev, pm_message_t state);
	int (*resume) (struct pnp_dev *dev);
	struct device_driver driver;
};
  391 
  392 #define	to_pnp_driver(drv) container_of(drv, struct pnp_driver, driver)
  393 
/*
 * A driver for multi-function PnP cards.  Probe/remove/PM callbacks
 * operate on the card link; "link" embeds an ordinary pnp_driver.
 */
struct pnp_card_driver {
	struct list_head global_list;
	char *name;
	const struct pnp_card_device_id *id_table;
	unsigned int flags;		/* PNP_DRIVER_RES_* */
	int (*probe) (struct pnp_card_link *card,
		      const struct pnp_card_device_id *card_id);
	void (*remove) (struct pnp_card_link *card);
	int (*suspend) (struct pnp_card_link *card, pm_message_t state);
	int (*resume) (struct pnp_card_link *card);
	struct pnp_driver link;		/* backing single-device driver */
};
  406 
  407 #define	to_pnp_card_driver(drv) container_of(drv, struct pnp_card_driver, link)
  408 
  409 /* pnp driver flags */
  410 #define PNP_DRIVER_RES_DO_NOT_CHANGE	0x0001	/* do not change the state of the device */
  411 #define PNP_DRIVER_RES_DISABLE		0x0003	/* ensure the device is disabled */
  412 
  413 /*
  414  * Protocol Management
  415  */
  416 
/*
 * A PnP enumeration backend (ISAPnP, PNPBIOS, PNPACPI, ...).  Supplies
 * resource get/set/disable hooks and owns lists of its cards and devices.
 */
struct pnp_protocol {
	struct list_head protocol_list;
	char *name;

	/* resource control functions */
	int (*get) (struct pnp_dev *dev);
	int (*set) (struct pnp_dev *dev);
	int (*disable) (struct pnp_dev *dev);

	/* protocol specific suspend/resume */
	bool (*can_wakeup) (struct pnp_dev *dev);
	int (*suspend) (struct pnp_dev * dev, pm_message_t state);
	int (*resume) (struct pnp_dev * dev);

	/* used by pnp layer only (look but don't touch) */
	unsigned char number;	/* protocol number */
	struct device dev;	/* link to driver model */
	struct list_head cards;
	struct list_head devices;
};
  437 
  438 #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
  439 #define protocol_for_each_card(protocol,card) \
  440 	for((card) = protocol_to_pnp_card((protocol)->cards.next); \
  441 	(card) != protocol_to_pnp_card(&(protocol)->cards); \
  442 	(card) = protocol_to_pnp_card((card)->protocol_list.next))
  443 #define protocol_for_each_dev(protocol,dev) \
  444 	for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
  445 	(dev) != protocol_to_pnp_dev(&(protocol)->devices); \
  446 	(dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
  447 
  448 extern struct bus_type pnp_bus_type;
  449 
  450 #if defined(CONFIG_PNP)
  451 
  452 /* device management */
  453 int pnp_device_attach(struct pnp_dev *pnp_dev);
  454 void pnp_device_detach(struct pnp_dev *pnp_dev);
  455 extern struct list_head pnp_global;
  456 extern int pnp_platform_devices;
  457 
  458 /* multidevice card support */
  459 struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink,
  460 					const char *id, struct pnp_dev *from);
  461 void pnp_release_card_device(struct pnp_dev *dev);
  462 int pnp_register_card_driver(struct pnp_card_driver *drv);
  463 void pnp_unregister_card_driver(struct pnp_card_driver *drv);
  464 extern struct list_head pnp_cards;
  465 
  466 /* resource management */
  467 int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base,
  468 			resource_size_t size);
  469 int pnp_auto_config_dev(struct pnp_dev *dev);
  470 int pnp_start_dev(struct pnp_dev *dev);
  471 int pnp_stop_dev(struct pnp_dev *dev);
  472 int pnp_activate_dev(struct pnp_dev *dev);
  473 int pnp_disable_dev(struct pnp_dev *dev);
  474 int pnp_range_reserved(resource_size_t start, resource_size_t end);
  475 
  476 /* protocol helpers */
  477 int pnp_is_active(struct pnp_dev *dev);
  478 int compare_pnp_id(struct pnp_id *pos, const char *id);
  479 int pnp_register_driver(struct pnp_driver *drv);
  480 void pnp_unregister_driver(struct pnp_driver *drv);
  481 
  482 #else
  483 
/*
 * CONFIG_PNP=n stubs: management calls fail with -ENODEV and queries
 * report "nothing found", so callers compile unchanged without PnP.
 */
/* device management */
static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; }
static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { }

#define pnp_platform_devices 0

/* multidevice card support */
static inline struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { return NULL; }
static inline void pnp_release_card_device(struct pnp_dev *dev) { }
static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return -ENODEV; }
static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { }

/* resource management */
static inline int pnp_possible_config(struct pnp_dev *dev, int type,
				      resource_size_t base,
				      resource_size_t size) { return 0; }
static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}

/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -ENODEV; }
static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
  512 
  513 #endif /* CONFIG_PNP */
  514 
  515 /**
  516  * module_pnp_driver() - Helper macro for registering a PnP driver
  517  * @__pnp_driver: pnp_driver struct
  518  *
  519  * Helper macro for PnP drivers which do not do anything special in module
  520  * init/exit. This eliminates a lot of boilerplate. Each module may only
  521  * use this macro once, and calling it replaces module_init() and module_exit()
  522  */
  523 #define module_pnp_driver(__pnp_driver) \
  524 	module_driver(__pnp_driver, pnp_register_driver, \
  525 				    pnp_unregister_driver)
  526 
  527 #endif /* _LINUX_PNP_H */                 1 /*
    2  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
    3  *
    4  * (C) SGI 2006, Christoph Lameter
    5  * 	Cleaned up and restructured to ease the addition of alternative
    6  * 	implementations of SLAB allocators.
    7  * (C) Linux Foundation 2008-2013
    8  *      Unified interface for all slab allocators
    9  */
   10 
   11 #ifndef _LINUX_SLAB_H
   12 #define	_LINUX_SLAB_H
   13 
   14 #include <linux/gfp.h>
   15 #include <linux/types.h>
   16 #include <linux/workqueue.h>
   17 
   18 
   19 /*
   20  * Flags to pass to kmem_cache_create().
   21  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
   22  */
   23 #define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
   24 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
   25 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
   26 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
   27 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
   28 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
   29 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
   30 /*
   31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
   32  *
   33  * This delays freeing the SLAB page by a grace period, it does _NOT_
   34  * delay object freeing. This means that if you do kmem_cache_free()
   35  * that memory location is free to be reused at any time. Thus it may
   36  * be possible to see another object there in the same RCU grace period.
   37  *
   38  * This feature only ensures the memory location backing the object
   39  * stays valid, the trick to using this is relying on an independent
   40  * object validation pass. Something like:
   41  *
   42  *  rcu_read_lock()
   43  * again:
   44  *  obj = lockless_lookup(key);
   45  *  if (obj) {
   46  *    if (!try_get_ref(obj)) // might fail for free objects
   47  *      goto again;
   48  *
   49  *    if (obj->key != key) { // not the object we expected
   50  *      put_ref(obj);
   51  *      goto again;
   52  *    }
   53  *  }
   54  *  rcu_read_unlock();
   55  *
   56  * This is useful if we need to approach a kernel structure obliquely,
   57  * from its address obtained without the usual locking. We can lock
   58  * the structure to stabilize it and check it's still at the given address,
   59  * only if we can be sure that the memory has not been meanwhile reused
   60  * for some other kind of object (which our subsystem's lock might corrupt).
   61  *
   62  * rcu_read_lock before reading the address, then rcu_read_unlock after
   63  * taking the spinlock within the structure expected at that address.
   64  */
   65 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
   66 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
   67 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
   68 
   69 /* Flag to prevent checks on free */
   70 #ifdef CONFIG_DEBUG_OBJECTS
   71 # define SLAB_DEBUG_OBJECTS	0x00400000UL
   72 #else
   73 # define SLAB_DEBUG_OBJECTS	0x00000000UL
   74 #endif
   75 
   76 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
   77 
   78 /* Don't track use of uninitialized memory */
   79 #ifdef CONFIG_KMEMCHECK
   80 # define SLAB_NOTRACK		0x01000000UL
   81 #else
   82 # define SLAB_NOTRACK		0x00000000UL
   83 #endif
   84 #ifdef CONFIG_FAILSLAB
   85 # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
   86 #else
   87 # define SLAB_FAILSLAB		0x00000000UL
   88 #endif
   89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
   90 # define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
   91 #else
   92 # define SLAB_ACCOUNT		0x00000000UL
   93 #endif
   94 
   95 #ifdef CONFIG_KASAN
   96 #define SLAB_KASAN		0x08000000UL
   97 #else
   98 #define SLAB_KASAN		0x00000000UL
   99 #endif
  100 
  101 /* The following flags affect the page allocator grouping pages by mobility */
  102 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
  103 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
  104 /*
  105  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  106  *
  107  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
  108  *
  109  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
  110  * Both make kfree a no-op.
  111  */
  112 #define ZERO_SIZE_PTR ((void *)16)
  113 
  114 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  115 				(unsigned long)ZERO_SIZE_PTR)
  116 
  117 #include <linux/kmemleak.h>
  118 #include <linux/kasan.h>
  119 
  120 struct mem_cgroup;
  121 /*
  122  * struct kmem_cache related prototypes
  123  */
  124 void __init kmem_cache_init(void);
  125 bool slab_is_available(void);
  126 
  127 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  128 			unsigned long,
  129 			void (*)(void *));
  130 void kmem_cache_destroy(struct kmem_cache *);
  131 int kmem_cache_shrink(struct kmem_cache *);
  132 
  133 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
  134 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
  135 void memcg_destroy_kmem_caches(struct mem_cgroup *);
  136 
  137 /*
  138  * Please use this macro to create slab caches. Simply specify the
  139  * name of the structure and maybe some flags that are listed above.
  140  *
  141  * The alignment of the struct determines object alignment. If you
  142  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  143  * then the objects will be properly aligned in SMP configurations.
  144  */
  145 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  146 		sizeof(struct __struct), __alignof__(struct __struct),\
  147 		(__flags), NULL)
  148 
  149 /*
  150  * Common kmalloc functions provided by all allocators
  151  */
  152 void * __must_check __krealloc(const void *, size_t, gfp_t);
  153 void * __must_check krealloc(const void *, size_t, gfp_t);
  154 void kfree(const void *);
  155 void kzfree(const void *);
  156 size_t ksize(const void *);
  157 
  158 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
  159 const char *__check_heap_object(const void *ptr, unsigned long n,
  160 				struct page *page);
  161 #else
  162 static inline const char *__check_heap_object(const void *ptr,
  163 					      unsigned long n,
  164 					      struct page *page)
  165 {
  166 	return NULL;
  167 }
  168 #endif
  169 
  170 /*
  171  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  172  * alignment larger than the alignment of a 64-bit integer.
  173  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  174  */
  175 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  176 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  177 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  178 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  179 #else
  180 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  181 #endif
  182 
  183 /*
  184  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  185  * Intended for arches that get misalignment faults even for 64 bit integer
  186  * aligned buffers.
  187  */
  188 #ifndef ARCH_SLAB_MINALIGN
  189 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  190 #endif
  191 
  192 /*
  193  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
  194  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
  195  * aligned pointers.
  196  */
  197 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
  198 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
  199 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
  200 
  201 /*
  202  * Kmalloc array related definitions
  203  */
  204 
  205 #ifdef CONFIG_SLAB
  206 /*
  207  * The largest kmalloc size supported by the SLAB allocators is
  208  * 32 megabyte (2^25) or the maximum allocatable page order if that is
  209  * less than 32 MB.
  210  *
   211  * WARNING: It's not easy to increase this value since the allocators have
  212  * to do various tricks to work around compiler limitations in order to
  213  * ensure proper constant folding.
  214  */
  215 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  216 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  217 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  218 #ifndef KMALLOC_SHIFT_LOW
  219 #define KMALLOC_SHIFT_LOW	5
  220 #endif
  221 #endif
  222 
  223 #ifdef CONFIG_SLUB
  224 /*
  225  * SLUB directly allocates requests fitting in to an order-1 page
  226  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  227  */
  228 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  229 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
  230 #ifndef KMALLOC_SHIFT_LOW
  231 #define KMALLOC_SHIFT_LOW	3
  232 #endif
  233 #endif
  234 
  235 #ifdef CONFIG_SLOB
  236 /*
  237  * SLOB passes all requests larger than one page to the page allocator.
  238  * No kmalloc array is necessary since objects of different sizes can
  239  * be allocated from the same page.
  240  */
  241 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  242 #define KMALLOC_SHIFT_MAX	30
  243 #ifndef KMALLOC_SHIFT_LOW
  244 #define KMALLOC_SHIFT_LOW	3
  245 #endif
  246 #endif
  247 
  248 /* Maximum allocatable size */
  249 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  250 /* Maximum size for which we actually use a slab cache */
  251 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
   252 /* Maximum order allocatable via the slab allocator */
  253 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
  254 
  255 /*
  256  * Kmalloc subsystem.
  257  */
  258 #ifndef KMALLOC_MIN_SIZE
  259 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  260 #endif
  261 
  262 /*
  263  * This restriction comes from byte sized index implementation.
  264  * Page size is normally 2^12 bytes and, in this case, if we want to use
  265  * byte sized index which can represent 2^8 entries, the size of the object
  266  * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
  267  * If minimum size of kmalloc is less than 16, we use it as minimum object
  268  * size and give up to use byte sized index.
  269  */
  270 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
  271                                (KMALLOC_MIN_SIZE) : 16)
  272 
  273 #ifndef CONFIG_SLOB
  274 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  275 #ifdef CONFIG_ZONE_DMA
  276 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  277 #endif
  278 
  279 /*
  280  * Figure out which kmalloc slab an allocation of a certain size
  281  * belongs to.
  282  * 0 = zero alloc
  283  * 1 =  65 .. 96 bytes
  284  * 2 = 129 .. 192 bytes
  285  * n = 2^(n-1)+1 .. 2^n
  286  */
   287 static __always_inline int kmalloc_index(size_t size)
   288 {
       	/* A zero-size request maps to index 0 (callers hand out ZERO_SIZE_PTR). */
   289 	if (!size)
   290 		return 0;
   291 
       	/* Everything up to the minimum object size uses the smallest cache. */
   292 	if (size <= KMALLOC_MIN_SIZE)
   293 		return KMALLOC_SHIFT_LOW;
   294 
       	/*
       	 * Indices 1 and 2 are the off-power-of-two 96- and 192-byte
       	 * caches; they only exist when KMALLOC_MIN_SIZE is small enough.
       	 */
   295 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
   296 		return 1;
   297 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
   298 		return 2;
       	/*
       	 * Plain power-of-two buckets. The straight-line comparison chain
       	 * lets the compiler fold the whole function down to a constant
       	 * when @size is a compile-time constant (see the note above on
       	 * constant folding).
       	 */
   299 	if (size <=          8) return 3;
   300 	if (size <=         16) return 4;
   301 	if (size <=         32) return 5;
   302 	if (size <=         64) return 6;
   303 	if (size <=        128) return 7;
   304 	if (size <=        256) return 8;
   305 	if (size <=        512) return 9;
   306 	if (size <=       1024) return 10;
   307 	if (size <=   2 * 1024) return 11;
   308 	if (size <=   4 * 1024) return 12;
   309 	if (size <=   8 * 1024) return 13;
   310 	if (size <=  16 * 1024) return 14;
   311 	if (size <=  32 * 1024) return 15;
   312 	if (size <=  64 * 1024) return 16;
   313 	if (size <= 128 * 1024) return 17;
   314 	if (size <= 256 * 1024) return 18;
   315 	if (size <= 512 * 1024) return 19;
   316 	if (size <= 1024 * 1024) return 20;
   317 	if (size <=  2 * 1024 * 1024) return 21;
   318 	if (size <=  4 * 1024 * 1024) return 22;
   319 	if (size <=  8 * 1024 * 1024) return 23;
   320 	if (size <=  16 * 1024 * 1024) return 24;
   321 	if (size <=  32 * 1024 * 1024) return 25;
   322 	if (size <=  64 * 1024 * 1024) return 26;
       	/* Larger than any kmalloc cache: a caller bug. */
   323 	BUG();
   324 
   325 	/* Will never be reached. Needed because the compiler may complain */
   326 	return -1;
   327 }
  328 #endif /* !CONFIG_SLOB */
  329 
  330 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
  331 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
  332 void kmem_cache_free(struct kmem_cache *, void *);
  333 
  334 /*
  335  * Bulk allocation and freeing operations. These are accelerated in an
  336  * allocator specific way to avoid taking locks repeatedly or building
  337  * metadata structures unnecessarily.
  338  *
  339  * Note that interrupts must be enabled when calling these functions.
  340  */
  341 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  342 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
  343 
  344 /*
  345  * Caller must not use kfree_bulk() on memory not originally allocated
  346  * by kmalloc(), because the SLOB allocator cannot handle this.
  347  */
   348 static __always_inline void kfree_bulk(size_t size, void **p)
   349 {
       	/* NULL cache argument: the objects came from kmalloc() (see note above). */
   350 	kmem_cache_free_bulk(NULL, size, p);
   351 }
  352 
  353 #ifdef CONFIG_NUMA
  354 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
  355 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
  356 #else
       /* !CONFIG_NUMA fallback: only one node exists, so @node is ignored. */
   357 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
   358 {
   359 	return __kmalloc(size, flags);
   360 }
  361 
       /* !CONFIG_NUMA fallback: @node is ignored, allocate from @s normally. */
   362 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
   363 {
   364 	return kmem_cache_alloc(s, flags);
   365 }
  366 #endif
  367 
  368 #ifdef CONFIG_TRACING
  369 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
  370 
  371 #ifdef CONFIG_NUMA
  372 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  373 					   gfp_t gfpflags,
  374 					   int node, size_t size) __assume_slab_alignment __malloc;
  375 #else
       /* CONFIG_TRACING && !CONFIG_NUMA: @node is ignored, reuse the tracing alloc. */
   376 static __always_inline void *
   377 kmem_cache_alloc_node_trace(struct kmem_cache *s,
   378 			      gfp_t gfpflags,
   379 			      int node, size_t size)
   380 {
   381 	return kmem_cache_alloc_trace(s, gfpflags, size);
   382 }
  383 #endif /* CONFIG_NUMA */
  384 
  385 #else /* CONFIG_TRACING */
  386 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  387 		gfp_t flags, size_t size)
  388 {
  389 	void *ret = kmem_cache_alloc(s, flags);
  390 
  391 	kasan_kmalloc(s, ret, size, flags);
  392 	return ret;
  393 }
  394 
  395 static __always_inline void *
  396 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  397 			      gfp_t gfpflags,
  398 			      int node, size_t size)
  399 {
  400 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
  401 
  402 	kasan_kmalloc(s, ret, size, gfpflags);
  403 	return ret;
  404 }
  405 #endif /* CONFIG_TRACING */
  406 
  407 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  408 
  409 #ifdef CONFIG_TRACING
  410 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  411 #else
       /* !CONFIG_TRACING: plain pass-through to kmalloc_order(). */
   412 static __always_inline void *
   413 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
   414 {
   415 	return kmalloc_order(size, flags, order);
   416 }
  417 #endif
  418 
  419 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  420 {
  421 	unsigned int order = get_order(size);
  422 	return kmalloc_order_trace(size, flags, order);
  423 }
  424 
  425 /**
  426  * kmalloc - allocate memory
  427  * @size: how many bytes of memory are required.
  428  * @flags: the type of memory to allocate.
  429  *
  430  * kmalloc is the normal method of allocating memory
  431  * for objects smaller than page size in the kernel.
  432  *
  433  * The @flags argument may be one of:
  434  *
  435  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
  436  *
  437  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
  438  *
  439  * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
  440  *   For example, use this inside interrupt handlers.
  441  *
  442  * %GFP_HIGHUSER - Allocate pages from high memory.
  443  *
  444  * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
  445  *
  446  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  447  *
  448  * %GFP_NOWAIT - Allocation will not sleep.
  449  *
  450  * %__GFP_THISNODE - Allocate node-local memory only.
  451  *
  452  * %GFP_DMA - Allocation suitable for DMA.
  453  *   Should only be used for kmalloc() caches. Otherwise, use a
  454  *   slab created with SLAB_DMA.
  455  *
  456  * Also it is possible to set different flags by OR'ing
  457  * in one or more of the following additional @flags:
  458  *
  459  * %__GFP_COLD - Request cache-cold pages instead of
  460  *   trying to return cache-warm pages.
  461  *
  462  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  463  *
  464  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  465  *   (think twice before using).
  466  *
  467  * %__GFP_NORETRY - If memory is not immediately available,
  468  *   then give up at once.
  469  *
  470  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  471  *
  472  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  473  *
  474  * There are other flags available as well, but these are not intended
  475  * for general use, and so are not documented here. For a full list of
  476  * potential flags, always refer to linux/gfp.h.
  477  */
   478 static __always_inline void *kmalloc(size_t size, gfp_t flags)
   479 {
       	/*
       	 * Fast path: for a compile-time-constant @size the right kmalloc
       	 * cache can be selected at compile time.
       	 */
   480 	if (__builtin_constant_p(size)) {
       		/* Over-sized requests bypass the caches (kmalloc_large()). */
   481 		if (size > KMALLOC_MAX_CACHE_SIZE)
   482 			return kmalloc_large(size, flags);
   483 #ifndef CONFIG_SLOB
       		/* GFP_DMA requests fall through to __kmalloc() / the DMA caches. */
   484 		if (!(flags & GFP_DMA)) {
   485 			int index = kmalloc_index(size);
   486 
       			/* Index 0 == zero-size request (see kmalloc_index()). */
   487 			if (!index)
   488 				return ZERO_SIZE_PTR;
   489 
   490 			return kmem_cache_alloc_trace(kmalloc_caches[index],
   491 					flags, size);
   492 		}
   493 #endif
   494 	}
       	/* Non-constant size (or DMA/SLOB): resolve the cache at run time. */
   495 	return __kmalloc(size, flags);
   496 }
  497 
  498 /*
  499  * Determine size used for the nth kmalloc cache.
  500  * return size or 0 if a kmalloc cache for that
  501  * size does not exist
  502  */
   503 static __always_inline int kmalloc_size(int n)
   504 {
   505 #ifndef CONFIG_SLOB
       	/* Indices above 2 are the plain power-of-two caches. */
   506 	if (n > 2)
   507 		return 1 << n;
   508 
       	/* Index 1 is the 96-byte cache, present only if KMALLOC_MIN_SIZE <= 32. */
   509 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
   510 		return 96;
   511 
       	/* Index 2 is the 192-byte cache, present only if KMALLOC_MIN_SIZE <= 64. */
   512 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
   513 		return 192;
   514 #endif
       	/* No such cache (n == 0, disabled odd-size cache, or SLOB build). */
   515 	return 0;
   516 }
  517 
       /*
        * kmalloc_node - allocate memory from a specific NUMA node; mirrors
        * kmalloc()'s compile-time cache selection for constant sizes.
        */
   518 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
   519 {
   520 #ifndef CONFIG_SLOB
       	/* Constant, cache-sized, non-DMA requests pick their cache statically. */
   521 	if (__builtin_constant_p(size) &&
   522 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
   523 		int i = kmalloc_index(size);
   524 
       		/* Index 0 == zero-size request (see kmalloc_index()). */
   525 		if (!i)
   526 			return ZERO_SIZE_PTR;
   527 
   528 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
   529 						flags, node, size);
   530 	}
   531 #endif
       	/* Everything else is resolved at run time. */
   532 	return __kmalloc_node(size, flags, node);
   533 }
  534 
       /* RCU-freed array of per-memcg child caches, held by a root cache. */
   535 struct memcg_cache_array {
   536 	struct rcu_head rcu;	/* defers freeing of a replaced array (see note below) */
   537 	struct kmem_cache *entries[0];	/* variable-length tail of cache pointers */
   538 };
  539 
  540 /*
  541  * This is the main placeholder for memcg-related information in kmem caches.
  542  * Both the root cache and the child caches will have it. For the root cache,
  543  * this will hold a dynamically allocated array large enough to hold
  544  * information about the currently limited memcgs in the system. To allow the
  545  * array to be accessed without taking any locks, on relocation we free the old
  546  * version only after a grace period.
  547  *
  548  * Child caches will hold extra metadata needed for its operation. Fields are:
  549  *
  550  * @memcg: pointer to the memcg this cache belongs to
  551  * @root_cache: pointer to the global, root cache, this cache was derived from
  552  *
  553  * Both root and child caches of the same kind are linked into a list chained
  554  * through @list.
  555  */
   556 struct memcg_cache_params {
   557 	bool is_root_cache;	/* distinguishes root caches from memcg child caches */
   558 	struct list_head list;	/* links root and child caches of the same kind */
   559 	union {
       		/* root cache: RCU-protected array of child caches (see above) */
   560 		struct memcg_cache_array __rcu *memcg_caches;
       		/* child cache: its owning memcg and the root it was derived from */
   561 		struct {
   562 			struct mem_cgroup *memcg;
   563 			struct kmem_cache *root_cache;
   564 		};
   565 	};
   566 };
  567 
  568 int memcg_update_all_caches(int num_memcgs);
  569 
  570 /**
  571  * kmalloc_array - allocate memory for an array.
  572  * @n: number of elements.
  573  * @size: element size.
  574  * @flags: the type of memory to allocate (see kmalloc).
  575  */
   576 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
   577 {
       	/* Reject requests whose total byte count would overflow size_t. */
   578 	if (size != 0 && n > SIZE_MAX / size)
   579 		return NULL;
       	/* Constant-foldable totals can take kmalloc()'s compile-time fast path. */
   580 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
   581 		return kmalloc(n * size, flags);
   582 	return __kmalloc(n * size, flags);
   583 }
  584 
  585 /**
  586  * kcalloc - allocate memory for an array. The memory is set to zero.
  587  * @n: number of elements.
  588  * @size: element size.
  589  * @flags: the type of memory to allocate (see kmalloc).
  590  */
  591 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  592 {
  593 	return kmalloc_array(n, size, flags | __GFP_ZERO);
  594 }
  595 
  596 /*
  597  * kmalloc_track_caller is a special version of kmalloc that records the
  598  * calling function of the routine calling it for slab leak tracking instead
  599  * of just the calling function (confusing, eh?).
  600  * It's useful when the call to kmalloc comes from a widely-used standard
  601  * allocator where we care about the real place the memory allocation
  602  * request comes from.
  603  */
  604 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  605 #define kmalloc_track_caller(size, flags) \
  606 	__kmalloc_track_caller(size, flags, _RET_IP_)
  607 
  608 #ifdef CONFIG_NUMA
  609 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  610 #define kmalloc_node_track_caller(size, flags, node) \
  611 	__kmalloc_node_track_caller(size, flags, node, \
  612 			_RET_IP_)
  613 
  614 #else /* CONFIG_NUMA */
  615 
  616 #define kmalloc_node_track_caller(size, flags, node) \
  617 	kmalloc_track_caller(size, flags)
  618 
  619 #endif /* CONFIG_NUMA */
  620 
  621 /*
  622  * Shortcuts
  623  */
  624 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  625 {
  626 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  627 }
  628 
  629 /**
  630  * kzalloc - allocate memory. The memory is set to zero.
  631  * @size: how many bytes of memory are required.
  632  * @flags: the type of memory to allocate (see kmalloc).
  633  */
  634 static inline void *kzalloc(size_t size, gfp_t flags)
  635 {
  636 	return kmalloc(size, flags | __GFP_ZERO);
  637 }
  638 
  639 /**
  640  * kzalloc_node - allocate zeroed memory from a particular memory node.
  641  * @size: how many bytes of memory are required.
  642  * @flags: the type of memory to allocate (see kmalloc).
  643  * @node: memory node from which to allocate
  644  */
  645 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  646 {
  647 	return kmalloc_node(size, flags | __GFP_ZERO, node);
  648 }
  649 
  650 unsigned int kmem_cache_size(struct kmem_cache *s);
  651 void __init kmem_cache_init_late(void);
  652 
  653 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
  654 int slab_prepare_cpu(unsigned int cpu);
  655 int slab_dead_cpu(unsigned int cpu);
  656 #else
  657 #define slab_prepare_cpu	NULL
  658 #define slab_dead_cpu		NULL
  659 #endif
  660 
  661 #endif	/* _LINUX_SLAB_H */                 1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
   121  * Despite its name it doesn't necessarily have to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with LOADs and STOREs inside this section.
  124  * spin_lock() is the one-way barrier, this LOAD can not escape out
  125  * of the region. So the default implementation simply ensures that
  126  * a STORE can not move into the critical section, smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /**
  134  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  135  * @lock: the spinlock in question.
  136  */
  137 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  138 
  139 #ifdef CONFIG_DEBUG_SPINLOCK
  140  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  142  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  143  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  144 #else
       /* Non-debug implementation: annotate for sparse, then take the arch lock. */
   145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
   146 {
   147 	__acquire(lock);
   148 	arch_spin_lock(&lock->raw_lock);
   149 }
  150 
       /*
        * Like do_raw_spin_lock(), but forwards the caller's saved IRQ @flags
        * to the architecture — some arches use them to re-enable interrupts
        * while spinning (see arch_spin_lock_flags()).
        */
   151 static inline void
   152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
   153 {
   154 	__acquire(lock);
   155 	arch_spin_lock_flags(&lock->raw_lock, *flags);
   156 }
  157 
       /* Try once to take the raw lock; nonzero means it was acquired. */
   158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
   159 {
   160 	return arch_spin_trylock(&(lock)->raw_lock);
   161 }
  162 
       /* Release the arch lock, then drop the sparse annotation. */
   163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
   164 {
   165 	arch_spin_unlock(&lock->raw_lock);
   166 	__release(lock);
   167 }
  168 #endif
  169 
  170 /*
  171  * Define the various spin_lock methods.  Note we define these
  172  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  173  * various methods are defined as nops in the case they are not
  174  * required.
  175  */
  176 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  177 
  178 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  179 
  180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  181 # define raw_spin_lock_nested(lock, subclass) \
  182 	_raw_spin_lock_nested(lock, subclass)
  183 # define raw_spin_lock_bh_nested(lock, subclass) \
  184 	_raw_spin_lock_bh_nested(lock, subclass)
  185 
  186 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  187 	 do {								\
  188 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  189 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  190 	 } while (0)
  191 #else
  192 /*
  193  * Always evaluate the 'subclass' argument to avoid that the compiler
  194  * warns about set-but-not-used variables when building with
  195  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  196  */
  197 # define raw_spin_lock_nested(lock, subclass)		\
  198 	_raw_spin_lock(((void)(subclass), (lock)))
  199 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  200 # define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
  201 #endif
  202 
  203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  204 
  205 #define raw_spin_lock_irqsave(lock, flags)			\
  206 	do {						\
  207 		typecheck(unsigned long, flags);	\
  208 		flags = _raw_spin_lock_irqsave(lock);	\
  209 	} while (0)
  210 
  211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  213 	do {								\
  214 		typecheck(unsigned long, flags);			\
  215 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  216 	} while (0)
  217 #else
  218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  219 	do {								\
  220 		typecheck(unsigned long, flags);			\
  221 		flags = _raw_spin_lock_irqsave(lock);			\
  222 	} while (0)
  223 #endif
  224 
  225 #else
  226 
  227 #define raw_spin_lock_irqsave(lock, flags)		\
  228 	do {						\
  229 		typecheck(unsigned long, flags);	\
  230 		_raw_spin_lock_irqsave(lock, flags);	\
  231 	} while (0)
  232 
  233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  234 	raw_spin_lock_irqsave(lock, flags)
  235 
  236 #endif
  237 
  238 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  239 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  240 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  241 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  242 
  243 #define raw_spin_unlock_irqrestore(lock, flags)		\
  244 	do {							\
  245 		typecheck(unsigned long, flags);		\
  246 		_raw_spin_unlock_irqrestore(lock, flags);	\
  247 	} while (0)
  248 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  249 
  250 #define raw_spin_trylock_bh(lock) \
  251 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  252 
  253 #define raw_spin_trylock_irq(lock) \
  254 ({ \
  255 	local_irq_disable(); \
  256 	raw_spin_trylock(lock) ? \
  257 	1 : ({ local_irq_enable(); 0;  }); \
  258 })
  259 
  260 #define raw_spin_trylock_irqsave(lock, flags) \
  261 ({ \
  262 	local_irq_save(flags); \
  263 	raw_spin_trylock(lock) ? \
  264 	1 : ({ local_irq_restore(flags); 0; }); \
  265 })
  266 
  267 /**
  268  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  269  * @lock: the spinlock in question.
  270  */
  271 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  272 
  273 /* Include rwlock functions */
  274 #include <linux/rwlock.h>
  275 
  276 /*
  277  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  278  */
  279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  280 # include <linux/spinlock_api_smp.h>
  281 #else
  282 # include <linux/spinlock_api_up.h>
  283 #endif
  284 
  285 /*
  286  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  287  */
  288 
  289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  290 {
  291 	return &lock->rlock;
  292 }
  293 
  294 #define spin_lock_init(_lock)				\
  295 do {							\
  296 	spinlock_check(_lock);				\
  297 	raw_spin_lock_init(&(_lock)->rlock);		\
  298 } while (0)
  299 
  300 static __always_inline void spin_lock(spinlock_t *lock)
  301 {
  302 	raw_spin_lock(&lock->rlock);
  303 }
  304 
  305 static __always_inline void spin_lock_bh(spinlock_t *lock)
  306 {
  307 	raw_spin_lock_bh(&lock->rlock);
  308 }
  309 
  310 static __always_inline int spin_trylock(spinlock_t *lock)
  311 {
  312 	return raw_spin_trylock(&lock->rlock);
  313 }
  314 
  315 #define spin_lock_nested(lock, subclass)			\
  316 do {								\
  317 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  318 } while (0)
  319 
  320 #define spin_lock_bh_nested(lock, subclass)			\
  321 do {								\
  322 	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
  323 } while (0)
  324 
  325 #define spin_lock_nest_lock(lock, nest_lock)				\
  326 do {									\
  327 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  328 } while (0)
  329 
  330 static __always_inline void spin_lock_irq(spinlock_t *lock)
  331 {
  332 	raw_spin_lock_irq(&lock->rlock);
  333 }
  334 
  335 #define spin_lock_irqsave(lock, flags)				\
  336 do {								\
  337 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  338 } while (0)
  339 
  340 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  341 do {									\
  342 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  343 } while (0)
  344 
  345 static __always_inline void spin_unlock(spinlock_t *lock)
  346 {
  347 	raw_spin_unlock(&lock->rlock);
  348 }
  349 
  350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
  351 {
  352 	raw_spin_unlock_bh(&lock->rlock);
  353 }
  354 
  355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
  356 {
  357 	raw_spin_unlock_irq(&lock->rlock);
  358 }
  359 
  360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  361 {
  362 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  363 }
  364 
  365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
  366 {
  367 	return raw_spin_trylock_bh(&lock->rlock);
  368 }
  369 
  370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
  371 {
  372 	return raw_spin_trylock_irq(&lock->rlock);
  373 }
  374 
  375 #define spin_trylock_irqsave(lock, flags)			\
  376 ({								\
  377 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  378 })
  379 
  380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
  381 {
  382 	raw_spin_unlock_wait(&lock->rlock);
  383 }
  384 
  385 static __always_inline int spin_is_locked(spinlock_t *lock)
  386 {
  387 	return raw_spin_is_locked(&lock->rlock);
  388 }
  389 
  390 static __always_inline int spin_is_contended(spinlock_t *lock)
  391 {
  392 	return raw_spin_is_contended(&lock->rlock);
  393 }
  394 
  395 static __always_inline int spin_can_lock(spinlock_t *lock)
  396 {
  397 	return raw_spin_can_lock(&lock->rlock);
  398 }
  399 
  400 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  401 
  402 /*
  403  * Pull the atomic_t declaration:
  404  * (asm-mips/atomic.h needs above definitions)
  405  */
  406 #include <linux/atomic.h>
  407 /**
  408  * atomic_dec_and_lock - lock on reaching reference count zero
  409  * @atomic: the atomic counter
  410  * @lock: the spinlock in question
  411  *
  412  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  413  * @lock.  Returns false for all other cases.
  414  */
  415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  416 #define atomic_dec_and_lock(atomic, lock) \
  417 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
  418 
  419 #endif /* __LINUX_SPINLOCK_H */            | 
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an actual error in your driver.
The "Error trace" column shows a path along which the given rule is violated. You can expand or collapse classes of entities by clicking the corresponding checkboxes in the main menu or in the advanced "Others" menu, and you can expand or collapse each individual entity by clicking +/-. Hovering over some entities shows tooltips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.
The "Source code" column shows the contents of the files related to the error trace: your driver's source code (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs indicate the currently open file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's contents.
| Ядро | Модуль | Правило | Верификатор | Вердикт | Статус | Время создания | Описание проблемы | 
| linux-4.9-rc1.tar.xz | drivers/mmc/host/wbsd.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-11-12 00:45:42 | L0255 | 
Комментарий
Reported: 12 Nov 2016
[В начало]