Bug
Error # 177
Error trace
         {    20     typedef unsigned char __u8;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    29     typedef long long __s64;    30     typedef unsigned long long __u64;    15     typedef signed char s8;    16     typedef unsigned char u8;    18     typedef short s16;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;    28     typedef __u16 __le16;   291     struct kernel_symbol {   unsigned long value;   const char *name; } ;    34     struct module ;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;   102     typedef __s32 int32_t;   108     typedef __u32 uint32_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   152     typedef u64 dma_addr_t;   157     typedef unsigned int gfp_t;   158     typedef unsigned int fmode_t;   161     typedef u64 phys_addr_t;   166     typedef phys_addr_t resource_size_t;   176     struct __anonstruct_atomic_t_6 {   int counter; } ;   176     typedef struct __anonstruct_atomic_t_6 atomic_t;   181     struct __anonstruct_atomic64_t_7 {   long counter; } ;   181     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   182     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   187     struct hlist_node ;   187     struct hlist_head {   struct hlist_node *first; } ;   191     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   202     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;   115     typedef void (*ctor_fn_t)();   283     struct _ddebug {   const char *modname;   const char *function;   const char *filename;   const char *format;   unsigned int lineno;   unsigned char flags; } ;    58     struct device ;   474     struct file_operations ;   486     struct completion ;   487     struct pt_regs ;    27     union __anonunion___u_9 {   struct list_head *__val;   char __c[1U]; } ;    65     union __anonunion___u_11 {   struct list_head *__val;   char __c[1U]; } ;   105     union __anonunion___u_13 {   struct list_head *__val;   char __c[1U]; } ;   202     union __anonunion___u_15 {   struct list_head *__val;   char __c[1U]; } ;   546     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;   131     struct timespec ;   132     struct compat_timespec ;   133     struct pollfd ;   134     
struct __anonstruct_futex_27 {   u32 *uaddr;   u32 val;   u32 flags;   u32 bitset;   u64 time;   u32 *uaddr2; } ;   134     struct __anonstruct_nanosleep_28 {   clockid_t clockid;   struct timespec *rmtp;   struct compat_timespec *compat_rmtp;   u64 expires; } ;   134     struct __anonstruct_poll_29 {   struct pollfd *ufds;   int nfds;   int has_timeout;   unsigned long tv_sec;   unsigned long tv_nsec; } ;   134     union __anonunion____missing_field_name_26 {   struct __anonstruct_futex_27 futex;   struct __anonstruct_nanosleep_28 nanosleep;   struct __anonstruct_poll_29 poll; } ;   134     struct restart_block {   long int (*fn)(struct restart_block *);   union __anonunion____missing_field_name_26 __annonCompField4; } ;    50     struct task_struct ;    39     struct page ;    26     struct mm_struct ;   288     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    66     struct __anonstruct____missing_field_name_32 {   unsigned int a;   unsigned int b; } ;    66     struct __anonstruct____missing_field_name_33 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    66     union __anonunion____missing_field_name_31 {   struct __anonstruct____missing_field_name_32 __annonCompField5;   struct __anonstruct____missing_field_name_33 __annonCompField6; } ;    66     struct desc_struct {   union __anonunion____missing_field_name_31 __annonCompField7; } ;    13     typedef unsigned long pteval_t;    14     typedef unsigned long pmdval_t;    15     typedef unsigned long pudval_t;    16     typedef unsigned long pgdval_t;    17     typedef unsigned long pgprotval_t;    19     struct __anonstruct_pte_t_34 {   pteval_t pte; } ;    19     typedef struct __anonstruct_pte_t_34 pte_t;    21     struct pgprot {   pgprotval_t pgprot; } ;   256     typedef struct pgprot pgprot_t;   258     struct __anonstruct_pgd_t_35 {   pgdval_t pgd; } ;   258     typedef struct __anonstruct_pgd_t_35 pgd_t;   276     struct __anonstruct_pud_t_36 {   pudval_t pud; } ;   276     typedef struct __anonstruct_pud_t_36 pud_t;   297     struct __anonstruct_pmd_t_37 {   pmdval_t pmd; } ;   297     typedef struct __anonstruct_pmd_t_37 pmd_t;   423     typedef struct page *pgtable_t;   434     struct file ;   445     struct seq_file ;   481     struct thread_struct ;   483     struct cpumask ;    20     struct qspinlock {   atomic_t val; } ;    33     typedef struct qspinlock arch_spinlock_t;    34     struct qrwlock {   atomic_t cnts;   arch_spinlock_t wait_lock; } ;    14     typedef struct qrwlock arch_rwlock_t;   247     struct math_emu_info {   long ___orig_eip;   struct pt_regs *regs; } ;    83     struct static_key {   atomic_t enabled; } ;    23     typedef atomic64_t atomic_long_t;   359     struct cpumask {   unsigned long bits[128U]; } ;    15     typedef struct cpumask cpumask_t;   657     typedef struct cpumask *cpumask_var_t;    22     struct tracepoint_func {   void *func;   void *data;   int prio; } ;    28     
struct tracepoint {   const char *name;   struct static_key key;   int (*regfunc)();   void (*unregfunc)();   struct tracepoint_func *funcs; } ;   233     struct fregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;    26     struct __anonstruct____missing_field_name_61 {   u64 rip;   u64 rdp; } ;    26     struct __anonstruct____missing_field_name_62 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;    26     union __anonunion____missing_field_name_60 {   struct __anonstruct____missing_field_name_61 __annonCompField13;   struct __anonstruct____missing_field_name_62 __annonCompField14; } ;    26     union __anonunion____missing_field_name_63 {   u32 padding1[12U];   u32 sw_reserved[12U]; } ;    26     struct fxregs_state {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion____missing_field_name_60 __annonCompField15;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion____missing_field_name_63 __annonCompField16; } ;    66     struct swregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   227     struct xstate_header {   u64 xfeatures;   u64 xcomp_bv;   u64 reserved[6U]; } ;   233     struct xregs_state {   struct fxregs_state i387;   struct xstate_header header;   u8 extended_state_area[0U]; } ;   254     union fpregs_state {   struct fregs_state fsave;   struct fxregs_state fxsave;   struct swregs_state soft;   struct xregs_state xsave;   u8 __padding[4096U]; } ;   271     struct fpu {   unsigned int last_cpu;   unsigned char fpstate_active;   unsigned char fpregs_active;   union fpregs_state state; } ;   181     struct seq_operations ;   415     struct perf_event ;   420     struct __anonstruct_mm_segment_t_75 {   unsigned long seg; } ;   420     typedef struct __anonstruct_mm_segment_t_75 mm_segment_t;   421     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   u32 status;   unsigned long fsbase;   unsigned long gsbase;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   mm_segment_t addr_limit;   unsigned char sig_on_uaccess_err;   unsigned char uaccess_err;   struct fpu fpu; } ;    48     struct thread_info {   unsigned long flags; } ;    33     struct lockdep_map ;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    28     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct hlist_node hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned int version;   unsigned long ops;   const char *name;   int name_version;   unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     
struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int cpu;   unsigned long ip; } ;   207     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references;   unsigned int pin_count; } ;   593     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct raw_spinlock raw_spinlock_t;    33     struct __anonstruct____missing_field_name_77 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion____missing_field_name_76 {   struct raw_spinlock rlock;   struct __anonstruct____missing_field_name_77 __annonCompField19; } ;    33     struct spinlock {   union __anonunion____missing_field_name_76 __annonCompField20; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_78 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_78 rwlock_t;   408     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    52     typedef struct seqcount seqcount_t;   601     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;     7     typedef __s64 time64_t;    83     struct user_namespace ;    22     struct __anonstruct_kuid_t_94 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_94 kuid_t;    27     struct __anonstruct_kgid_t_95 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_95 kgid_t;   139     struct kstat {   u32 result_mask;   umode_t mode;   unsigned int nlink;   uint32_t blksize;   u64 attributes;   u64 ino;   dev_t dev;   dev_t rdev;   kuid_t uid;   kgid_t gid;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   struct timespec btime;   u64 blocks; } ;    48     struct vm_area_struct ;    39     struct __wait_queue_head {   spinlock_t lock;   struct list_head task_list; } ;    44     typedef struct __wait_queue_head wait_queue_head_t;    97     struct __anonstruct_nodemask_t_96 {   unsigned long bits[16U]; } ;    97     typedef struct __anonstruct_nodemask_t_96 nodemask_t;   247     typedef unsigned int isolate_mode_t;    13     struct optimistic_spin_queue {   atomic_t tail; } ;    39     struct ww_acquire_ctx ;    40     struct mutex {   atomic_long_t owner;   spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct list_head wait_list;   void *magic;   struct lockdep_map dep_map; } ;    72     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   struct ww_acquire_ctx *ww_ctx;   void *magic; } ;   229     struct rw_semaphore ;   230     struct rw_semaphore {   atomic_long_t count;   struct list_head wait_list;   raw_spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct task_struct *owner;   struct lockdep_map dep_map; } ;    28     typedef s64 ktime_t;  1109     struct timer_list {   struct hlist_node entry;   unsigned long expires;   void (*function)(unsigned long);   unsigned long data;   u32 flags;   struct lockdep_map lockdep_map; } ;   211     struct hrtimer ;   212     enum hrtimer_restart ;   235     
struct workqueue_struct ;   236     struct work_struct ;    54     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   107     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq;   int cpu; } ;   217     struct resource ;    68     struct resource {   resource_size_t start;   resource_size_t end;   const char *name;   unsigned long flags;   unsigned long desc;   struct resource *parent;   struct resource *sibling;   struct resource *child; } ;    38     struct ldt_struct ;    38     struct vdso_image ;    38     struct __anonstruct_mm_context_t_161 {   struct ldt_struct *ldt;   unsigned short ia32_compat;   struct mutex lock;   void *vdso;   const struct vdso_image *vdso_image;   atomic_t perf_rdpmc_allowed;   u16 pkey_allocation_map;   s16 execute_only_pkey;   void *bd_addr; } ;    38     typedef struct __anonstruct_mm_context_t_161 mm_context_t;  1266     struct llist_node ;    69     struct llist_node {   struct llist_node *next; } ;   551     struct rb_node {   unsigned long __rb_parent_color;   struct rb_node *rb_right;   struct rb_node *rb_left; } ;    41     struct rb_root {   struct rb_node *rb_node; } ;   835     struct nsproxy ;    37     struct cred ;    19     struct vmacache {   u32 seqnum;   struct vm_area_struct *vmas[4U]; } ;    41     struct task_rss_stat {   int events;   int count[4U]; } ;    49     struct mm_rss_stat {   atomic_long_t count[4U]; } ;    54     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;    61     struct tlbflush_unmap_batch {   struct cpumask cpumask;   bool flush_required;   bool writable; } ;    85     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   108     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct____missing_field_name_215 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct __anonstruct____missing_field_name_216 {   struct callback_head dup_xol_work;   unsigned long dup_xol_addr; } ;    73     union __anonunion____missing_field_name_214 {   struct __anonstruct____missing_field_name_215 __annonCompField35;   struct __anonstruct____missing_field_name_216 __annonCompField36; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion____missing_field_name_214 __annonCompField37;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    95     struct return_instance {   struct uprobe *uprobe;   unsigned long func;   unsigned long stack;   unsigned long orig_ret_vaddr;   bool chained;   struct return_instance *next; } ;   111     struct xol_area ;   112     struct uprobes_state {   struct xol_area *xol_area; } ;   151     struct address_space ;   152     struct mem_cgroup ;   153     union __anonunion____missing_field_name_217 {   struct address_space *mapping;   void *s_mem;   atomic_t compound_mapcount; } ;   153     union __anonunion____missing_field_name_218 {   unsigned long index;   void *freelist; } ;   153     struct __anonstruct____missing_field_name_222 {   unsigned short inuse;   unsigned short 
objects;   unsigned char frozen; } ;   153     union __anonunion____missing_field_name_221 {   atomic_t _mapcount;   unsigned int active;   struct __anonstruct____missing_field_name_222 __annonCompField40;   int units; } ;   153     struct __anonstruct____missing_field_name_220 {   union __anonunion____missing_field_name_221 __annonCompField41;   atomic_t _refcount; } ;   153     union __anonunion____missing_field_name_219 {   unsigned long counters;   struct __anonstruct____missing_field_name_220 __annonCompField42; } ;   153     struct dev_pagemap ;   153     struct __anonstruct____missing_field_name_224 {   struct page *next;   int pages;   int pobjects; } ;   153     struct __anonstruct____missing_field_name_225 {   unsigned long compound_head;   unsigned int compound_dtor;   unsigned int compound_order; } ;   153     struct __anonstruct____missing_field_name_226 {   unsigned long __pad;   pgtable_t pmd_huge_pte; } ;   153     union __anonunion____missing_field_name_223 {   struct list_head lru;   struct dev_pagemap *pgmap;   struct __anonstruct____missing_field_name_224 __annonCompField44;   struct callback_head callback_head;   struct __anonstruct____missing_field_name_225 __annonCompField45;   struct __anonstruct____missing_field_name_226 __annonCompField46; } ;   153     struct kmem_cache ;   153     union __anonunion____missing_field_name_227 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache; } ;   153     struct page {   unsigned long flags;   union __anonunion____missing_field_name_217 __annonCompField38;   union __anonunion____missing_field_name_218 __annonCompField39;   union __anonunion____missing_field_name_219 __annonCompField43;   union __anonunion____missing_field_name_223 __annonCompField47;   union __anonunion____missing_field_name_227 __annonCompField48;   struct mem_cgroup *mem_cgroup; } ;   266     struct userfaultfd_ctx ;   266     struct vm_userfaultfd_ctx {   struct userfaultfd_ctx *ctx; } ;   273     struct __anonstruct_shared_228 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   273     struct anon_vma ;   273     struct vm_operations_struct ;   273     struct mempolicy ;   273     struct vm_area_struct {   unsigned long vm_start;   unsigned long vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   struct __anonstruct_shared_228 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy;   struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;   346     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   351     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   357     struct kioctx_table ;   358     struct linux_binfmt ;   358     struct mmu_notifier_mm ;   358     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end;   pgd_t *pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   atomic_long_t nr_pmds;   int map_count;   
spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long data_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct user_namespace *user_ns;   struct file *exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state;   atomic_long_t hugetlb_usage;   struct work_struct async_put_work; } ;   544     struct vm_fault ;   598     struct vdso_image {   void *data;   unsigned long size;   unsigned long alt;   unsigned long alt_len;   long sym_vvar_start;   long sym_vvar_page;   long sym_hpet_page;   long sym_pvclock_page;   long sym_VDSO32_NOTE_MASK;   long sym___kernel_sigreturn;   long sym___kernel_rt_sigreturn;   long sym___kernel_vsyscall;   long sym_int80_landing_pad; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    18     typedef __u64 Elf64_Off;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;   219     struct elf64_hdr {   unsigned char e_ident[16U];   Elf64_Half e_type;   Elf64_Half e_machine;   Elf64_Word e_version;   Elf64_Addr e_entry;   Elf64_Off e_phoff;   Elf64_Off e_shoff;   Elf64_Word e_flags;   Elf64_Half e_ehsize;   Elf64_Half e_phentsize;   Elf64_Half e_phnum;   Elf64_Half e_shentsize;   Elf64_Half e_shnum;   Elf64_Half e_shstrndx; } ;   235     typedef struct elf64_hdr Elf64_Ehdr;   314     struct elf64_shdr {   Elf64_Word sh_name;   Elf64_Word sh_type;   Elf64_Xword sh_flags;   Elf64_Addr sh_addr;   Elf64_Off sh_offset;   Elf64_Xword sh_size;   Elf64_Word sh_link;   Elf64_Word sh_info;   Elf64_Xword sh_addralign;   Elf64_Xword sh_entsize; } ;   326     typedef struct elf64_shdr Elf64_Shdr;    65     struct radix_tree_root ;    65     union __anonunion____missing_field_name_233 {   struct list_head private_list;   struct callback_head callback_head; } ;    65     struct radix_tree_node {   unsigned char shift;   unsigned char offset;   unsigned char count;   unsigned char exceptional;   struct radix_tree_node *parent;   struct radix_tree_root *root;   union __anonunion____missing_field_name_233 __annonCompField49;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   107     struct radix_tree_root {   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;   176     struct ida {   struct radix_tree_root ida_rt; } ;   216     struct dentry ;   217     struct iattr ;   218     struct super_block ;   219     struct file_system_type ;   220    
 struct kernfs_open_node ;   221     struct kernfs_iattrs ;   245     struct kernfs_root ;   245     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    86     struct kernfs_node ;    86     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    90     struct kernfs_ops ;    90     struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size;   struct kernfs_node *notify_next; } ;    97     union __anonunion____missing_field_name_242 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    97     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion____missing_field_name_242 __annonCompField50;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   139     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int (*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *);   int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;   158     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   174     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   struct seq_file *seq_file;   void *priv;   struct mutex mutex;   struct mutex prealloc_mutex;   int event;   struct list_head list;   char *prealloc_buf;   size_t atomic_write_len;   bool mmapped;   bool released;   const struct vm_operations_struct *vm_ops; } ;   194     struct kernfs_ops {   int (*open)(struct kernfs_open_file *);   void (*release)(struct kernfs_open_file *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   bool prealloc;   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   521     struct sock ;   522     struct kobject ;   523     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   529     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   umode_t  (*is_bin_visible)(struct kobject *, struct bin_attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    92     struct bin_attribute {   struct attribute attr;  
 size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t  (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   165     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;   530     struct refcount_struct {   atomic_t refs; } ;    11     typedef struct refcount_struct refcount_t;    41     struct kref {   refcount_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   115     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   123     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   131     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   148     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   223     struct kernel_param ;   228     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    62     struct kparam_string ;    62     struct kparam_array ;    62     union __anonunion____missing_field_name_245 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    62     struct kernel_param {   const char *name;   struct module *mod;   const struct kernel_param_ops *ops;   const u16 perm;   s8 level;   u8 flags;   union __anonunion____missing_field_name_245 __annonCompField51; } ;    83     struct kparam_string {   unsigned int maxlen;   char *string; } ;    89     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   470     struct latch_tree_node {   struct rb_node node[2U]; } ;   211     struct mod_arch_specific { } ;    38     struct exception_table_entry ;    39     struct module_param_attrs ;    39     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    49     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;   276     enum 
module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   283     struct mod_tree_node {   struct module *mod;   struct latch_tree_node node; } ;   288     struct module_layout {   void *base;   unsigned int size;   unsigned int text_size;   unsigned int ro_size;   unsigned int ro_after_init_size;   struct mod_tree_node mtn; } ;   304     struct mod_kallsyms {   Elf64_Sym *symtab;   unsigned int num_symtab;   char *strtab; } ;   318     struct klp_modinfo {   Elf64_Ehdr hdr;   Elf64_Shdr *sechdrs;   char *secstrings;   unsigned int symndx; } ;   326     struct module_sect_attrs ;   326     struct module_notes_attrs ;   326     struct trace_event_call ;   326     struct trace_enum_map ;   326     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const s32 *crcs;   unsigned int num_syms;   struct mutex param_lock;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const s32 *gpl_crcs;   const struct kernel_symbol *unused_syms;   const s32 *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const s32 *unused_gpl_crcs;   bool sig_ok;   bool async_probe_requested;   const struct kernel_symbol *gpl_future_syms;   const s32 *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   struct module_layout core_layout;   struct module_layout init_layout;   struct mod_arch_specific arch;   unsigned long taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   struct mod_kallsyms *kallsyms;   struct mod_kallsyms core_kallsyms;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct trace_event_call **trace_events;   unsigned int num_trace_events;   struct trace_enum_map **trace_enums;   unsigned int num_trace_enums;   bool klp;   bool klp_alive;   struct klp_modinfo *klp_info;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   atomic_t refcnt;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;   796     struct clk ;    15     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    93     struct hlist_bl_node ;    93     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_299 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_298 {   struct __anonstruct____missing_field_name_299 __annonCompField52; } ;   114     struct lockref {   union __anonunion____missing_field_name_298 __annonCompField53; } ;    77     struct path ;    78     struct vfsmount ;    79     struct __anonstruct____missing_field_name_301 {   u32 hash;   u32 len; } ;    79     union __anonunion____missing_field_name_300 {   struct 
__anonstruct____missing_field_name_301 __annonCompField54;   u64 hash_len; } ;    79     struct qstr {   union __anonunion____missing_field_name_300 __annonCompField55;   const unsigned char *name; } ;    66     struct dentry_operations ;    66     union __anonunion____missing_field_name_302 {   struct list_head d_lru;   wait_queue_head_t *d_wait; } ;    66     union __anonunion_d_u_303 {   struct hlist_node d_alias;   struct hlist_bl_node d_in_lookup_hash;   struct callback_head d_rcu; } ;    66     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   union __anonunion____missing_field_name_302 __annonCompField56;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_303 d_u; } ;   122     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   int (*d_init)(struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(const struct path *, bool );   struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;   593     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    19     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   int nid;   struct mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    80     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;   189     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   196     struct pid_namespace ;   196     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    44     struct rcuwait {   struct task_struct *task; } ;    32     enum rcu_sync_type {   RCU_SYNC = 0,   
RCU_SCHED_SYNC = 1,   RCU_BH_SYNC = 2 } ;    38     struct rcu_sync {   int gp_state;   int gp_count;   wait_queue_head_t gp_wait;   int cb_state;   struct callback_head cb_head;   enum rcu_sync_type gp_type; } ;    66     struct percpu_rw_semaphore {   struct rcu_sync rss;   unsigned int *read_count;   struct rw_semaphore rw_sem;   struct rcuwait writer;   int readers_block; } ;   144     struct delayed_call {   void (*fn)(void *);   void *arg; } ;   283     struct backing_dev_info ;   284     struct bdi_writeback ;   286     struct export_operations ;   289     struct kiocb ;   290     struct pipe_inode_info ;   291     struct poll_table_struct ;   292     struct kstatfs ;   293     struct swap_info_struct ;   294     struct iov_iter ;   295     struct fscrypt_info ;   296     struct fscrypt_operations ;    76     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   213     struct dquot ;   214     struct kqid ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_305 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_305 kprojid_t;   181     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_306 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_306 __annonCompField57;   enum quota_type type; } ;   194     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time64_t dqb_btime;   time64_t dqb_itime; } ;   216     struct quota_format_type ;   217     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   282     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   309     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   321     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   338     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 
d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   int d_ino_warns;   int d_spc_warns;   u64 d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   361     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   407     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   418     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   431     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, const struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   447     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   511     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   540     struct writeback_control ;   541     struct kiocb {   struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int ki_flags; } ;   317     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   bool  (*isolate_page)(struct page *, isolate_mode_t );   void (*putback_page)(struct page *);   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int 
(*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   376     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   atomic_t i_mmap_writable;   struct rb_root i_mmap;   struct rw_semaphore i_mmap_rwsem;   unsigned long nrpages;   unsigned long nrexceptional;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   spinlock_t private_lock;   gfp_t gfp_mask;   struct list_head private_list;   void *private_data; } ;   398     struct request_queue ;   399     struct hd_struct ;   399     struct gendisk ;   399     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct backing_dev_info *bd_bdi;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   515     struct posix_acl ;   542     struct inode_operations ;   542     union __anonunion____missing_field_name_311 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   542     union __anonunion____missing_field_name_312 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   542     struct file_lock_context ;   542     struct cdev ;   542     union __anonunion____missing_field_name_313 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev;   char *i_link;   unsigned int i_dir_seq; } ;   542     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_311 __annonCompField58;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct rw_semaphore i_rwsem;   unsigned long dirtied_when;   unsigned long dirtied_time_when;   struct hlist_node i_hash;   struct list_head i_io_list;   struct bdi_writeback *i_wb;   int i_wb_frn_winner;   u16 i_wb_frn_avg_time;   u16 i_wb_frn_history;   struct list_head i_lru;   struct list_head i_sb_list;   struct list_head i_wb_list;   union __anonunion____missing_field_name_312 __annonCompField59;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock_context *i_flctx;   struct address_space i_data;   struct list_head i_devices;   union __anonunion____missing_field_name_313 __annonCompField60;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   struct fscrypt_info *i_crypt_info;   void *i_private; } ;   803     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   811     struct file_ra_state {   unsigned long start;   
unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   834     union __anonunion_f_u_314 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   834     struct file {   union __anonunion_f_u_314 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   919     typedef void *fl_owner_t;   920     struct file_lock ;   921     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   927     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   fl_owner_t  (*lm_get_owner)(fl_owner_t );   void (*lm_put_owner)(fl_owner_t );   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, int);   bool  (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock *, int, struct list_head *);   void (*lm_setup)(struct file_lock *, void **); } ;   954     struct nlm_lockowner ;   955     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_316 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_315 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_316 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct list_head fl_list;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_315 fl_u; } ;  1007     struct file_lock_context {   spinlock_t flc_lock;   struct list_head flc_flock;   struct list_head flc_posix;   struct list_head flc_lease; } ;  1074     struct files_struct ;  1227     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1262     struct sb_writers {   int frozen;   wait_queue_head_t wait_unfrozen;   struct percpu_rw_semaphore rw_sem[3U]; } ;  1292     struct super_operations ;  1292     struct xattr_handler ;  1292     struct mtd_info ;  1292     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long 
s_iflags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   const struct fscrypt_operations *s_cop;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   unsigned int s_quota_types;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct hlist_head s_pins;   struct user_namespace *s_user_ns;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu;   struct work_struct destroy_work;   struct mutex s_sync_lock;   int s_stack_depth;   spinlock_t s_inode_list_lock;   struct list_head s_inodes;   spinlock_t s_inode_wblist_lock;   struct list_head s_inodes_wb; } ;  1579     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1592     struct dir_context ;  1617     struct dir_context {   int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1624     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   int (*iterate_shared)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **, void **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   void (*show_fdinfo)(struct seq_file *, struct file *);   ssize_t  (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int);   int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 );   ssize_t  (*dedupe_file_range)(struct file *, u64 , u64 
, struct file *, u64 ); } ;  1692     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(const struct path *, struct kstat *, u32 , unsigned int);   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1771     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_super)(struct super_block *);   int (*freeze_fs)(struct super_block *);   int (*thaw_super)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   struct dquot ** (*get_dquots)(struct inode *);   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);   long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;  2014     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;  3219     struct assoc_array_ptr ;  3219     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ; 
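For orientation, the struct file_operations table in the dump above is the VFS dispatch surface: a driver fills in only the hooks it supports and leaves the rest NULL. Below is a minimal, hypothetical sketch of how such a table is populated; "sketch0" and all sketch_* names are invented for illustration and are not part of this driver or this trace.

    /* Hypothetical example module, not taken from the traced driver. */
    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/miscdevice.h>

    static ssize_t sketch_read(struct file *file, char __user *buf,
                               size_t len, loff_t *ppos)
    {
            return 0;                       /* always report EOF */
    }

    static const struct file_operations sketch_fops = {
            .owner = THIS_MODULE,
            .read  = sketch_read,           /* fills the read slot listed above */
    };

    static struct miscdevice sketch_dev = {
            .minor = MISC_DYNAMIC_MINOR,
            .name  = "sketch0",
            .fops  = &sketch_fops,
    };

    module_misc_device(sketch_dev);
    MODULE_LICENSE("GPL");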
   31     typedef int32_t key_serial_t;    34     typedef uint32_t key_perm_t;    35     struct key ;    36     struct user_struct ;    37     struct signal_struct ;    38     struct key_type ;    42     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;    91     union key_payload {   void *rcu_data0;   void *data[4U]; } ;   128     union __anonunion____missing_field_name_317 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;   128     struct key_user ;   128     union __anonunion____missing_field_name_318 {   time_t expiry;   time_t revoked_at; } ;   128     struct __anonstruct____missing_field_name_320 {   struct key_type *type;   char *description; } ;   128     union __anonunion____missing_field_name_319 {   struct keyring_index_key index_key;   struct __anonstruct____missing_field_name_320 __annonCompField63; } ;   128     struct __anonstruct____missing_field_name_322 {   struct list_head name_link;   struct assoc_array keys; } ;   128     union __anonunion____missing_field_name_321 {   union key_payload payload;   struct __anonstruct____missing_field_name_322 __annonCompField65;   int reject_error; } ;   128     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion____missing_field_name_317 __annonCompField61;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion____missing_field_name_318 __annonCompField62;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion____missing_field_name_319 __annonCompField64;   union __anonunion____missing_field_name_321 __annonCompField66;   int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;   380     struct audit_context ;    26     struct sem_undo_list ;    26     struct sysv_sem {   struct sem_undo_list *undo_list; } ;    26     struct sysv_shm {   struct list_head shm_clist; } ;    12     enum kcov_mode {   KCOV_MODE_DISABLED = 0,   KCOV_MODE_TRACE = 1 } ;    84     struct plist_node {   int prio;   struct list_head prio_list;   struct list_head node_list; } ;   299     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   u8 state;   u8 is_rel; } ;   113     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t  (*get_time)();   ktime_t offset; } ;   146     struct hrtimer_cpu_base {   raw_spinlock_t lock;   seqcount_t seq;   struct hrtimer *running;   unsigned int cpu;   unsigned int active_bases;   unsigned int clock_was_set_seq;   bool migration_enabled;   bool nohz_active;   unsigned char in_hrtirq;   unsigned char hres_active;   unsigned char hang_detected;   ktime_t expires_next;   struct hrtimer *next_timer;   unsigned int nr_events;   unsigned int nr_retries;   unsigned int nr_hangs;   unsigned int max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;    43     struct seccomp_filter ;    44     struct seccomp {   int mode;   struct 
seccomp_filter *filter; } ;    11     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    24     struct __anonstruct_sigset_t_323 {   unsigned long sig[1U]; } ;    24     typedef struct __anonstruct_sigset_t_323 sigset_t;    25     struct siginfo ;    38     union sigval {   int sival_int;   void *sival_ptr; } ;    10     typedef union sigval sigval_t;    11     struct __anonstruct__kill_325 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;    11     struct __anonstruct__timer_326 {   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;    11     struct __anonstruct__rt_327 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   sigval_t _sigval; } ;    11     struct __anonstruct__sigchld_328 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;    11     struct __anonstruct__addr_bnd_331 {   void *_lower;   void *_upper; } ;    11     union __anonunion____missing_field_name_330 {   struct __anonstruct__addr_bnd_331 _addr_bnd;   __u32 _pkey; } ;    11     struct __anonstruct__sigfault_329 {   void *_addr;   short _addr_lsb;   union __anonunion____missing_field_name_330 __annonCompField67; } ;    11     struct __anonstruct__sigpoll_332 {   long _band;   int _fd; } ;    11     struct __anonstruct__sigsys_333 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;    11     union __anonunion__sifields_324 {   int _pad[28U];   struct __anonstruct__kill_325 _kill;   struct __anonstruct__timer_326 _timer;   struct __anonstruct__rt_327 _rt;   struct __anonstruct__sigchld_328 _sigchld;   struct __anonstruct__sigfault_329 _sigfault;   struct __anonstruct__sigpoll_332 _sigpoll;   struct __anonstruct__sigsys_333 _sigsys; } ;    11     struct siginfo {   int si_signo;   int si_errno;   int si_code;   union __anonunion__sifields_324 _sifields; } ;   118     typedef struct siginfo siginfo_t;    21     struct sigpending {   struct list_head list;   sigset_t signal; } ;    65     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;    45     struct bio_list ;    46     struct blk_plug ;    47     struct cfs_rq ;    48     struct fs_struct ;    49     struct futex_pi_state ;    50     struct io_context ;    51     struct nameidata ;    52     struct perf_event_context ;    54     struct reclaim_state ;    55     struct robust_list_head ;    58     struct sighand_struct ;    59     struct task_delay_info ;    60     struct task_group ;   187     struct prev_cputime {   u64 utime;   u64 stime;   raw_spinlock_t lock; } ;   203     struct task_cputime {   u64 utime;   u64 stime;   unsigned long long sum_exec_runtime; } ;   220     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   244     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;   261     struct sched_avg {   u64 last_update_time;   u64 load_sum;   u32 util_sum;   u32 period_contrib;   unsigned long load_avg;   unsigned long util_avg; } ;   322     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 
nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;   357     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;   393     struct rt_rq ;   393     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   unsigned short on_rq;   unsigned short on_list;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;   411     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;   478     struct wake_q_node {   struct wake_q_node *next; } ;   482     struct sched_class ;   482     struct rt_mutex_waiter ;   482     struct css_set ;   482     struct compat_robust_list_head ;   482     struct numa_group ;   482     struct kcov ;   482     struct task_struct {   struct thread_info thread_info;   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   unsigned int cpu;   unsigned int wakee_flips;   unsigned long wakee_flip_decay_ts;   struct task_struct *last_wakee;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   unsigned long rcu_tasks_nvcsw;   bool rcu_tasks_holdout;   struct list_head rcu_tasks_holdout_list;   int rcu_tasks_idle_cpu;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   struct vmacache vmacache;   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned long jobctl;   unsigned int personality;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   unsigned char sched_migrated;   unsigned char sched_remote_wakeup;   unsigned char;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char restore_sigmask;   unsigned char memcg_may_oom;   unsigned char memcg_kmem_skip_account;   unsigned char brk_randomized;   unsigned long atomic_flags;   struct restart_block restart_block;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head 
thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   u64 utime;   u64 stime;   u64 gtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   u64 start_time;   u64 real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *ptracer_cred;   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   struct nameidata *nameidata;   struct sysv_sem sysvsem;   struct sysv_shm sysvshm;   unsigned long last_switch_count;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   unsigned int sas_ss_flags;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct wake_q_node wake_q;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   unsigned int in_ubsan;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   u64 acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   int closid;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults;   unsigned long total_numa_faults;   unsigned long numa_faults_locality[3U];   unsigned long numa_pages_migrated;   struct tlbflush_unmap_batch tlb_ubc;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int 
nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   u64 timer_slack_ns;   u64 default_timer_slack_ns;   unsigned int kasan_depth;   unsigned long trace;   unsigned long trace_recursion;   enum kcov_mode kcov_mode;   unsigned int kcov_size;   void *kcov_area;   struct kcov *kcov;   struct mem_cgroup *memcg_in_oom;   gfp_t memcg_oom_gfp_mask;   int memcg_oom_order;   unsigned int memcg_nr_pages_over_high;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg;   unsigned long task_state_change;   int pagefault_disabled;   struct task_struct *oom_reaper_list;   atomic_t stack_refcount;   struct thread_struct thread; } ;  1562     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   unsigned long unix_inflight;   atomic_long_t pipe_bufs;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;    60     struct group_info {   atomic_t usage;   int ngroups;   kgid_t gid[0U]; } ;    86     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t cap_bset;   kernel_cap_t cap_ambient;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   369     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   const struct file *file;   void *private; } ;    30     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   222     struct pinctrl ;   223     struct pinctrl_state ;   200     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *init_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    58     struct pm_message {   int event; } ;    64     typedef struct pm_message pm_message_t;    65     struct dev_pm_ops {   int (*prepare)(struct device *);   void (*complete)(struct device *);   int (*suspend)(struct device *);   int (*resume)(struct device *);   int (*freeze)(struct device *);   int (*thaw)(struct device *);   int (*poweroff)(struct device *);   int (*restore)(struct device *);   int (*suspend_late)(struct device *);   int (*resume_early)(struct device *);   int (*freeze_late)(struct device *);   int (*thaw_early)(struct device *);   int (*poweroff_late)(struct device *);   int (*restore_early)(struct device *);   int (*suspend_noirq)(struct device *);   int (*resume_noirq)(struct device *);   int (*freeze_noirq)(struct device *);   int (*thaw_noirq)(struct device *);   int (*poweroff_noirq)(struct device *);   int 
(*restore_noirq)(struct device *);   int (*runtime_suspend)(struct device *);   int (*runtime_resume)(struct device *);   int (*runtime_idle)(struct device *); } ;   315     enum rpm_status {   RPM_ACTIVE = 0,   RPM_RESUMING = 1,   RPM_SUSPENDED = 2,   RPM_SUSPENDING = 3 } ;   322     enum rpm_request {   RPM_REQ_NONE = 0,   RPM_REQ_IDLE = 1,   RPM_REQ_SUSPEND = 2,   RPM_REQ_AUTOSUSPEND = 3,   RPM_REQ_RESUME = 4 } ;   330     struct wakeup_source ;   331     struct wake_irq ;   332     struct pm_domain_data ;   333     struct pm_subsys_data {   spinlock_t lock;   unsigned int refcount;   struct list_head clock_list;   struct pm_domain_data *domain_data; } ;   551     struct dev_pm_qos ;   551     struct dev_pm_info {   pm_message_t power_state;   unsigned char can_wakeup;   unsigned char async_suspend;   bool in_dpm_list;   bool is_prepared;   bool is_suspended;   bool is_noirq_suspended;   bool is_late_suspended;   bool early_init;   bool direct_complete;   spinlock_t lock;   struct list_head entry;   struct completion completion;   struct wakeup_source *wakeup;   bool wakeup_path;   bool syscore;   bool no_pm_callbacks;   struct timer_list suspend_timer;   unsigned long timer_expires;   struct work_struct work;   wait_queue_head_t wait_queue;   struct wake_irq *wakeirq;   atomic_t usage_count;   atomic_t child_count;   unsigned char disable_depth;   unsigned char idle_notification;   unsigned char request_pending;   unsigned char deferred_resume;   unsigned char run_wake;   unsigned char runtime_auto;   bool ignore_children;   unsigned char no_callbacks;   unsigned char irq_safe;   unsigned char use_autosuspend;   unsigned char timer_autosuspends;   unsigned char memalloc_noio;   unsigned int links_count;   enum rpm_request request;   enum rpm_status runtime_status;   int runtime_error;   int autosuspend_delay;   unsigned long last_busy;   unsigned long active_jiffies;   unsigned long suspended_jiffies;   unsigned long accounting_timestamp;   struct pm_subsys_data *subsys_data;   void (*set_latency_tolerance)(struct device *, s32 );   struct dev_pm_qos *qos; } ;   613     struct dev_pm_domain {   struct dev_pm_ops ops;   void (*detach)(struct device *, bool );   int (*activate)(struct device *);   void (*sync)(struct device *);   void (*dismiss)(struct device *); } ;    76     struct dev_archdata {   void *iommu; } ;     8     struct dma_map_ops ;    18     struct pdev_archdata { } ;    21     struct device_private ;    22     struct device_driver ;    23     struct driver_private ;    24     struct class ;    25     struct subsys_private ;    26     struct bus_type ;    27     struct device_node ;    28     struct fwnode_handle ;    29     struct iommu_ops ;    30     struct iommu_group ;    31     struct iommu_fwspec ;    62     struct device_attribute ;    62     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   int (*num_vf)(struct device *);   const struct dev_pm_ops *pm;   const struct iommu_ops 
*iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   147     struct device_type ;   206     enum probe_type {   PROBE_DEFAULT_STRATEGY = 0,   PROBE_PREFER_ASYNCHRONOUS = 1,   PROBE_FORCE_SYNCHRONOUS = 2 } ;   212     struct of_device_id ;   212     struct acpi_device_id ;   212     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   enum probe_type probe_type;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   362     struct class_attribute ;   362     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **class_groups;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   457     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   527     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   555     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   727     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   790     enum dl_dev_state {   DL_DEV_NO_DRIVER = 0,   DL_DEV_PROBING = 1,   DL_DEV_DRIVER_BOUND = 2,   DL_DEV_UNBINDING = 3 } ;   797     struct dev_links_info {   struct list_head suppliers;   struct list_head consumers;   enum dl_dev_state status; } ;   817     struct irq_domain ;   817     struct dma_coherent_mem ;   817     struct cma ;   817     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_links_info links;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct irq_domain *msi_domain;   struct dev_pin_info *pins;   struct list_head msi_list;   int numa_node;   const struct dma_map_ops *dma_ops;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node 
*of_node;   struct fwnode_handle *fwnode;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   struct iommu_fwspec *iommu_fwspec;   bool offline_disabled;   bool offline; } ;   976     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct wake_irq *wakeirq;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;  1453     struct scatterlist ;    96     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;   277     struct vm_fault {   struct vm_area_struct *vma;   unsigned int flags;   gfp_t gfp_mask;   unsigned long pgoff;   unsigned long address;   pmd_t *pmd;   pud_t *pud;   pte_t orig_pte;   struct page *cow_page;   struct mem_cgroup *memcg;   struct page *page;   pte_t *pte;   spinlock_t *ptl;   pgtable_t prealloc_pte; } ;   340     enum page_entry_size {   PE_SIZE_PTE = 0,   PE_SIZE_PMD = 1,   PE_SIZE_PUD = 2 } ;   346     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*mremap)(struct vm_area_struct *);   int (*fault)(struct vm_fault *);   int (*huge_fault)(struct vm_fault *, enum page_entry_size );   void (*map_pages)(struct vm_fault *, unsigned long, unsigned long);   int (*page_mkwrite)(struct vm_fault *);   int (*pfn_mkwrite)(struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;  2513     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    21     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;   158     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long);   void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   dma_addr_t  (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long);   void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned 
long);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;    19     struct dma_pool ;   679     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;   133     struct exception_table_entry {   int insn;   int fixup;   int handler; } ;   728     struct usb_ctrlrequest {   __u8 bRequestType;   __u8 bRequest;   __le16 wValue;   __le16 wIndex;   __le16 wLength; } ;   388     struct usb_endpoint_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bEndpointAddress;   __u8 bmAttributes;   __le16 wMaxPacketSize;   __u8 bInterval;   __u8 bRefresh;   __u8 bSynchAddress; } ;   670     struct usb_ss_ep_comp_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bMaxBurst;   __u8 bmAttributes;   __le16 wBytesPerInterval; } ;  1122     enum usb_device_speed {   USB_SPEED_UNKNOWN = 0,   USB_SPEED_LOW = 1,   USB_SPEED_FULL = 2,   USB_SPEED_HIGH = 3,   USB_SPEED_WIRELESS = 4,   USB_SPEED_SUPER = 5,   USB_SPEED_SUPER_PLUS = 6 } ;  1132     enum usb_device_state {   USB_STATE_NOTATTACHED = 0,   USB_STATE_ATTACHED = 1,   USB_STATE_POWERED = 2,   USB_STATE_RECONNECTING = 3,   USB_STATE_UNAUTHENTICATED = 4,   USB_STATE_DEFAULT = 5,   USB_STATE_ADDRESS = 6,   USB_STATE_CONFIGURED = 7,   USB_STATE_SUSPENDED = 8 } ;    63     struct usb_ep ;    64     struct usb_request {   void *buf;   unsigned int length;   dma_addr_t dma;   struct scatterlist *sg;   unsigned int num_sgs;   unsigned int num_mapped_sgs;   unsigned short stream_id;   unsigned char no_interrupt;   unsigned char zero;   unsigned char short_not_ok;   void (*complete)(struct usb_ep *, struct usb_request *);   void *context;   struct list_head list;   int status;   unsigned int actual; } ;   115     struct usb_ep_ops {   int (*enable)(struct usb_ep *, const struct usb_endpoint_descriptor *);   int (*disable)(struct usb_ep *);   struct usb_request * (*alloc_request)(struct usb_ep *, gfp_t );   void (*free_request)(struct usb_ep *, struct usb_request *);   int (*queue)(struct usb_ep *, struct usb_request *, gfp_t );   int (*dequeue)(struct usb_ep *, struct usb_request *);   int (*set_halt)(struct usb_ep *, int);   int (*set_wedge)(struct usb_ep *);   int (*fifo_status)(struct usb_ep *);   void (*fifo_flush)(struct usb_ep *); } ;   144     struct usb_ep_caps {   unsigned char type_control;   unsigned char type_iso;   unsigned char type_bulk;   unsigned char type_int;   unsigned char dir_in;   unsigned char dir_out; } ;   162     struct usb_ep {   void *driver_data;   const char *name;   const struct usb_ep_ops *ops;   struct list_head ep_list;   struct usb_ep_caps caps;   bool claimed;   bool enabled;   unsigned short maxpacket;   unsigned short maxpacket_limit;   unsigned short max_streams;   unsigned char mult;   unsigned char maxburst;   u8 address;   const struct usb_endpoint_descriptor *desc;   const struct usb_ss_ep_comp_descriptor *comp_desc; } ;   246     struct usb_dcd_config_params {   __u8 bU1devExitLat;   __le16 bU2DevExitLat; } ;   284     struct usb_gadget ;   
285     struct usb_gadget_driver ;   286     struct usb_udc ;   287     struct usb_gadget_ops {   int (*get_frame)(struct usb_gadget *);   int (*wakeup)(struct usb_gadget *);   int (*set_selfpowered)(struct usb_gadget *, int);   int (*vbus_session)(struct usb_gadget *, int);   int (*vbus_draw)(struct usb_gadget *, unsigned int);   int (*pullup)(struct usb_gadget *, int);   int (*ioctl)(struct usb_gadget *, unsigned int, unsigned long);   void (*get_config_params)(struct usb_dcd_config_params *);   int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *);   int (*udc_stop)(struct usb_gadget *);   struct usb_ep * (*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); } ;   309     struct usb_otg_caps ;   309     struct usb_gadget {   struct work_struct work;   struct usb_udc *udc;   const struct usb_gadget_ops *ops;   struct usb_ep *ep0;   struct list_head ep_list;   enum usb_device_speed speed;   enum usb_device_speed max_speed;   enum usb_device_state state;   const char *name;   struct device dev;   unsigned int out_epnum;   unsigned int in_epnum;   unsigned int mA;   struct usb_otg_caps *otg_caps;   unsigned char sg_supported;   unsigned char is_otg;   unsigned char is_a_peripheral;   unsigned char b_hnp_enable;   unsigned char a_hnp_support;   unsigned char a_alt_hnp_support;   unsigned char hnp_polling_support;   unsigned char host_request_flag;   unsigned char quirk_ep_out_aligned_size;   unsigned char quirk_altset_not_supp;   unsigned char quirk_stall_not_supp;   unsigned char quirk_zlp_not_supp;   unsigned char quirk_avoids_skb_reserve;   unsigned char is_selfpowered;   unsigned char deactivated;   unsigned char connected; } ;   549     struct usb_gadget_driver {   char *function;   enum usb_device_speed max_speed;   int (*bind)(struct usb_gadget *, struct usb_gadget_driver *);   void (*unbind)(struct usb_gadget *);   int (*setup)(struct usb_gadget *, const struct usb_ctrlrequest *);   void (*disconnect)(struct usb_gadget *);   void (*suspend)(struct usb_gadget *);   void (*resume)(struct usb_gadget *);   void (*reset)(struct usb_gadget *);   struct device_driver driver;   char *udc_name;   struct list_head pending;   unsigned char match_existing_only; } ;    13     typedef unsigned long kernel_ulong_t;   187     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   230     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   485     struct platform_device_id {   char name[20U];   kernel_ulong_t driver_data; } ;   676     struct mfd_cell ;   678     struct platform_device {   const char *name;   int id;   bool id_auto;   struct device dev;   u32 num_resources;   struct resource *resource;   const struct platform_device_id *id_entry;   char *driver_override;   struct mfd_cell *mfd_cell;   struct pdev_archdata archdata; } ;   370     struct mv_usb_addon_irq {   unsigned int irq;   int (*poll)(); } ;    35     struct mv_usb_platform_data {   struct mv_usb_addon_irq *id;   struct mv_usb_addon_irq *vbus;   unsigned int mode;   unsigned char disable_otg_clock_gating;   unsigned char otg_force_a_bus_req;   int (*phy_init)(void *);   void (*phy_deinit)(void *);   int (*set_vbus)(unsigned int);   int (*private_init)(void *, void *); } ;   532     struct mv_u3d_cap_regs {   u32 rsvd[5U];   u32 dboff;   u32 rtsoff;   u32 vuoff; } ;   130     struct mv_u3d_op_regs {   u32 usbcmd;   u32 rsvd1[11U];   u32 dcbaapl;   u32 dcbaaph;   u32 
rsvd2[243U];   u32 portsc;   u32 portlinkinfo;   u32 rsvd3[9917U];   u32 doorbell; } ;   143     struct epxcr {   u32 epxoutcr0;   u32 epxoutcr1;   u32 epxincr0;   u32 epxincr1; } ;   151     struct xferstatus {   u32 curdeqlo;   u32 curdeqhi;   u32 statuslo;   u32 statushi; } ;   159     struct mv_u3d_vuc_regs {   u32 ctrlepenable;   u32 setuplock;   u32 endcomplete;   u32 intrcause;   u32 intrenable;   u32 trbcomplete;   u32 linkchange;   u32 rsvd1[5U];   u32 trbunderrun;   u32 rsvd2[43U];   u32 bridgesetting;   u32 rsvd3[7U];   struct xferstatus txst[16U];   struct xferstatus rxst[16U];   u32 ltssm;   u32 pipe;   u32 linkcr0;   u32 linkcr1;   u32 rsvd6[60U];   u32 mib0;   u32 usblink;   u32 ltssmstate;   u32 linkerrorcause;   u32 rsvd7[60U];   u32 devaddrtiebrkr;   u32 itpinfo0;   u32 itpinfo1;   u32 rsvd8[61U];   struct epxcr epcr[16U];   u32 rsvd9[64U];   u32 phyaddr;   u32 phydata; } ;   195     struct mv_u3d_ep_context {   u32 rsvd0;   u32 rsvd1;   u32 trb_addr_lo;   u32 trb_addr_hi;   u32 rsvd2;   u32 rsvd3;   struct usb_ctrlrequest setup_buffer; } ;   206     struct mv_u3d_trb_ctrl {   unsigned char own;   unsigned char rsvd1;   unsigned char chain;   unsigned char ioc;   unsigned char rsvd2;   unsigned char type;   unsigned char dir;   unsigned short rsvd3; } ;   223     struct mv_u3d_trb_hw {   u32 buf_addr_lo;   u32 buf_addr_hi;   u32 trb_len;   struct mv_u3d_trb_ctrl ctrl; } ;   233     struct mv_u3d_trb {   struct mv_u3d_trb_hw *trb_hw;   dma_addr_t trb_dma;   struct list_head trb_list; } ;   240     struct mv_u3d_ep ;   240     struct mv_u3d_req ;   240     struct mv_u3d {   struct usb_gadget gadget;   struct usb_gadget_driver *driver;   spinlock_t lock;   struct completion *done;   struct device *dev;   int irq;   struct mv_u3d_cap_regs *cap_regs;   struct mv_u3d_op_regs *op_regs;   struct mv_u3d_vuc_regs *vuc_regs;   void *phy_regs;   unsigned int max_eps;   struct mv_u3d_ep_context *ep_context;   size_t ep_context_size;   dma_addr_t ep_context_dma;   struct dma_pool *trb_pool;   struct mv_u3d_ep *eps;   struct mv_u3d_req *status_req;   struct usb_ctrlrequest local_setup_buff;   unsigned int resume_state;   unsigned int usb_state;   unsigned int ep0_state;   unsigned int ep0_dir;   unsigned int dev_addr;   unsigned int errors;   unsigned char softconnect;   unsigned char vbus_active;   unsigned char remote_wakeup;   unsigned char clock_gating;   unsigned char active;   unsigned char vbus_valid_detect;   struct mv_usb_addon_irq *vbus;   unsigned int power;   struct clk *clk; } ;   288     struct mv_u3d_ep {   struct usb_ep ep;   struct mv_u3d *u3d;   struct list_head queue;   struct list_head req_list;   struct mv_u3d_ep_context *ep_context;   u32 direction;   char name[14U];   u32 processing;   spinlock_t req_lock;   unsigned char wedge;   unsigned char enabled;   unsigned char ep_type;   unsigned char ep_num; } ;   306     struct mv_u3d_req {   struct usb_request req;   struct mv_u3d_ep *ep;   struct list_head queue;   struct list_head list;   struct list_head trb_list;   struct mv_u3d_trb *trb_head;   unsigned int trb_count;   unsigned int chain; } ;     1     void * __builtin_memcpy(void *, const void *, unsigned long);     1     long int __builtin_expect(long, long);   252     void __read_once_size(const volatile void *p, void *res, int size);   277     void __write_once_size(volatile void *p, void *res, int size);    63     void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);   418     int snprintf(char *, size_t , const char *, ...);    
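The usb_gadget_ops, usb_gadget and mv_u3d structures above are the UDC-side plumbing this driver builds on: the controller driver fills an ops table and hands its gadget to the UDC core. The sketch below shows that wiring in miniature, assuming a hypothetical driver; all sketch_* names are invented, only the types and usb_add_gadget_udc() (declared further down in this dump) come from the listing. A full driver would embed the gadget in its state struct, as struct mv_u3d does, and register sketch_probe through a platform_driver, both omitted here.

    /* A minimal sketch, not the mv_u3d code itself. */
    #include <linux/platform_device.h>
    #include <linux/usb/gadget.h>

    static int sketch_udc_start(struct usb_gadget *g,
                                struct usb_gadget_driver *driver)
    {
            return 0;       /* accept the gadget driver */
    }

    static int sketch_udc_stop(struct usb_gadget *g)
    {
            return 0;
    }

    static const struct usb_gadget_ops sketch_ops = {
            .udc_start = sketch_udc_start,  /* matches the slot listed above */
            .udc_stop  = sketch_udc_stop,
    };

    static int sketch_probe(struct platform_device *pdev)
    {
            struct usb_gadget *gadget;

            gadget = devm_kzalloc(&pdev->dev, sizeof(*gadget), GFP_KERNEL);
            if (!gadget)
                    return -ENOMEM;

            gadget->ops       = &sketch_ops;
            gadget->max_speed = USB_SPEED_SUPER;
            gadget->name      = "sketch_udc";

            /* hand the gadget to the UDC core, as mv_u3d_probe() does */
            return usb_add_gadget_udc(&pdev->dev, gadget);
    }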
 3     bool  ldv_is_err(const void *ptr);     6     long int ldv_ptr_err(const void *ptr);    25     void INIT_LIST_HEAD(struct list_head *list);    32     bool  __list_add_valid(struct list_head *, struct list_head *, struct list_head *);    35     bool  __list_del_entry_valid(struct list_head *);    55     void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next);    90     void list_add_tail(struct list_head *new, struct list_head *head);   102     void __list_del(struct list_head *prev, struct list_head *next);   114     void __list_del_entry(struct list_head *entry);   156     void list_del_init(struct list_head *entry);   200     int list_empty(const struct list_head *head);    71     void warn_slowpath_null(const char *, const int);     9     extern unsigned long vmemmap_base;    23     unsigned long int __phys_addr(unsigned long);    32     void * __memcpy(void *, const void *, size_t );    24     char * strncpy(char *, const char *, __kernel_size_t );    32     long int PTR_ERR(const void *ptr);    41     bool  IS_ERR(const void *ptr);    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    22     void _raw_spin_lock(raw_spinlock_t *);    32     unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);    39     void _raw_spin_unlock(raw_spinlock_t *);    43     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   286     raw_spinlock_t * spinlock_check(spinlock_t *lock);   297     void spin_lock(spinlock_t *lock);   337     void spin_unlock(spinlock_t *lock);   352     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);   193     resource_size_t  resource_size(const struct resource *res);   118     phys_addr_t  virt_to_phys(volatile void *address);   171     void * ioremap_nocache(resource_size_t , unsigned long);   192     void * ioremap(resource_size_t offset, unsigned long size);   197     void iounmap(volatile void *);    31     unsigned int ioread32(void *);    41     void iowrite32(u32 , void *);    11     void ldv_clk_disable_clk(struct clk *clk);    12     int ldv_clk_enable_clk();    13     void ldv_clk_disable_clk_of_mv_u3d(struct clk *clk);    14     int ldv_clk_enable_clk_of_mv_u3d();  1026     void * dev_get_drvdata(const struct device *dev);  1031     void dev_set_drvdata(struct device *dev, void *data);  1168     void * dev_get_platdata(const struct device *dev);  1261     void dev_err(const struct device *, const char *, ...);    37     void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );    42     void debug_dma_mapping_error(struct device *, dma_addr_t );    44     void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );    53     void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);   131     void kmemcheck_mark_initialized(void *address, unsigned int n);   144     int valid_dma_direction(int dma_direction);    28     extern const struct dma_map_ops *dma_ops;    30     const struct dma_map_ops * get_arch_dma_ops(struct bus_type *bus);    35     bool  arch_dma_alloc_attrs(struct device **, gfp_t *);   175     const struct dma_map_ops * get_dma_ops(struct device *dev);   200     dma_addr_t  dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);   219     void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);   476     void * 
dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);   517     void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);   523     void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);   543     int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);    19     struct dma_pool * dma_pool_create(const char *, struct device *, size_t , size_t , size_t );    22     void dma_pool_destroy(struct dma_pool *);    24     void * dma_pool_alloc(struct dma_pool *, gfp_t , dma_addr_t *);    33     void dma_pool_free(struct dma_pool *, void *, dma_addr_t );    10     void __const_udelay(unsigned long);   154     void kfree(const void *);   330     void * __kmalloc(size_t , gfp_t );   478     void * kmalloc(size_t size, gfp_t flags);   603     void * kmalloc_array(size_t n, size_t size, gfp_t flags);   618     void * kcalloc(size_t n, size_t size, gfp_t flags);   661     void * kzalloc(size_t size, gfp_t flags);   139     int request_threaded_irq(unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   144     int request_irq(unsigned int irq, irqreturn_t  (*handler)(int, void *), unsigned long flags, const char *name, void *dev);   158     void free_irq(unsigned int, void *);   234     void usb_ep_set_maxpacket_limit(struct usb_ep *, unsigned int);   711     int usb_add_gadget_udc(struct device *, struct usb_gadget *);   712     void usb_del_gadget_udc(struct usb_gadget *);   796     int usb_gadget_map_request(struct usb_gadget *, struct usb_request *, int);   801     void usb_gadget_unmap_request(struct usb_gadget *, struct usb_request *, int);   821     void usb_gadget_giveback_request(struct usb_ep *, struct usb_request *);    52     struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int);    56     struct resource * platform_get_resource_byname(struct platform_device *, unsigned int, const char *);   211     void * platform_get_drvdata(const struct platform_device *pdev);   216     void platform_set_drvdata(struct platform_device *pdev, void *data);   230     struct clk * clk_get(struct device *, const char *);   282     int ldv_clk_enable_5(struct clk *clk);   286     int ldv_clk_enable_7(struct clk *clk);   290     int ldv_clk_enable_10(struct clk *clk);   294     int ldv_clk_enable_11(struct clk *clk);   298     int ldv_clk_enable_13(struct clk *clk);   298     void ldv_clk_disable_6(struct clk *clk);   302     void ldv_clk_disable_8(struct clk *clk);   306     void ldv_clk_disable_9(struct clk *clk);   310     void ldv_clk_disable_12(struct clk *clk);   314     void ldv_clk_disable_14(struct clk *clk);   318     void ldv_clk_disable_15(struct clk *clk);   314     void clk_put(struct clk *);    38     const char driver_name[7U] = { 'm', 'v', '_', 'u', '3', 'd', '\x0' };    41     void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);    42     void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver);    46     const struct usb_endpoint_descriptor mv_u3d_ep0_desc = { 7U, 5U, 0U, 0U, 512U, 0U, 0U, 0U };    54     void mv_u3d_ep0_reset(struct mv_u3d *u3d);   100     void mv_u3d_ep0_stall(struct mv_u3d *u3d);   119     int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index, struct mv_u3d_req *curr_req);   179     void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status);   223     int mv_u3d_queue_trb(struct mv_u3d_ep *ep, 
struct mv_u3d_req *req);   272     struct mv_u3d_trb * mv_u3d_build_trb_one(struct mv_u3d_req *req, unsigned int *length, dma_addr_t *dma);   337     int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned int *length, struct mv_u3d_trb *trb, int *is_last);   404     int mv_u3d_req_to_trb(struct mv_u3d_req *req);   480     int mv_u3d_start_queue(struct mv_u3d_ep *ep);   525     int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc);   628     int mv_u3d_ep_disable(struct usb_ep *_ep);   671     struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags);   684     void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req);   691     void mv_u3d_ep_fifo_flush(struct usb_ep *_ep);   775     int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags);   849     int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req);   923     void mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall);   946     int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt___0, int wedge);   989     int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt___0);   994     int mv_u3d_ep_set_wedge(struct usb_ep *_ep);   999     const struct usb_ep_ops mv_u3d_ep_ops = { &mv_u3d_ep_enable, &mv_u3d_ep_disable, &mv_u3d_alloc_request, &mv_u3d_free_request, &mv_u3d_ep_queue, &mv_u3d_ep_dequeue, &mv_u3d_ep_set_halt, &mv_u3d_ep_set_wedge, 0, &mv_u3d_ep_fifo_flush };  1014     void mv_u3d_controller_stop(struct mv_u3d *u3d);  1037     void mv_u3d_controller_start(struct mv_u3d *u3d);  1063     int mv_u3d_controller_reset(struct mv_u3d *u3d);  1095     int mv_u3d_enable(struct mv_u3d *u3d);  1124     void mv_u3d_disable(struct mv_u3d *u3d);  1136     int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active);  1187     int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned int mA);  1196     int mv_u3d_pullup(struct usb_gadget *gadget, int is_on);  1230     int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver);  1261     int mv_u3d_stop(struct usb_gadget *g);  1293     const struct usb_gadget_ops mv_u3d_ops = { 0, 0, 0, &mv_u3d_vbus_session, &mv_u3d_vbus_draw, &mv_u3d_pullup, 0, 0, &mv_u3d_start, &mv_u3d_stop, 0 };  1305     int mv_u3d_eps_init(struct mv_u3d *u3d);  1399     void mv_u3d_irq_process_error(struct mv_u3d *u3d);  1406     void mv_u3d_irq_process_link_change(struct mv_u3d *u3d);  1470     void mv_u3d_ch9setaddress(struct mv_u3d *u3d, struct usb_ctrlrequest *setup);  1507     int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup);  1516     void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num, struct usb_ctrlrequest *setup);  1586     void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr);  1596     void mv_u3d_irq_process_setup(struct mv_u3d *u3d);  1615     void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d);  1679     irqreturn_t  mv_u3d_irq(int irq, void *dev);  1753     int mv_u3d_remove(struct platform_device *dev);  1786     int mv_u3d_probe(struct platform_device *dev);  2040     void mv_u3d_shutdown(struct platform_device *dev);  2082     void ldv_check_final_state();  2085     void ldv_check_return_value(int);  2088     void ldv_check_return_value_probe(int);  2091     void ldv_initialize();  2094     void ldv_handler_precall();  2097     int nondet_int();  2100     int LDV_IN_INTERRUPT = 0;  2103     void ldv_main0_sequence_infinite_withcheck_stateful();    10     void ldv_error();    25     int ldv_undef_int();    14     
void * ldv_err_ptr(long error);    28     bool  ldv_is_err_or_null(const void *ptr);     9     int ldv_counter_clk = 0;    32     int ldv_counter_clk_of_mv_u3d = 0;
          return ;
        }
        {
 2105     struct usb_ep *var_group1;
 2106     const struct usb_endpoint_descriptor *var_mv_u3d_ep_enable_8_p1;
 2107     unsigned int var_mv_u3d_alloc_request_10_p1;
 2108     struct usb_request *var_group2;
 2109     unsigned int var_mv_u3d_ep_queue_13_p2;
 2110     int var_mv_u3d_ep_set_halt_17_p1;
 2111     struct usb_gadget *var_group3;
 2112     int var_mv_u3d_vbus_session_24_p1;
 2113     unsigned int var_mv_u3d_vbus_draw_25_p1;
 2114     int var_mv_u3d_pullup_26_p1;
 2115     struct usb_gadget_driver *var_group4;
 2116     struct platform_device *var_group5;
 2117     int res_mv_u3d_probe_41;
 2118     int var_mv_u3d_irq_39_p0;
 2119     void *var_mv_u3d_irq_39_p1;
 2120     int ldv_s_mv_u3d_driver_platform_driver;
 2121     int tmp;
 2122     int tmp___0;
 2309     ldv_s_mv_u3d_driver_platform_driver = 0;
 2295     LDV_IN_INTERRUPT = 1;
 2304     ldv_initialize() { /* Function call is skipped due to function is undefined */}
 2314     goto ldv_34076;
 2314     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
 2317     goto ldv_34075;
 2315     ldv_34075:;
 2318     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
 2318     switch (tmp);
          {
 1788       struct mv_u3d *u3d;
 1789       struct mv_usb_platform_data *pdata;
 1790       void *tmp;
 1791       int retval;
 1792       struct resource *r;
 1793       unsigned long size;
 1794       void *tmp___0;
 1795       void *tmp___1;
 1796       struct lock_class_key __key;
 1797       long tmp___2;
 1798       _Bool tmp___3;
 1799       unsigned long long tmp___4;
 1800       void *tmp___5;
 1801       struct _ddebug descriptor;
 1802       long tmp___6;
 1803       unsigned int tmp___7;
 1804       void *tmp___8;
 1805       void *tmp___9;
 1806       void *tmp___10;
 1807       int tmp___11;
 1808       struct _ddebug descriptor___0;
 1809       long tmp___12;
 1788       u3d = (struct mv_u3d *)0;
            {
 1170         void *__CPAchecker_TMP_0 = (void *)(dev->platform_data);
 1170         return __CPAchecker_TMP_0;;
            }
 1789       pdata = (struct mv_usb_platform_data *)tmp;
 1790       retval = 0;
            {
 1170         void *__CPAchecker_TMP_0 = (void *)(dev->platform_data);
 1170         return __CPAchecker_TMP_0;;
            }
            {
  663         void *tmp;
              {
  480           void *tmp___2;
  495           tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
  495           return tmp___2;;
              }
  663         return tmp;;
            }
 1800       u3d = (struct mv_u3d *)tmp___1;
            {
  288         return &(lock->__annonCompField20.rlock);;
            }
 1806       __raw_spin_lock_init(&(u3d->lock.__annonCompField20.rlock), "&(&u3d->lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */}
            {
              {
 1033           dev->driver_data = data;
 1034           return ;;
              }
  220         return ;;
            }
 1810       u3d->dev = &(dev->dev);
 1811       u3d->vbus = pdata->vbus;
 1813       u3d->clk = clk_get(&(dev->dev), (const char *)0) { /* Function call is skipped due to function is undefined */}
 1814       const void *__CPAchecker_TMP_0 = (const void *)(u3d->clk);
 1819       r = platform_get_resource_byname(dev, 512U, "capregs") { /* Function call is skipped due to function is undefined */}
            {
  195         unsigned long long __CPAchecker_TMP_0 = (unsigned long long)(res->end);
  195         unsigned long long __CPAchecker_TMP_1 = (unsigned long long)(res->start);
  195         return (__CPAchecker_TMP_0 - __CPAchecker_TMP_1) + 1ULL;;
            }
            {
  194         void *tmp;
  194         tmp = ioremap_nocache(offset, size) { /* Function call is skipped due to function is undefined */}
  194         return tmp;;
            }
 1826       u3d->cap_regs = (struct mv_u3d_cap_regs *)tmp___5;
 1828       unsigned long __CPAchecker_TMP_2 = (unsigned long)(u3d->cap_regs);
 1833       descriptor.modname = "mv_u3d_core";
 1833       descriptor.function = "mv_u3d_probe";
 1833       descriptor.filename = "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/8743/dscv_tempdir/dscv/ri/320_7a/drivers/usb/gadget/udc/mv_u3d_core.c";
 1833       descriptor.format = "cap_regs address: 0x%lx/0x%lx\n";
 1833       descriptor.lineno = 1835U;
 1833       descriptor.flags = 0U;
 1833       tmp___6 = __builtin_expect(((long)(descriptor.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */}
            {
   90         int tmp;
              {
              }
   44           int retval;
   45           int tmp;
   44           tmp = ldv_undef_int() { /* Function call is skipped due to function is undefined */}
   44           retval = tmp;
   48           ldv_counter_clk_of_mv_u3d = 1;
            }
 1841       unsigned long __CPAchecker_TMP_5 = (unsigned long)(pdata->phy_init);
 1842       retval = (*(pdata->phy_init))(u3d->phy_regs);
 1844       dev_err((const struct device *)(&(dev->dev)), "init phy error %d\n", retval) { /* Function call is skipped due to function is undefined */}
 1845       goto err_u3d_enable;
 1982       volatile void *__CPAchecker_TMP_25 = (volatile void *)(u3d->cap_regs);
 1982       iounmap(__CPAchecker_TMP_25) { /* Function call is skipped due to function is undefined */}
 1983       err_map_cap_regs:;
 1984       err_get_cap_regs:;
 1985       err_get_clk:;
 1986       clk_put(u3d->clk) { /* Function call is skipped due to function is undefined */}
 1987       kfree((const void *)u3d) { /* Function call is skipped due to function is undefined */}
 1988       err_alloc_private:;
 1989       err_pdata:;
          }
 2653     ldv_check_return_value(res_mv_u3d_probe_41) { /* Function call is skipped due to function is undefined */}
 2654     ldv_check_return_value_probe(res_mv_u3d_probe_41) { /* Function call is skipped due to function is undefined */}
 2656     goto ldv_module_exit;
          {
          }
        }
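What the trace above records: mv_u3d_probe() enables the device clock (the verifier models this by setting ldv_counter_clk_of_mv_u3d = 1 at model line 48), pdata->phy_init() then fails, and the unwind path taken via goto err_u3d_enable only performs iounmap(), clk_put() and kfree(). No clock disable runs before the harness reaches module exit, so the counter is still 1 at the final check, which is the violation reported as this bug.

The sketch below is a self-contained reconstruction of the clock-balance model implied by the ldv_clk_* declarations and counter assignments in the trace; it is an assumption about the generated model, which may differ in detail.

    /* Reconstructed sketch of the LDV clock-balance model (an assumption
     * based on the trace, not the generated source itself). */
    #include <assert.h>

    static int ldv_counter_clk_of_mv_u3d;      /* 0 = clock off, 1 = clock on */

    static int ldv_clk_enable_clk_of_mv_u3d(void)
    {
            int retval = 0;                    /* nondeterministic in the real model */
            if (retval == 0)
                    ldv_counter_clk_of_mv_u3d = 1;  /* matches "48 ldv_counter_clk_of_mv_u3d = 1" */
            return retval;
    }

    static void ldv_clk_disable_clk_of_mv_u3d(void)
    {
            ldv_counter_clk_of_mv_u3d = 0;
    }

    static void ldv_check_final_state(void)
    {
            /* the bug: the traced path ends with the counter still at 1 */
            assert(ldv_counter_clk_of_mv_u3d == 0);
    }

    int main(void)
    {
            if (ldv_clk_enable_clk_of_mv_u3d() == 0)
                    ldv_clk_disable_clk_of_mv_u3d();  /* the balancing call the error path lacks */
            ldv_check_final_state();
            return 0;
    }

Source code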
     1 #ifndef _ASM_X86_IO_H
    2 #define _ASM_X86_IO_H
    3 
    4 /*
    5  * This file contains the definitions for the x86 IO instructions
    6  * inb/inw/inl/outb/outw/outl and the "string versions" of the same
    7  * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
    8  * versions of the single-IO instructions (inb_p/inw_p/..).
    9  *
   10  * This file is not meant to be obfuscating: it's just complicated
   11  * to (a) handle it all in a way that makes gcc able to optimize it
   12  * as well as possible and (b) trying to avoid writing the same thing
   13  * over and over again with slight variations and possibly making a
   14  * mistake somewhere.
   15  */
   16 
   17 /*
   18  * Thanks to James van Artsdalen for a better timing-fix than
   19  * the two short jumps: using outb's to a nonexistent port seems
   20  * to guarantee better timings even on fast machines.
   21  *
   22  * On the other hand, I'd like to be sure of a non-existent port:
   23  * I feel a bit unsafe about using 0x80 (should be safe, though)
   24  *
   25  *		Linus
   26  */
   27 
   28  /*
   29   *  Bit simplified and optimized by Jan Hubicka
   30   *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
   31   *
   32   *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
   33   *  isa_read[wl] and isa_write[wl] fixed
   34   *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   35   */
   36 
   37 #define ARCH_HAS_IOREMAP_WC
   38 #define ARCH_HAS_IOREMAP_WT
   39 
   40 #include <linux/string.h>
   41 #include <linux/compiler.h>
   42 #include <asm/page.h>
   43 #include <asm/early_ioremap.h>
   44 #include <asm/pgtable_types.h>
   45 
   46 #define build_mmio_read(name, size, type, reg, barrier) \
   47 static inline type name(const volatile void __iomem *addr) \
   48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
   49 :"m" (*(volatile type __force *)addr) barrier); return ret; }
   50 
   51 #define build_mmio_write(name, size, type, reg, barrier) \
   52 static inline void name(type val, volatile void __iomem *addr) \
   53 { asm volatile("mov" size " %0,%1": :reg (val), \
   54 "m" (*(volatile type __force *)addr) barrier); }
   55 
   56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
   57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
   58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
   59 
   60 build_mmio_read(__readb, "b", unsigned char, "=q", )
   61 build_mmio_read(__readw, "w", unsigned short, "=r", )
   62 build_mmio_read(__readl, "l", unsigned int, "=r", )
   63 
   64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
   65 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
   66 build_mmio_write(writel, "l", unsigned int, "r", :"memory")
   67 
   68 build_mmio_write(__writeb, "b", unsigned char, "q", )
   69 build_mmio_write(__writew, "w", unsigned short, "r", )
   70 build_mmio_write(__writel, "l", unsigned int, "r", )
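For illustration, hand-expanding the readl() instance generated above yields roughly the following; this is an approximate manual expansion of build_mmio_read(), not verified compiler output:

static inline unsigned int readl(const volatile void __iomem *addr)
{
	unsigned int ret;
	asm volatile("movl %1,%0"
		     : "=r" (ret)                                   /* any general register */
		     : "m" (*(volatile unsigned int __force *)addr) /* the MMIO location   */
		     : "memory");                                   /* compiler barrier    */
	return ret;
}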
   71 
   72 #define readb_relaxed(a) __readb(a)
   73 #define readw_relaxed(a) __readw(a)
   74 #define readl_relaxed(a) __readl(a)
   75 #define __raw_readb __readb
   76 #define __raw_readw __readw
   77 #define __raw_readl __readl
   78 
   79 #define writeb_relaxed(v, a) __writeb(v, a)
   80 #define writew_relaxed(v, a) __writew(v, a)
   81 #define writel_relaxed(v, a) __writel(v, a)
   82 #define __raw_writeb __writeb
   83 #define __raw_writew __writew
   84 #define __raw_writel __writel
   85 
   86 #define mmiowb() barrier()
   87 
   88 #ifdef CONFIG_X86_64
   89 
   90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
   91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
   92 
   93 #define readq_relaxed(a)	readq(a)
   94 #define writeq_relaxed(v, a)	writeq(v, a)
   95 
   96 #define __raw_readq(a)		readq(a)
   97 #define __raw_writeq(val, addr)	writeq(val, addr)
   98 
   99 /* Let people know that we have them */
  100 #define readq			readq
  101 #define writeq			writeq
  102 
  103 #endif
  104 
  105 /**
  106  *	virt_to_phys	-	map virtual addresses to physical
  107  *	@address: address to remap
  108  *
  109  *	The returned physical address is the physical (CPU) mapping for
  110  *	the memory address given. It is only valid to use this function on
  111  *	addresses directly mapped or allocated via kmalloc.
  112  *
  113  *	This function does not give bus mappings for DMA transfers. In
  114  *	almost all conceivable cases a device driver should not be using
  115  *	this function
  116  */
   117  */
  118 static inline phys_addr_t virt_to_phys(volatile void *address)
  119 {
  120 	return __pa(address);
  121 }
  122 
  123 /**
  124  *	phys_to_virt	-	map physical address to virtual
  125  *	@address: address to remap
  126  *
  127  *	The returned virtual address is a current CPU mapping for
  128  *	the memory address given. It is only valid to use this function on
  129  *	addresses that have a kernel mapping
  130  *
  131  *	This function does not handle bus mappings for DMA transfers. In
  132  *	almost all conceivable cases a device driver should not be using
  133  *	this function
  134  */
  135 
  136 static inline void *phys_to_virt(phys_addr_t address)
  137 {
  138 	return __va(address);
  139 }
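A minimal round-trip sketch for the two helpers above, assuming a kmalloc()'d buffer (which lives in the kernel's direct mapping, as the kernel-doc requires); the function name is illustrative only:

static void virt_phys_example(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);
	phys_addr_t pa;

	if (!buf)
		return;
	pa = virt_to_phys(buf);            /* valid: kmalloc memory is directly mapped */
	WARN_ON(phys_to_virt(pa) != buf);  /* the round trip holds for such addresses  */
	kfree(buf);
}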
  140 
  141 /*
  142  * Change "struct page" to physical address.
  143  */
  144 #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
  145 
  146 /*
  147  * ISA I/O bus memory addresses are 1:1 with the physical address.
  148  * However, we truncate the address to unsigned int to avoid undesirable
   149  * promotions in legacy drivers.
  150  */
  151 static inline unsigned int isa_virt_to_bus(volatile void *address)
  152 {
  153 	return (unsigned int)virt_to_phys(address);
  154 }
  155 #define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
  156 #define isa_bus_to_virt		phys_to_virt
  157 
  158 /*
  159  * However PCI ones are not necessarily 1:1 and therefore these interfaces
  160  * are forbidden in portable PCI drivers.
  161  *
  162  * Allow them on x86 for legacy drivers, though.
  163  */
  164 #define virt_to_bus virt_to_phys
  165 #define bus_to_virt phys_to_virt
  166 
  167 /*
  168  * The default ioremap() behavior is non-cached; if you need something
  169  * else, you probably want one of the following.
  170  */
  171 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
  172 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
  173 #define ioremap_uc ioremap_uc
  174 
  175 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
  176 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
  177 
  178 /**
  179  * ioremap     -   map bus memory into CPU space
  180  * @offset:    bus address of the memory
  181  * @size:      size of the resource to map
  182  *
  183  * ioremap performs a platform specific sequence of operations to
  184  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  185  * writew/writel functions and the other mmio helpers. The returned
  186  * address is not guaranteed to be usable directly as a virtual
  187  * address.
  188  *
  189  * If the area you are trying to map is a PCI BAR you should have a
  190  * look at pci_iomap().
  191  */
  192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
  193 {
  194 	return ioremap_nocache(offset, size);
  195 }
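A short sketch of the usual ioremap()/iounmap() pairing in a driver; the base address, size and register offset here are made-up illustration values:

#define EXAMPLE_MMIO_BASE 0xfed00000UL	/* hypothetical device window */
#define EXAMPLE_MMIO_SIZE 0x1000UL

static int example_mmio_poke(void)
{
	void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	u32 val;

	if (!regs)
		return -ENOMEM;
	val = readl(regs + 0x10);	/* read a register at offset 0x10 */
	writel(val | 0x1, regs + 0x10);	/* set bit 0 and write it back   */
	iounmap(regs);
	return 0;
}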
  196 
  197 extern void iounmap(volatile void __iomem *addr);
  198 
  199 extern void set_iounmap_nonlazy(void);
  200 
  201 #ifdef __KERNEL__
  202 
  203 #include <asm-generic/iomap.h>
  204 
  205 /*
  206  * Convert a virtual cached pointer to an uncached pointer
  207  */
  208 #define xlate_dev_kmem_ptr(p)	p
  209 
  210 /**
  211  * memset_io	Set a range of I/O memory to a constant value
  212  * @addr:	The beginning of the I/O-memory range to set
  213  * @val:	The value to set the memory to
  214  * @count:	The number of bytes to set
  215  *
  216  * Set a range of I/O memory to a given value.
  217  */
  218 static inline void
  219 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
  220 {
  221 	memset((void __force *)addr, val, count);
  222 }
  223 
  224 /**
  225  * memcpy_fromio	Copy a block of data from I/O memory
  226  * @dst:		The (RAM) destination for the copy
  227  * @src:		The (I/O memory) source for the data
  228  * @count:		The number of bytes to copy
  229  *
  230  * Copy a block of data from I/O memory.
  231  */
  232 static inline void
  233 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
  234 {
  235 	memcpy(dst, (const void __force *)src, count);
  236 }
  237 
  238 /**
  239  * memcpy_toio		Copy a block of data into I/O memory
  240  * @dst:		The (I/O memory) destination for the copy
  241  * @src:		The (RAM) source for the data
  242  * @count:		The number of bytes to copy
  243  *
  244  * Copy a block of data to I/O memory.
  245  */
  246 static inline void
  247 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
  248 {
  249 	memcpy((void __force *)dst, src, count);
  250 }
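Taken together, the three helpers above cover the common fill/copy cases. A brief sketch, where regs is assumed to be a pointer obtained earlier from ioremap():

static void example_io_block_copy(void __iomem *regs, void *dst, const void *src)
{
	memset_io(regs, 0, 256);	/* clear 256 bytes of device memory    */
	memcpy_toio(regs, src, 128);	/* copy a block from RAM to the device */
	memcpy_fromio(dst, regs, 128);	/* and read it back into RAM           */
}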
  251 
  252 /*
  253  * ISA space is 'always mapped' on a typical x86 system, no need to
  254  * explicitly ioremap() it. The fact that the ISA IO space is mapped
  255  * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
  256  * are physical addresses. The following constant pointer can be
  257  * used as the IO-area pointer (it can be iounmapped as well, so the
   258  * analogy with PCI is quite strong):
  259  */
  260 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
  261 
  262 /*
  263  *	Cache management
  264  *
   265  *	This is needed for two cases:
  266  *	1. Out of order aware processors
  267  *	2. Accidentally out of order processors (PPro errata #51)
  268  */
  269 
  270 static inline void flush_write_buffers(void)
  271 {
  272 #if defined(CONFIG_X86_PPRO_FENCE)
  273 	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
  274 #endif
  275 }
  276 
  277 #endif /* __KERNEL__ */
  278 
  279 extern void native_io_delay(void);
  280 
  281 extern int io_delay_type;
  282 extern void io_delay_init(void);
  283 
  284 #if defined(CONFIG_PARAVIRT)
  285 #include <asm/paravirt.h>
  286 #else
  287 
  288 static inline void slow_down_io(void)
  289 {
  290 	native_io_delay();
  291 #ifdef REALLY_SLOW_IO
  292 	native_io_delay();
  293 	native_io_delay();
  294 	native_io_delay();
  295 #endif
  296 }
  297 
  298 #endif
  299 
  300 #define BUILDIO(bwl, bw, type)						\
  301 static inline void out##bwl(unsigned type value, int port)		\
  302 {									\
  303 	asm volatile("out" #bwl " %" #bw "0, %w1"			\
  304 		     : : "a"(value), "Nd"(port));			\
  305 }									\
  306 									\
  307 static inline unsigned type in##bwl(int port)				\
  308 {									\
  309 	unsigned type value;						\
  310 	asm volatile("in" #bwl " %w1, %" #bw "0"			\
  311 		     : "=a"(value) : "Nd"(port));			\
  312 	return value;							\
  313 }									\
  314 									\
  315 static inline void out##bwl##_p(unsigned type value, int port)		\
  316 {									\
  317 	out##bwl(value, port);						\
  318 	slow_down_io();							\
  319 }									\
  320 									\
  321 static inline unsigned type in##bwl##_p(int port)			\
  322 {									\
  323 	unsigned type value = in##bwl(port);				\
  324 	slow_down_io();							\
  325 	return value;							\
  326 }									\
  327 									\
  328 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
  329 {									\
  330 	asm volatile("rep; outs" #bwl					\
  331 		     : "+S"(addr), "+c"(count) : "d"(port));		\
  332 }									\
  333 									\
  334 static inline void ins##bwl(int port, void *addr, unsigned long count)	\
  335 {									\
  336 	asm volatile("rep; ins" #bwl					\
  337 		     : "+D"(addr), "+c"(count) : "d"(port));		\
  338 }
  339 
  340 BUILDIO(b, b, char)
  341 BUILDIO(w, w, short)
  342 BUILDIO(l, , int)
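Hand-expanding BUILDIO(b, b, char) gives, approximately, the classic byte-wide port helpers (an illustrative expansion only; outb_p/inb_p and the string variants follow the same pattern):

static inline void outb(unsigned char value, int port)
{
	asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
}

static inline unsigned char inb(int port)
{
	unsigned char value;
	asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
	return value;
}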
  343 
  344 extern void *xlate_dev_mem_ptr(phys_addr_t phys);
  345 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
  346 
  347 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  348 				enum page_cache_mode pcm);
  349 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  350 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
  351 
  352 extern bool is_early_ioremap_ptep(pte_t *ptep);
  353 
  354 #ifdef CONFIG_XEN
  355 #include <xen/xen.h>
  356 struct bio_vec;
  357 
  358 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
  359 				      const struct bio_vec *vec2);
  360 
  361 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
  362 	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
  363 	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
  364 #endif	/* CONFIG_XEN */
  365 
  366 #define IO_SPACE_LIMIT 0xffff
  367 
  368 #ifdef CONFIG_MTRR
  369 extern int __must_check arch_phys_wc_index(int handle);
  370 #define arch_phys_wc_index arch_phys_wc_index
  371 
  372 extern int __must_check arch_phys_wc_add(unsigned long base,
  373 					 unsigned long size);
  374 extern void arch_phys_wc_del(int handle);
  375 #define arch_phys_wc_add arch_phys_wc_add
  376 #endif
  377 
  378 #ifdef CONFIG_X86_PAT
  379 extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
  380 extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
  381 #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
  382 #endif
  383 
   384 #endif /* _ASM_X86_IO_H */
     1 
    2 /*
    3  * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
    4  *
    5  * This program is free software; you can redistribute it and/or modify it
    6  * under the terms and conditions of the GNU General Public License,
    7  * version 2, as published by the Free Software Foundation.
    8  */
    9 
   10 #include <linux/module.h>
   11 #include <linux/dma-mapping.h>
   12 #include <linux/dmapool.h>
   13 #include <linux/kernel.h>
   14 #include <linux/delay.h>
   15 #include <linux/ioport.h>
   16 #include <linux/sched.h>
   17 #include <linux/slab.h>
   18 #include <linux/errno.h>
   19 #include <linux/timer.h>
   20 #include <linux/list.h>
   21 #include <linux/notifier.h>
   22 #include <linux/interrupt.h>
   23 #include <linux/moduleparam.h>
   24 #include <linux/device.h>
   25 #include <linux/usb/ch9.h>
   26 #include <linux/usb/gadget.h>
   27 #include <linux/pm.h>
   28 #include <linux/io.h>
   29 #include <linux/irq.h>
   30 #include <linux/platform_device.h>
   31 #include <linux/platform_data/mv_usb.h>
   32 #include <linux/clk.h>
   33 
   34 #include "mv_u3d.h"
   35 
   36 #define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
   37 
   38 static const char driver_name[] = "mv_u3d";
   39 static const char driver_desc[] = DRIVER_DESC;
   40 
   41 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
   42 static void mv_u3d_stop_activity(struct mv_u3d *u3d,
   43 			struct usb_gadget_driver *driver);
   44 
   45 /* for endpoint 0 operations */
   46 static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
   47 	.bLength =		USB_DT_ENDPOINT_SIZE,
   48 	.bDescriptorType =	USB_DT_ENDPOINT,
   49 	.bEndpointAddress =	0,
   50 	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
   51 	.wMaxPacketSize =	MV_U3D_EP0_MAX_PKT_SIZE,
   52 };
   53 
   54 static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
   55 {
   56 	struct mv_u3d_ep *ep;
   57 	u32 epxcr;
   58 	int i;
   59 
   60 	for (i = 0; i < 2; i++) {
   61 		ep = &u3d->eps[i];
   62 		ep->u3d = u3d;
   63 
   64 		/* ep0 ep context, ep0 in and out share the same ep context */
   65 		ep->ep_context = &u3d->ep_context[1];
   66 	}
   67 
   68 	/* reset ep state machine */
   69 	/* reset ep0 out */
   70 	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
   71 	epxcr |= MV_U3D_EPXCR_EP_INIT;
   72 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
   73 	udelay(5);
   74 	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
   75 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
   76 
   77 	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
   78 		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
   79 		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
   80 		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
   81 		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
   82 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
   83 
   84 	/* reset ep0 in */
   85 	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
   86 	epxcr |= MV_U3D_EPXCR_EP_INIT;
   87 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
   88 	udelay(5);
   89 	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
   90 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
   91 
   92 	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
   93 		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
   94 		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
   95 		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
   96 		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
   97 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
   98 }
   99 
  100 static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
  101 {
  102 	u32 tmp;
  103 	dev_dbg(u3d->dev, "%s\n", __func__);
  104 
  105 	/* set TX and RX to stall */
  106 	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
  107 	tmp |= MV_U3D_EPXCR_EP_HALT;
  108 	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
  109 
  110 	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
  111 	tmp |= MV_U3D_EPXCR_EP_HALT;
  112 	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
  113 
  114 	/* update ep0 state */
  115 	u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
  116 	u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
  117 }
  118 
  119 static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
  120 	struct mv_u3d_req *curr_req)
  121 {
  122 	struct mv_u3d_trb	*curr_trb;
  123 	int actual, remaining_length = 0;
  124 	int direction, ep_num;
  125 	int retval = 0;
  126 	u32 tmp, status, length;
  127 
  128 	direction = index % 2;
  129 	ep_num = index / 2;
  130 
  131 	actual = curr_req->req.length;
  132 
  133 	while (!list_empty(&curr_req->trb_list)) {
  134 		curr_trb = list_entry(curr_req->trb_list.next,
  135 					struct mv_u3d_trb, trb_list);
  136 		if (!curr_trb->trb_hw->ctrl.own) {
  137 			dev_err(u3d->dev, "%s, TRB own error!\n",
  138 				u3d->eps[index].name);
  139 			return 1;
  140 		}
  141 
  142 		curr_trb->trb_hw->ctrl.own = 0;
  143 		if (direction == MV_U3D_EP_DIR_OUT)
  144 			tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
  145 		else
  146 			tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
  147 
  148 		status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
  149 		length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
  150 
  151 		if (status == MV_U3D_COMPLETE_SUCCESS ||
  152 			(status == MV_U3D_COMPLETE_SHORT_PACKET &&
  153 			direction == MV_U3D_EP_DIR_OUT)) {
  154 			remaining_length += length;
  155 			actual -= remaining_length;
  156 		} else {
  157 			dev_err(u3d->dev,
  158 				"complete_tr error: ep=%d %s: error = 0x%x\n",
  159 				index >> 1, direction ? "SEND" : "RECV",
  160 				status);
  161 			retval = -EPROTO;
  162 		}
  163 
  164 		list_del_init(&curr_trb->trb_list);
  165 	}
  166 	if (retval)
  167 		return retval;
  168 
  169 	curr_req->req.actual = actual;
  170 	return 0;
  171 }
  172 
  173 /*
  174  * mv_u3d_done() - retire a request; caller blocked irqs
  175  * @status : request status to be set, only works when
  176  * request is still in progress.
  177  */
  178 static
  179 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
  180 	__releases(&ep->udc->lock)
  181 	__acquires(&ep->udc->lock)
  182 {
  183 	struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
  184 
  185 	dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
  186 	/* Removed the req from ep queue */
  187 	list_del_init(&req->queue);
  188 
  189 	/* req.status should be set as -EINPROGRESS in ep_queue() */
  190 	if (req->req.status == -EINPROGRESS)
  191 		req->req.status = status;
  192 	else
  193 		status = req->req.status;
  194 
  195 	/* Free trb for the request */
  196 	if (!req->chain)
  197 		dma_pool_free(u3d->trb_pool,
  198 			req->trb_head->trb_hw, req->trb_head->trb_dma);
  199 	else {
  200 		dma_unmap_single(ep->u3d->gadget.dev.parent,
  201 			(dma_addr_t)req->trb_head->trb_dma,
  202 			req->trb_count * sizeof(struct mv_u3d_trb_hw),
  203 			DMA_BIDIRECTIONAL);
  204 		kfree(req->trb_head->trb_hw);
  205 	}
  206 	kfree(req->trb_head);
  207 
  208 	usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
  209 
  210 	if (status && (status != -ESHUTDOWN)) {
  211 		dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
  212 			ep->ep.name, &req->req, status,
  213 			req->req.actual, req->req.length);
  214 	}
  215 
  216 	spin_unlock(&ep->u3d->lock);
  217 
  218 	usb_gadget_giveback_request(&ep->ep, &req->req);
  219 
  220 	spin_lock(&ep->u3d->lock);
  221 }
  222 
  223 static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
  224 {
  225 	u32 tmp, direction;
  226 	struct mv_u3d *u3d;
  227 	struct mv_u3d_ep_context *ep_context;
  228 	int retval = 0;
  229 
  230 	u3d = ep->u3d;
  231 	direction = mv_u3d_ep_dir(ep);
  232 
   233 	/* ep0 in and out share the same ep context slot 1 */
  234 	if (ep->ep_num == 0)
  235 		ep_context = &(u3d->ep_context[1]);
  236 	else
  237 		ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
  238 
  239 	/* check if the pipe is empty or not */
  240 	if (!list_empty(&ep->queue)) {
  241 		dev_err(u3d->dev, "add trb to non-empty queue!\n");
  242 		retval = -ENOMEM;
  243 		WARN_ON(1);
  244 	} else {
  245 		ep_context->rsvd0 = cpu_to_le32(1);
  246 		ep_context->rsvd1 = 0;
  247 
  248 		/* Configure the trb address and set the DCS bit.
  249 		 * Both DCS bit and own bit in trb should be set.
  250 		 */
  251 		ep_context->trb_addr_lo =
  252 			cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
  253 		ep_context->trb_addr_hi = 0;
  254 
  255 		/* Ensure that updates to the EP Context will
   256 		 * occur before ringing the doorbell.
  257 		 */
  258 		wmb();
  259 
  260 		/* ring bell the ep */
  261 		if (ep->ep_num == 0)
  262 			tmp = 0x1;
  263 		else
  264 			tmp = ep->ep_num * 2
  265 				+ ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);
  266 
  267 		iowrite32(tmp, &u3d->op_regs->doorbell);
  268 	}
  269 	return retval;
  270 }
  271 
  272 static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
  273 				unsigned *length, dma_addr_t *dma)
  274 {
  275 	u32 temp;
  276 	unsigned int direction;
  277 	struct mv_u3d_trb *trb;
  278 	struct mv_u3d_trb_hw *trb_hw;
  279 	struct mv_u3d *u3d;
  280 
  281 	/* how big will this transfer be? */
  282 	*length = req->req.length - req->req.actual;
  283 	BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
  284 
  285 	u3d = req->ep->u3d;
  286 
  287 	trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
  288 	if (!trb)
  289 		return NULL;
  290 
  291 	/*
  292 	 * Be careful that no _GFP_HIGHMEM is set,
   293 	 * or we cannot use dma_to_virt;
   294 	 * GFP_KERNEL cannot be used while a spinlock is held.
  295 	 */
  296 	trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
  297 	if (!trb_hw) {
  298 		kfree(trb);
  299 		dev_err(u3d->dev,
  300 			"%s, dma_pool_alloc fail\n", __func__);
  301 		return NULL;
  302 	}
  303 	trb->trb_dma = *dma;
  304 	trb->trb_hw = trb_hw;
  305 
  306 	/* initialize buffer page pointers */
  307 	temp = (u32)(req->req.dma + req->req.actual);
  308 
  309 	trb_hw->buf_addr_lo = cpu_to_le32(temp);
  310 	trb_hw->buf_addr_hi = 0;
  311 	trb_hw->trb_len = cpu_to_le32(*length);
  312 	trb_hw->ctrl.own = 1;
  313 
  314 	if (req->ep->ep_num == 0)
  315 		trb_hw->ctrl.type = TYPE_DATA;
  316 	else
  317 		trb_hw->ctrl.type = TYPE_NORMAL;
  318 
  319 	req->req.actual += *length;
  320 
  321 	direction = mv_u3d_ep_dir(req->ep);
  322 	if (direction == MV_U3D_EP_DIR_IN)
  323 		trb_hw->ctrl.dir = 1;
  324 	else
  325 		trb_hw->ctrl.dir = 0;
  326 
  327 	/* Enable interrupt for the last trb of a request */
  328 	if (!req->req.no_interrupt)
  329 		trb_hw->ctrl.ioc = 1;
  330 
  331 	trb_hw->ctrl.chain = 0;
  332 
  333 	wmb();
  334 	return trb;
  335 }
  336 
  337 static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
  338 		struct mv_u3d_trb *trb, int *is_last)
  339 {
  340 	u32 temp;
  341 	unsigned int direction;
  342 	struct mv_u3d *u3d;
  343 
  344 	/* how big will this transfer be? */
  345 	*length = min(req->req.length - req->req.actual,
  346 			(unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
  347 
  348 	u3d = req->ep->u3d;
  349 
  350 	trb->trb_dma = 0;
  351 
  352 	/* initialize buffer page pointers */
  353 	temp = (u32)(req->req.dma + req->req.actual);
  354 
  355 	trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
  356 	trb->trb_hw->buf_addr_hi = 0;
  357 	trb->trb_hw->trb_len = cpu_to_le32(*length);
  358 	trb->trb_hw->ctrl.own = 1;
  359 
  360 	if (req->ep->ep_num == 0)
  361 		trb->trb_hw->ctrl.type = TYPE_DATA;
  362 	else
  363 		trb->trb_hw->ctrl.type = TYPE_NORMAL;
  364 
  365 	req->req.actual += *length;
  366 
  367 	direction = mv_u3d_ep_dir(req->ep);
  368 	if (direction == MV_U3D_EP_DIR_IN)
  369 		trb->trb_hw->ctrl.dir = 1;
  370 	else
  371 		trb->trb_hw->ctrl.dir = 0;
  372 
  373 	/* zlp is needed if req->req.zero is set */
  374 	if (req->req.zero) {
  375 		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
  376 			*is_last = 1;
  377 		else
  378 			*is_last = 0;
  379 	} else if (req->req.length == req->req.actual)
  380 		*is_last = 1;
  381 	else
  382 		*is_last = 0;
  383 
  384 	/* Enable interrupt for the last trb of a request */
  385 	if (*is_last && !req->req.no_interrupt)
  386 		trb->trb_hw->ctrl.ioc = 1;
  387 
  388 	if (*is_last)
  389 		trb->trb_hw->ctrl.chain = 0;
  390 	else {
  391 		trb->trb_hw->ctrl.chain = 1;
  392 		dev_dbg(u3d->dev, "chain trb\n");
  393 	}
  394 
  395 	wmb();
  396 
  397 	return 0;
  398 }
  399 
   400 /* generate a TRB linked list for a request.
   401  * The USB controller only supports contiguous TRB chains,
   402  * i.e. the TRB structures' physical addresses must be contiguous.
  403  */
  404 static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
  405 {
  406 	unsigned count;
  407 	int is_last;
  408 	struct mv_u3d_trb *trb;
  409 	struct mv_u3d_trb_hw *trb_hw;
  410 	struct mv_u3d *u3d;
  411 	dma_addr_t dma;
  412 	unsigned length;
  413 	unsigned trb_num;
  414 
  415 	u3d = req->ep->u3d;
  416 
  417 	INIT_LIST_HEAD(&req->trb_list);
  418 
  419 	length = req->req.length - req->req.actual;
   420 	/* normally the request transfer length is less than 16KB;
   421 	 * we use mv_u3d_build_trb_one() to optimize that case.
  422 	 */
  423 	if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
  424 		trb = mv_u3d_build_trb_one(req, &count, &dma);
  425 		list_add_tail(&trb->trb_list, &req->trb_list);
  426 		req->trb_head = trb;
  427 		req->trb_count = 1;
  428 		req->chain = 0;
  429 	} else {
  430 		trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
  431 		if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
  432 			trb_num++;
  433 
  434 		trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
  435 		if (!trb)
  436 			return -ENOMEM;
  437 
  438 		trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
  439 		if (!trb_hw) {
  440 			kfree(trb);
  441 			return -ENOMEM;
  442 		}
  443 
  444 		do {
  445 			trb->trb_hw = trb_hw;
  446 			if (mv_u3d_build_trb_chain(req, &count,
  447 						trb, &is_last)) {
  448 				dev_err(u3d->dev,
  449 					"%s, mv_u3d_build_trb_chain fail\n",
  450 					__func__);
  451 				return -EIO;
  452 			}
  453 
  454 			list_add_tail(&trb->trb_list, &req->trb_list);
  455 			req->trb_count++;
  456 			trb++;
  457 			trb_hw++;
  458 		} while (!is_last);
  459 
  460 		req->trb_head = list_entry(req->trb_list.next,
  461 					struct mv_u3d_trb, trb_list);
  462 		req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
  463 					req->trb_head->trb_hw,
  464 					trb_num * sizeof(*trb_hw),
  465 					DMA_BIDIRECTIONAL);
  466 		if (dma_mapping_error(u3d->gadget.dev.parent,
  467 					req->trb_head->trb_dma)) {
  468 			kfree(req->trb_head->trb_hw);
  469 			kfree(req->trb_head);
  470 			return -EFAULT;
  471 		}
  472 
  473 		req->chain = 1;
  474 	}
  475 
  476 	return 0;
  477 }
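A worked example of the chain sizing in the else branch above: assuming MV_U3D_EP_MAX_LENGTH_TRANSFER is 16 KB (its actual value is defined in mv_u3d.h), a 40 KB request is split as follows:

unsigned length  = 40960;		/* 40 KB left to transfer (example value) */
unsigned max     = 16384;		/* assumed MV_U3D_EP_MAX_LENGTH_TRANSFER  */
unsigned trb_num = length / max;	/* 40960 / 16384 = 2                      */
if (length % max)			/* 40960 % 16384 = 8192, so one more      */
	trb_num++;			/* -> 3 TRBs: 16 KB + 16 KB + 8 KB        */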
  478 
  479 static int
  480 mv_u3d_start_queue(struct mv_u3d_ep *ep)
  481 {
  482 	struct mv_u3d *u3d = ep->u3d;
  483 	struct mv_u3d_req *req;
  484 	int ret;
  485 
  486 	if (!list_empty(&ep->req_list) && !ep->processing)
  487 		req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
  488 	else
  489 		return 0;
  490 
  491 	ep->processing = 1;
  492 
  493 	/* set up dma mapping */
  494 	ret = usb_gadget_map_request(&u3d->gadget, &req->req,
  495 					mv_u3d_ep_dir(ep));
  496 	if (ret)
  497 		goto break_processing;
  498 
  499 	req->req.status = -EINPROGRESS;
  500 	req->req.actual = 0;
  501 	req->trb_count = 0;
  502 
  503 	/* build trbs */
  504 	ret = mv_u3d_req_to_trb(req);
  505 	if (ret) {
  506 		dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
  507 		goto break_processing;
  508 	}
  509 
  510 	/* and push them to device queue */
  511 	ret = mv_u3d_queue_trb(ep, req);
  512 	if (ret)
  513 		goto break_processing;
  514 
  515 	/* irq handler advances the queue */
  516 	list_add_tail(&req->queue, &ep->queue);
  517 
  518 	return 0;
  519 
  520 break_processing:
  521 	ep->processing = 0;
  522 	return ret;
  523 }
  524 
  525 static int mv_u3d_ep_enable(struct usb_ep *_ep,
  526 		const struct usb_endpoint_descriptor *desc)
  527 {
  528 	struct mv_u3d *u3d;
  529 	struct mv_u3d_ep *ep;
  530 	u16 max = 0;
  531 	unsigned maxburst = 0;
  532 	u32 epxcr, direction;
  533 
  534 	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
  535 		return -EINVAL;
  536 
  537 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  538 	u3d = ep->u3d;
  539 
  540 	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
  541 		return -ESHUTDOWN;
  542 
  543 	direction = mv_u3d_ep_dir(ep);
  544 	max = le16_to_cpu(desc->wMaxPacketSize);
  545 
  546 	if (!_ep->maxburst)
  547 		_ep->maxburst = 1;
  548 	maxburst = _ep->maxburst;
  549 
  550 	/* Set the max burst size */
  551 	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
  552 	case USB_ENDPOINT_XFER_BULK:
  553 		if (maxburst > 16) {
  554 			dev_dbg(u3d->dev,
  555 				"max burst should not be greater "
  556 				"than 16 on bulk ep\n");
  557 			maxburst = 1;
  558 			_ep->maxburst = maxburst;
  559 		}
  560 		dev_dbg(u3d->dev,
  561 			"maxburst: %d on bulk %s\n", maxburst, ep->name);
  562 		break;
  563 	case USB_ENDPOINT_XFER_CONTROL:
  564 		/* control transfer only supports maxburst as one */
  565 		maxburst = 1;
  566 		_ep->maxburst = maxburst;
  567 		break;
  568 	case USB_ENDPOINT_XFER_INT:
  569 		if (maxburst != 1) {
  570 			dev_dbg(u3d->dev,
  571 				"max burst should be 1 on int ep "
  572 				"if transfer size is not 1024\n");
  573 			maxburst = 1;
  574 			_ep->maxburst = maxburst;
  575 		}
  576 		break;
  577 	case USB_ENDPOINT_XFER_ISOC:
  578 		if (maxburst != 1) {
  579 			dev_dbg(u3d->dev,
  580 				"max burst should be 1 on isoc ep "
  581 				"if transfer size is not 1024\n");
  582 			maxburst = 1;
  583 			_ep->maxburst = maxburst;
  584 		}
  585 		break;
  586 	default:
  587 		goto en_done;
  588 	}
  589 
  590 	ep->ep.maxpacket = max;
  591 	ep->ep.desc = desc;
  592 	ep->enabled = 1;
  593 
  594 	/* Enable the endpoint for Rx or Tx and set the endpoint type */
  595 	if (direction == MV_U3D_EP_DIR_OUT) {
  596 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  597 		epxcr |= MV_U3D_EPXCR_EP_INIT;
  598 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  599 		udelay(5);
  600 		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
  601 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  602 
  603 		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
  604 		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
  605 		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  606 		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
  607 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
  608 	} else {
  609 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  610 		epxcr |= MV_U3D_EPXCR_EP_INIT;
  611 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  612 		udelay(5);
  613 		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
  614 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  615 
  616 		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
  617 		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
  618 		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  619 		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
  620 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
  621 	}
  622 
  623 	return 0;
  624 en_done:
  625 	return -EINVAL;
  626 }
  627 
  628 static int  mv_u3d_ep_disable(struct usb_ep *_ep)
  629 {
  630 	struct mv_u3d *u3d;
  631 	struct mv_u3d_ep *ep;
  632 	u32 epxcr, direction;
  633 	unsigned long flags;
  634 
  635 	if (!_ep)
  636 		return -EINVAL;
  637 
  638 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  639 	if (!ep->ep.desc)
  640 		return -EINVAL;
  641 
  642 	u3d = ep->u3d;
  643 
  644 	direction = mv_u3d_ep_dir(ep);
  645 
  646 	/* nuke all pending requests (does flush) */
  647 	spin_lock_irqsave(&u3d->lock, flags);
  648 	mv_u3d_nuke(ep, -ESHUTDOWN);
  649 	spin_unlock_irqrestore(&u3d->lock, flags);
  650 
  651 	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
  652 	if (direction == MV_U3D_EP_DIR_OUT) {
  653 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
  654 		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  655 		      | USB_ENDPOINT_XFERTYPE_MASK);
  656 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
  657 	} else {
  658 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
  659 		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  660 		      | USB_ENDPOINT_XFERTYPE_MASK);
  661 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
  662 	}
  663 
  664 	ep->enabled = 0;
  665 
  666 	ep->ep.desc = NULL;
  667 	return 0;
  668 }
  669 
  670 static struct usb_request *
  671 mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
  672 {
  673 	struct mv_u3d_req *req = NULL;
  674 
  675 	req = kzalloc(sizeof *req, gfp_flags);
  676 	if (!req)
  677 		return NULL;
  678 
  679 	INIT_LIST_HEAD(&req->queue);
  680 
  681 	return &req->req;
  682 }
  683 
  684 static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
  685 {
  686 	struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
  687 
  688 	kfree(req);
  689 }
  690 
  691 static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
  692 {
  693 	struct mv_u3d *u3d;
  694 	u32 direction;
  695 	struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
  696 	unsigned int loops;
  697 	u32 tmp;
  698 
  699 	/* if endpoint is not enabled, cannot flush endpoint */
  700 	if (!ep->enabled)
  701 		return;
  702 
  703 	u3d = ep->u3d;
  704 	direction = mv_u3d_ep_dir(ep);
  705 
   706 	/* ep0 needs the flush bit cleared after flushing the fifo. */
  707 	if (!ep->ep_num) {
  708 		if (direction == MV_U3D_EP_DIR_OUT) {
  709 			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
  710 			tmp |= MV_U3D_EPXCR_EP_FLUSH;
  711 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
  712 			udelay(10);
  713 			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
  714 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
  715 		} else {
  716 			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
  717 			tmp |= MV_U3D_EPXCR_EP_FLUSH;
  718 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
  719 			udelay(10);
  720 			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
  721 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
  722 		}
  723 		return;
  724 	}
  725 
  726 	if (direction == MV_U3D_EP_DIR_OUT) {
  727 		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  728 		tmp |= MV_U3D_EPXCR_EP_FLUSH;
  729 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  730 
  731 		/* Wait until flushing completed */
  732 		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
  733 		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
  734 			MV_U3D_EPXCR_EP_FLUSH) {
  735 			/*
  736 			 * EP_FLUSH bit should be cleared to indicate this
  737 			 * operation is complete
  738 			 */
  739 			if (loops == 0) {
  740 				dev_dbg(u3d->dev,
  741 				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
  742 				    direction ? "in" : "out");
  743 				return;
  744 			}
  745 			loops--;
  746 			udelay(LOOPS_USEC);
  747 		}
  748 	} else {	/* EP_DIR_IN */
  749 		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  750 		tmp |= MV_U3D_EPXCR_EP_FLUSH;
  751 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  752 
  753 		/* Wait until flushing completed */
  754 		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
  755 		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
  756 			MV_U3D_EPXCR_EP_FLUSH) {
  757 			/*
  758 			* EP_FLUSH bit should be cleared to indicate this
  759 			* operation is complete
  760 			*/
  761 			if (loops == 0) {
  762 				dev_dbg(u3d->dev,
  763 				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
  764 				    direction ? "in" : "out");
  765 				return;
  766 			}
  767 			loops--;
  768 			udelay(LOOPS_USEC);
  769 		}
  770 	}
  771 }
  772 
  773 /* queues (submits) an I/O request to an endpoint */
  774 static int
  775 mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
  776 {
  777 	struct mv_u3d_ep *ep;
  778 	struct mv_u3d_req *req;
  779 	struct mv_u3d *u3d;
  780 	unsigned long flags;
  781 	int is_first_req = 0;
  782 
  783 	if (unlikely(!_ep || !_req))
  784 		return -EINVAL;
  785 
  786 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  787 	u3d = ep->u3d;
  788 
  789 	req = container_of(_req, struct mv_u3d_req, req);
  790 
  791 	if (!ep->ep_num
  792 		&& u3d->ep0_state == MV_U3D_STATUS_STAGE
  793 		&& !_req->length) {
  794 		dev_dbg(u3d->dev, "ep0 status stage\n");
  795 		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
  796 		return 0;
  797 	}
  798 
  799 	dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n",
  800 			__func__, _ep->name, req);
  801 
  802 	/* catch various bogus parameters */
  803 	if (!req->req.complete || !req->req.buf
  804 			|| !list_empty(&req->queue)) {
  805 		dev_err(u3d->dev,
  806 			"%s, bad params, _req: 0x%p,"
  807 			"req->req.complete: 0x%p, req->req.buf: 0x%p,"
  808 			"list_empty: 0x%x\n",
  809 			__func__, _req,
  810 			req->req.complete, req->req.buf,
  811 			list_empty(&req->queue));
  812 		return -EINVAL;
  813 	}
  814 	if (unlikely(!ep->ep.desc)) {
  815 		dev_err(u3d->dev, "%s, bad ep\n", __func__);
  816 		return -EINVAL;
  817 	}
  818 	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
  819 		if (req->req.length > ep->ep.maxpacket)
  820 			return -EMSGSIZE;
  821 	}
  822 
  823 	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
  824 		dev_err(u3d->dev,
  825 			"bad params of driver/speed\n");
  826 		return -ESHUTDOWN;
  827 	}
  828 
  829 	req->ep = ep;
  830 
  831 	/* Software list handles usb request. */
  832 	spin_lock_irqsave(&ep->req_lock, flags);
  833 	is_first_req = list_empty(&ep->req_list);
  834 	list_add_tail(&req->list, &ep->req_list);
  835 	spin_unlock_irqrestore(&ep->req_lock, flags);
  836 	if (!is_first_req) {
  837 		dev_dbg(u3d->dev, "list is not empty\n");
  838 		return 0;
  839 	}
  840 
  841 	dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
  842 	spin_lock_irqsave(&u3d->lock, flags);
  843 	mv_u3d_start_queue(ep);
  844 	spin_unlock_irqrestore(&u3d->lock, flags);
  845 	return 0;
  846 }
  847 
  848 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
  849 static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
  850 {
  851 	struct mv_u3d_ep *ep;
  852 	struct mv_u3d_req *req;
  853 	struct mv_u3d *u3d;
  854 	struct mv_u3d_ep_context *ep_context;
  855 	struct mv_u3d_req *next_req;
  856 
  857 	unsigned long flags;
  858 	int ret = 0;
  859 
  860 	if (!_ep || !_req)
  861 		return -EINVAL;
  862 
  863 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  864 	u3d = ep->u3d;
  865 
  866 	spin_lock_irqsave(&ep->u3d->lock, flags);
  867 
  868 	/* make sure it's actually queued on this endpoint */
  869 	list_for_each_entry(req, &ep->queue, queue) {
  870 		if (&req->req == _req)
  871 			break;
  872 	}
  873 	if (&req->req != _req) {
  874 		ret = -EINVAL;
  875 		goto out;
  876 	}
  877 
  878 	/* The request is in progress, or completed but not dequeued */
  879 	if (ep->queue.next == &req->queue) {
  880 		_req->status = -ECONNRESET;
  881 		mv_u3d_ep_fifo_flush(_ep);
  882 
  883 		/* The request isn't the last request in this ep queue */
  884 		if (req->queue.next != &ep->queue) {
  885 			dev_dbg(u3d->dev,
  886 				"it is the last request in this ep queue\n");
  887 			ep_context = ep->ep_context;
  888 			next_req = list_entry(req->queue.next,
  889 					struct mv_u3d_req, queue);
  890 
  891 			/* Point first TRB of next request to the EP context. */
  892 			iowrite32((unsigned long) next_req->trb_head,
  893 					&ep_context->trb_addr_lo);
  894 		} else {
  895 			struct mv_u3d_ep_context *ep_context;
  896 			ep_context = ep->ep_context;
  897 			ep_context->trb_addr_lo = 0;
  898 			ep_context->trb_addr_hi = 0;
  899 		}
  900 
  901 	} else
  902 		WARN_ON(1);
  903 
  904 	mv_u3d_done(ep, req, -ECONNRESET);
  905 
  906 	/* remove the req from the ep req list */
  907 	if (!list_empty(&ep->req_list)) {
  908 		struct mv_u3d_req *curr_req;
  909 		curr_req = list_entry(ep->req_list.next,
  910 					struct mv_u3d_req, list);
  911 		if (curr_req == req) {
  912 			list_del_init(&req->list);
  913 			ep->processing = 0;
  914 		}
  915 	}
  916 
  917 out:
  918 	spin_unlock_irqrestore(&ep->u3d->lock, flags);
  919 	return ret;
  920 }
  921 
  922 static void
  923 mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
  924 {
  925 	u32 tmp;
  926 	struct mv_u3d_ep *ep = u3d->eps;
  927 
  928 	dev_dbg(u3d->dev, "%s\n", __func__);
  929 	if (direction == MV_U3D_EP_DIR_OUT) {
  930 		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  931 		if (stall)
  932 			tmp |= MV_U3D_EPXCR_EP_HALT;
  933 		else
  934 			tmp &= ~MV_U3D_EPXCR_EP_HALT;
  935 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  936 	} else {
  937 		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  938 		if (stall)
  939 			tmp |= MV_U3D_EPXCR_EP_HALT;
  940 		else
  941 			tmp &= ~MV_U3D_EPXCR_EP_HALT;
  942 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  943 	}
  944 }
  945 
  946 static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
  947 {
  948 	struct mv_u3d_ep *ep;
  949 	unsigned long flags = 0;
  950 	int status = 0;
  951 	struct mv_u3d *u3d;
  952 
  953 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  954 	u3d = ep->u3d;
  955 	if (!ep->ep.desc) {
  956 		status = -EINVAL;
  957 		goto out;
  958 	}
  959 
  960 	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
  961 		status = -EOPNOTSUPP;
  962 		goto out;
  963 	}
  964 
  965 	/*
   966 	 * An attempt to halt an IN ep will fail if any transfer requests
   967 	 * are still queued.
  968 	 */
  969 	if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
  970 			&& !list_empty(&ep->queue)) {
  971 		status = -EAGAIN;
  972 		goto out;
  973 	}
  974 
  975 	spin_lock_irqsave(&ep->u3d->lock, flags);
  976 	mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
  977 	if (halt && wedge)
  978 		ep->wedge = 1;
  979 	else if (!halt)
  980 		ep->wedge = 0;
  981 	spin_unlock_irqrestore(&ep->u3d->lock, flags);
  982 
  983 	if (ep->ep_num == 0)
  984 		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
  985 out:
  986 	return status;
  987 }
  988 
  989 static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
  990 {
  991 	return mv_u3d_ep_set_halt_wedge(_ep, halt, 0);
  992 }
  993 
  994 static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
  995 {
  996 	return mv_u3d_ep_set_halt_wedge(_ep, 1, 1);
  997 }
  998 
  999 static const struct usb_ep_ops mv_u3d_ep_ops = {
 1000 	.enable		= mv_u3d_ep_enable,
 1001 	.disable	= mv_u3d_ep_disable,
 1002 
 1003 	.alloc_request	= mv_u3d_alloc_request,
 1004 	.free_request	= mv_u3d_free_request,
 1005 
 1006 	.queue		= mv_u3d_ep_queue,
 1007 	.dequeue	= mv_u3d_ep_dequeue,
 1008 
 1009 	.set_wedge	= mv_u3d_ep_set_wedge,
 1010 	.set_halt	= mv_u3d_ep_set_halt,
 1011 	.fifo_flush	= mv_u3d_ep_fifo_flush,
 1012 };
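These ops are never called directly; a gadget function driver reaches them through the usb_ep wrappers in the gadget core. A minimal, hedged sketch of that dispatch (the function name is illustrative, error handling trimmed):

static int example_ep_submit(struct usb_ep *ep, struct usb_request *req)
{
	int ret;

	/* ep->desc must already be set, e.g. via config_ep_by_speed() */
	ret = usb_ep_enable(ep);		/* dispatches to mv_u3d_ep_enable() */
	if (ret)
		return ret;

	ret = usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> mv_u3d_ep_queue() */
	if (ret)
		usb_ep_disable(ep);			/* -> mv_u3d_ep_disable() */
	return ret;
}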
 1013 
 1014 static void mv_u3d_controller_stop(struct mv_u3d *u3d)
 1015 {
 1016 	u32 tmp;
 1017 
 1018 	if (!u3d->clock_gating && u3d->vbus_valid_detect)
 1019 		iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
 1020 				&u3d->vuc_regs->intrenable);
 1021 	else
 1022 		iowrite32(0, &u3d->vuc_regs->intrenable);
 1023 	iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
 1024 	iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
 1025 	iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
 1026 	iowrite32(~0x0, &u3d->vuc_regs->linkchange);
 1027 	iowrite32(0x1, &u3d->vuc_regs->setuplock);
 1028 
 1029 	/* Reset the RUN bit in the command register to stop USB */
 1030 	tmp = ioread32(&u3d->op_regs->usbcmd);
 1031 	tmp &= ~MV_U3D_CMD_RUN_STOP;
 1032 	iowrite32(tmp, &u3d->op_regs->usbcmd);
 1033 	dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
 1034 		ioread32(&u3d->op_regs->usbcmd));
 1035 }
 1036 
 1037 static void mv_u3d_controller_start(struct mv_u3d *u3d)
 1038 {
 1039 	u32 usbintr;
 1040 	u32 temp;
 1041 
 1042 	/* enable link LTSSM state machine */
 1043 	temp = ioread32(&u3d->vuc_regs->ltssm);
 1044 	temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
 1045 	iowrite32(temp, &u3d->vuc_regs->ltssm);
 1046 
 1047 	/* Enable interrupts */
 1048 	usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
 1049 		MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
 1050 		MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
 1051 		(u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
 1052 	iowrite32(usbintr, &u3d->vuc_regs->intrenable);
 1053 
 1054 	/* Enable ctrl ep */
 1055 	iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
 1056 
 1057 	/* Set the Run bit in the command register */
 1058 	iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
 1059 	dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
 1060 		ioread32(&u3d->op_regs->usbcmd));
 1061 }
 1062 
 1063 static int mv_u3d_controller_reset(struct mv_u3d *u3d)
 1064 {
 1065 	unsigned int loops;
 1066 	u32 tmp;
 1067 
 1068 	/* Stop the controller */
 1069 	tmp = ioread32(&u3d->op_regs->usbcmd);
 1070 	tmp &= ~MV_U3D_CMD_RUN_STOP;
 1071 	iowrite32(tmp, &u3d->op_regs->usbcmd);
 1072 
 1073 	/* Reset the controller to get default values */
 1074 	iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
 1075 
 1076 	/* wait for reset to complete */
 1077 	loops = LOOPS(MV_U3D_RESET_TIMEOUT);
 1078 	while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
 1079 		if (loops == 0) {
 1080 			dev_err(u3d->dev,
 1081 				"Wait for RESET completed TIMEOUT\n");
 1082 			return -ETIMEDOUT;
 1083 		}
 1084 		loops--;
 1085 		udelay(LOOPS_USEC);
 1086 	}
 1087 
 1088 	/* Configure the Endpoint Context Address */
 1089 	iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
 1090 	iowrite32(0, &u3d->op_regs->dcbaaph);
 1091 
 1092 	return 0;
 1093 }
 1094 
 1095 static int mv_u3d_enable(struct mv_u3d *u3d)
 1096 {
 1097 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1098 	int retval;
 1099 
 1100 	if (u3d->active)
 1101 		return 0;
 1102 
 1103 	if (!u3d->clock_gating) {
 1104 		u3d->active = 1;
 1105 		return 0;
 1106 	}
 1107 
 1108 	dev_dbg(u3d->dev, "enable u3d\n");
 1109 	clk_enable(u3d->clk);
 1110 	if (pdata->phy_init) {
 1111 		retval = pdata->phy_init(u3d->phy_regs);
 1112 		if (retval) {
 1113 			dev_err(u3d->dev,
 1114 				"init phy error %d\n", retval);
 1115 			clk_disable(u3d->clk);
 1116 			return retval;
 1117 		}
 1118 	}
 1119 	u3d->active = 1;
 1120 
 1121 	return 0;
 1122 }
 1123 
 1124 static void mv_u3d_disable(struct mv_u3d *u3d)
 1125 {
 1126 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1127 	if (u3d->clock_gating && u3d->active) {
 1128 		dev_dbg(u3d->dev, "disable u3d\n");
 1129 		if (pdata->phy_deinit)
 1130 			pdata->phy_deinit(u3d->phy_regs);
 1131 		clk_disable(u3d->clk);
 1132 		u3d->active = 0;
 1133 	}
 1134 }
 1135 
 1136 static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
 1137 {
 1138 	struct mv_u3d *u3d;
 1139 	unsigned long flags;
 1140 	int retval = 0;
 1141 
 1142 	u3d = container_of(gadget, struct mv_u3d, gadget);
 1143 
 1144 	spin_lock_irqsave(&u3d->lock, flags);
 1145 
 1146 	u3d->vbus_active = (is_active != 0);
 1147 	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
 1148 		__func__, u3d->softconnect, u3d->vbus_active);
 1149 	/*
 1150 	 * 1. external VBUS detect: we can disable/enable clock on demand.
 1151 	 * 2. UDC VBUS detect: we have to enable clock all the time.
 1152 	 * 3. No VBUS detect: we have to enable clock all the time.
 1153 	 */
 1154 	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
 1155 		retval = mv_u3d_enable(u3d);
 1156 		if (retval == 0) {
 1157 			/*
  1158 			 * after the clock is disabled, all register
  1159 			 * context is lost; we have to re-init the registers.
 1160 			 */
 1161 			mv_u3d_controller_reset(u3d);
 1162 			mv_u3d_ep0_reset(u3d);
 1163 			mv_u3d_controller_start(u3d);
 1164 		}
 1165 	} else if (u3d->driver && u3d->softconnect) {
 1166 		if (!u3d->active)
 1167 			goto out;
 1168 
  1169 		/* stop all the transfers in the queue */
 1170 		mv_u3d_stop_activity(u3d, u3d->driver);
 1171 		mv_u3d_controller_stop(u3d);
 1172 		mv_u3d_disable(u3d);
 1173 	}
 1174 
 1175 out:
 1176 	spin_unlock_irqrestore(&u3d->lock, flags);
 1177 	return retval;
 1178 }
 1179 
 1180 /* constrain controller's VBUS power usage
 1181  * This call is used by gadget drivers during SET_CONFIGURATION calls,
 1182  * reporting how much power the device may consume.  For example, this
 1183  * could affect how quickly batteries are recharged.
 1184  *
 1185  * Returns zero on success, else negative errno.
 1186  */
 1187 static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
 1188 {
 1189 	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
 1190 
 1191 	u3d->power = mA;
 1192 
 1193 	return 0;
 1194 }
 1195 
 1196 static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
 1197 {
 1198 	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
 1199 	unsigned long flags;
 1200 	int retval = 0;
 1201 
 1202 	spin_lock_irqsave(&u3d->lock, flags);
 1203 
 1204 	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
 1205 		__func__, u3d->softconnect, u3d->vbus_active);
 1206 	u3d->softconnect = (is_on != 0);
 1207 	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
 1208 		retval = mv_u3d_enable(u3d);
 1209 		if (retval == 0) {
 1210 			/*
  1211 			 * after the clock is disabled, all register
  1212 			 * context is lost; we have to re-init the registers.
 1213 			 */
 1214 			mv_u3d_controller_reset(u3d);
 1215 			mv_u3d_ep0_reset(u3d);
 1216 			mv_u3d_controller_start(u3d);
 1217 		}
 1218 	} else if (u3d->driver && u3d->vbus_active) {
  1219 		/* stop all the transfers in the queue */
 1220 		mv_u3d_stop_activity(u3d, u3d->driver);
 1221 		mv_u3d_controller_stop(u3d);
 1222 		mv_u3d_disable(u3d);
 1223 	}
 1224 
 1225 	spin_unlock_irqrestore(&u3d->lock, flags);
 1226 
 1227 	return retval;
 1228 }
 1229 
 1230 static int mv_u3d_start(struct usb_gadget *g,
 1231 		struct usb_gadget_driver *driver)
 1232 {
 1233 	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
 1234 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1235 	unsigned long flags;
 1236 
 1237 	if (u3d->driver)
 1238 		return -EBUSY;
 1239 
 1240 	spin_lock_irqsave(&u3d->lock, flags);
 1241 
 1242 	if (!u3d->clock_gating) {
 1243 		clk_enable(u3d->clk);
 1244 		if (pdata->phy_init)
 1245 			pdata->phy_init(u3d->phy_regs);
 1246 	}
 1247 
 1248 	/* hook up the driver ... */
 1249 	driver->driver.bus = NULL;
 1250 	u3d->driver = driver;
 1251 
 1252 	u3d->ep0_dir = USB_DIR_OUT;
 1253 
 1254 	spin_unlock_irqrestore(&u3d->lock, flags);
 1255 
 1256 	u3d->vbus_valid_detect = 1;
 1257 
 1258 	return 0;
 1259 }
 1260 
 1261 static int mv_u3d_stop(struct usb_gadget *g)
 1262 {
 1263 	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
 1264 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1265 	unsigned long flags;
 1266 
 1267 	u3d->vbus_valid_detect = 0;
 1268 	spin_lock_irqsave(&u3d->lock, flags);
 1269 
 1270 	/* enable clock to access controller register */
 1271 	clk_enable(u3d->clk);
 1272 	if (pdata->phy_init)
 1273 		pdata->phy_init(u3d->phy_regs);
 1274 
 1275 	mv_u3d_controller_stop(u3d);
 1276 	/* stop all usb activities */
 1277 	u3d->gadget.speed = USB_SPEED_UNKNOWN;
 1278 	mv_u3d_stop_activity(u3d, NULL);
 1279 	mv_u3d_disable(u3d);
 1280 
 1281 	if (pdata->phy_deinit)
 1282 		pdata->phy_deinit(u3d->phy_regs);
 1283 	clk_disable(u3d->clk);
 1284 
 1285 	spin_unlock_irqrestore(&u3d->lock, flags);
 1286 
 1287 	u3d->driver = NULL;
 1288 
 1289 	return 0;
 1290 }
 1291 
 1292 /* device controller usb_gadget_ops structure */
 1293 static const struct usb_gadget_ops mv_u3d_ops = {
 1294 	/* notify controller that VBUS is powered or not */
 1295 	.vbus_session	= mv_u3d_vbus_session,
 1296 
 1297 	/* constrain controller's VBUS power usage */
 1298 	.vbus_draw	= mv_u3d_vbus_draw,
 1299 
 1300 	.pullup		= mv_u3d_pullup,
 1301 	.udc_start	= mv_u3d_start,
 1302 	.udc_stop	= mv_u3d_stop,
 1303 };
 1304 
 1305 static int mv_u3d_eps_init(struct mv_u3d *u3d)
 1306 {
 1307 	struct mv_u3d_ep	*ep;
 1308 	char name[14];
 1309 	int i;
 1310 
 1311 	/* initialize ep0, ep0 in/out use eps[1] */
 1312 	ep = &u3d->eps[1];
 1313 	ep->u3d = u3d;
 1314 	strncpy(ep->name, "ep0", sizeof(ep->name));
 1315 	ep->ep.name = ep->name;
 1316 	ep->ep.ops = &mv_u3d_ep_ops;
 1317 	ep->wedge = 0;
 1318 	usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
 1319 	ep->ep.caps.type_control = true;
 1320 	ep->ep.caps.dir_in = true;
 1321 	ep->ep.caps.dir_out = true;
 1322 	ep->ep_num = 0;
 1323 	ep->ep.desc = &mv_u3d_ep0_desc;
 1324 	INIT_LIST_HEAD(&ep->queue);
 1325 	INIT_LIST_HEAD(&ep->req_list);
 1326 	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
 1327 
 1328 	/* add ep0 ep_context */
 1329 	ep->ep_context = &u3d->ep_context[1];
 1330 
 1331 	/* initialize other endpoints */
 1332 	for (i = 2; i < u3d->max_eps * 2; i++) {
 1333 		ep = &u3d->eps[i];
 1334 		if (i & 1) {
 1335 			snprintf(name, sizeof(name), "ep%din", i >> 1);
 1336 			ep->direction = MV_U3D_EP_DIR_IN;
 1337 			ep->ep.caps.dir_in = true;
 1338 		} else {
 1339 			snprintf(name, sizeof(name), "ep%dout", i >> 1);
 1340 			ep->direction = MV_U3D_EP_DIR_OUT;
 1341 			ep->ep.caps.dir_out = true;
 1342 		}
 1343 		ep->u3d = u3d;
 1344 		strncpy(ep->name, name, sizeof(ep->name));
 1345 		ep->ep.name = ep->name;
 1346 
 1347 		ep->ep.caps.type_iso = true;
 1348 		ep->ep.caps.type_bulk = true;
 1349 		ep->ep.caps.type_int = true;
 1350 
 1351 		ep->ep.ops = &mv_u3d_ep_ops;
 1352 		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
 1353 		ep->ep_num = i / 2;
 1354 
 1355 		INIT_LIST_HEAD(&ep->queue);
 1356 		list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
 1357 
 1358 		INIT_LIST_HEAD(&ep->req_list);
 1359 		spin_lock_init(&ep->req_lock);
 1360 		ep->ep_context = &u3d->ep_context[i];
 1361 	}
 1362 
 1363 	return 0;
 1364 }
 1365 
 1366 /* delete all endpoint requests, called with spinlock held */
 1367 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
 1368 {
 1369 	/* endpoint fifo flush */
 1370 	mv_u3d_ep_fifo_flush(&ep->ep);
 1371 
 1372 	while (!list_empty(&ep->queue)) {
 1373 		struct mv_u3d_req *req = NULL;
 1374 		req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
 1375 		mv_u3d_done(ep, req, status);
 1376 	}
 1377 }
 1378 
 1379 /* stop all USB activities */
 1380 static
 1381 void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
 1382 {
 1383 	struct mv_u3d_ep	*ep;
 1384 
 1385 	mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);
 1386 
 1387 	list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
 1388 		mv_u3d_nuke(ep, -ESHUTDOWN);
 1389 	}
 1390 
 1391 	/* report disconnect; the driver is already quiesced */
 1392 	if (driver) {
 1393 		spin_unlock(&u3d->lock);
 1394 		driver->disconnect(&u3d->gadget);
 1395 		spin_lock(&u3d->lock);
 1396 	}
 1397 }
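
/*
 * The lock is dropped around driver->disconnect() because gadget
 * driver callbacks may call back into UDC ops (ep_queue, pullup, ...)
 * that take u3d->lock; the same unlock/relock pattern appears around
 * driver->setup() in mv_u3d_handle_setup_packet().
 */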
 1398 
 1399 static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
 1400 {
 1401 	/* Increment the error count */
 1402 	u3d->errors++;
 1403 	dev_err(u3d->dev, "%s\n", __func__);
 1404 }
 1405 
 1406 static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
 1407 {
 1408 	u32 linkchange;
 1409 
 1410 	linkchange = ioread32(&u3d->vuc_regs->linkchange);
 1411 	iowrite32(linkchange, &u3d->vuc_regs->linkchange);
 1412 
 1413 	dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
 1414 
 1415 	if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
 1416 		dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
 1417 			ioread32(&u3d->vuc_regs->ltssmstate));
 1418 
 1419 		u3d->usb_state = USB_STATE_DEFAULT;
 1420 		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
 1421 		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
 1422 
 1423 		/* set speed */
 1424 		u3d->gadget.speed = USB_SPEED_SUPER;
 1425 	}
 1426 
 1427 	if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
 1428 		dev_dbg(u3d->dev, "link suspend\n");
 1429 		u3d->resume_state = u3d->usb_state;
 1430 		u3d->usb_state = USB_STATE_SUSPENDED;
 1431 	}
 1432 
 1433 	if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
 1434 		dev_dbg(u3d->dev, "link resume\n");
 1435 		u3d->usb_state = u3d->resume_state;
 1436 		u3d->resume_state = 0;
 1437 	}
 1438 
 1439 	if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
 1440 		dev_dbg(u3d->dev, "warm reset\n");
 1441 		u3d->usb_state = USB_STATE_POWERED;
 1442 	}
 1443 
 1444 	if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
 1445 		dev_dbg(u3d->dev, "hot reset\n");
 1446 		u3d->usb_state = USB_STATE_DEFAULT;
 1447 	}
 1448 
 1449 	if (linkchange & MV_U3D_LINK_CHANGE_INACT)
 1450 		dev_dbg(u3d->dev, "inactive\n");
 1451 
 1452 	if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
 1453 		dev_dbg(u3d->dev, "ss.disabled\n");
 1454 
 1455 	if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
 1456 		dev_dbg(u3d->dev, "vbus invalid\n");
 1457 		u3d->usb_state = USB_STATE_ATTACHED;
 1458 		u3d->vbus_valid_detect = 1;
 1459 		/* if external vbus detect is not supported,
 1460 		 * we handle it here.
 1461 		 */
 1462 		if (!u3d->vbus) {
 1463 			spin_unlock(&u3d->lock);
 1464 			mv_u3d_vbus_session(&u3d->gadget, 0);
 1465 			spin_lock(&u3d->lock);
 1466 		}
 1467 	}
 1468 }
 1469 
 1470 static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
 1471 				struct usb_ctrlrequest *setup)
 1472 {
 1473 	u32 tmp;
 1474 
 1475 	if (u3d->usb_state != USB_STATE_DEFAULT) {
 1476 		dev_err(u3d->dev,
 1477 			"%s, cannot setaddr in this state (%d)\n",
 1478 			__func__, u3d->usb_state);
 1479 		goto err;
 1480 	}
 1481 
 1482 	u3d->dev_addr = (u8)setup->wValue;
 1483 
 1484 	dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
 1485 
 1486 	if (u3d->dev_addr > 127) {
 1487 		dev_err(u3d->dev,
 1488 			"%s, u3d address is wrong (out of range)\n", __func__);
 1489 		u3d->dev_addr = 0;
 1490 		goto err;
 1491 	}
 1492 
 1493 	/* update usb state */
 1494 	u3d->usb_state = USB_STATE_ADDRESS;
 1495 
 1496 	/* set the new address */
 1497 	tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
 1498 	tmp &= ~0x7F;
 1499 	tmp |= (u32)u3d->dev_addr;
 1500 	iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
 1501 
 1502 	return;
 1503 err:
 1504 	mv_u3d_ep0_stall(u3d);
 1505 }
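
/*
 * USB device addresses are 7 bits wide (1..127 after SET_ADDRESS,
 * with 0 meaning the default address), which is why the code above
 * rejects values above 127 and clears the low 7 bits of
 * devaddrtiebrkr before inserting the new address.
 */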
 1506 
 1507 static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
 1508 {
 1509 	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
 1510 		if (setup->bRequest == USB_REQ_SET_CONFIGURATION)
 1511 			return 1;
 1512 
 1513 	return 0;
 1514 }
 1515 
 1516 static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
 1517 	struct usb_ctrlrequest *setup)
 1518 	__releases(&u3d->lock)
 1519 	__acquires(&u3d->lock)
 1520 {
 1521 	bool delegate = false;
 1522 
 1523 	mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);
 1524 
 1525 	dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
 1526 			setup->bRequestType, setup->bRequest,
 1527 			setup->wValue, setup->wIndex, setup->wLength);
 1528 
 1529 	/* We process some standard setup requests here */
 1530 	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
 1531 		switch (setup->bRequest) {
 1532 		case USB_REQ_GET_STATUS:
 1533 			delegate = true;
 1534 			break;
 1535 
 1536 		case USB_REQ_SET_ADDRESS:
 1537 			mv_u3d_ch9setaddress(u3d, setup);
 1538 			break;
 1539 
 1540 		case USB_REQ_CLEAR_FEATURE:
 1541 			delegate = true;
 1542 			break;
 1543 
 1544 		case USB_REQ_SET_FEATURE:
 1545 			delegate = true;
 1546 			break;
 1547 
 1548 		default:
 1549 			delegate = true;
 1550 		}
 1551 	} else
 1552 		delegate = true;
 1553 
 1554 	/* delegate the request to the gadget driver */
 1555 	if (delegate) {
 1556 		/* USB requests handled by gadget */
 1557 		if (setup->wLength) {
 1558 			/* DATA phase from gadget, STATUS phase from u3d */
 1559 			u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
 1560 					? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
 1561 			spin_unlock(&u3d->lock);
 1562 			if (u3d->driver->setup(&u3d->gadget,
 1563 				&u3d->local_setup_buff) < 0) {
 1564 				dev_err(u3d->dev, "setup error!\n");
 1565 				mv_u3d_ep0_stall(u3d);
 1566 			}
 1567 			spin_lock(&u3d->lock);
 1568 		} else {
 1569 			/* no DATA phase, STATUS phase from gadget */
 1570 			u3d->ep0_dir = MV_U3D_EP_DIR_IN;
 1571 			u3d->ep0_state = MV_U3D_STATUS_STAGE;
 1572 			spin_unlock(&u3d->lock);
 1573 			if (u3d->driver->setup(&u3d->gadget,
 1574 				&u3d->local_setup_buff) < 0)
 1575 				mv_u3d_ep0_stall(u3d);
 1576 			spin_lock(&u3d->lock);
 1577 		}
 1578 
 1579 		if (mv_u3d_is_set_configuration(setup)) {
 1580 			dev_dbg(u3d->dev, "u3d configured\n");
 1581 			u3d->usb_state = USB_STATE_CONFIGURED;
 1582 		}
 1583 	}
 1584 }
 1585 
 1586 static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
 1587 {
 1588 	struct mv_u3d_ep_context *epcontext;
 1589 
 1590 	epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
 1591 
 1592 	/* Copy the setup packet to local buffer */
 1593 	memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
 1594 }
 1595 
 1596 static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
 1597 {
 1598 	u32 tmp, i;
 1599 	/* Process all Setup packet received interrupts */
 1600 	tmp = ioread32(&u3d->vuc_regs->setuplock);
 1601 	if (tmp) {
 1602 		for (i = 0; i < u3d->max_eps; i++) {
 1603 			if (tmp & (1 << i)) {
 1604 				mv_u3d_get_setup_data(u3d, i,
 1605 					(u8 *)(&u3d->local_setup_buff));
 1606 				mv_u3d_handle_setup_packet(u3d, i,
 1607 					&u3d->local_setup_buff);
 1608 			}
 1609 		}
 1610 	}
 1611 
 1612 	iowrite32(tmp, &u3d->vuc_regs->setuplock);
 1613 }
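
/*
 * setuplock appears to hold one pending-setup bit per endpoint: the
 * handler snapshots it, dispatches each set bit, and then writes the
 * snapshot back to release the corresponding locks.
 */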
 1614 
 1615 static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
 1616 {
 1617 	u32 tmp, bit_pos;
 1618 	int i, ep_num = 0, direction = 0;
 1619 	struct mv_u3d_ep	*curr_ep;
 1620 	struct mv_u3d_req *curr_req, *temp_req;
 1621 	int status;
 1622 
 1623 	tmp = ioread32(&u3d->vuc_regs->endcomplete);
 1624 
 1625 	dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
 1626 	if (!tmp)
 1627 		return;
 1628 	iowrite32(tmp, &u3d->vuc_regs->endcomplete);
 1629 
 1630 	for (i = 0; i < u3d->max_eps * 2; i++) {
 1631 		ep_num = i >> 1;
 1632 		direction = i % 2;
 1633 
 1634 		bit_pos = 1 << (ep_num + 16 * direction);
 1635 
 1636 		if (!(bit_pos & tmp))
 1637 			continue;
 1638 
 1639 		if (i == 0)
 1640 			curr_ep = &u3d->eps[1];
 1641 		else
 1642 			curr_ep = &u3d->eps[i];
 1643 
 1644 		/* remove req out of ep request list after completion */
 1645 		dev_dbg(u3d->dev, "tr comp: check req_list\n");
 1646 		spin_lock(&curr_ep->req_lock);
 1647 		if (!list_empty(&curr_ep->req_list)) {
 1648 			struct mv_u3d_req *req;
 1649 			req = list_entry(curr_ep->req_list.next,
 1650 						struct mv_u3d_req, list);
 1651 			list_del_init(&req->list);
 1652 			curr_ep->processing = 0;
 1653 		}
 1654 		spin_unlock(&curr_ep->req_lock);
 1655 
 1656 		/* process the req queue until an incomplete request is found */
 1657 		list_for_each_entry_safe(curr_req, temp_req,
 1658 			&curr_ep->queue, queue) {
 1659 			status = mv_u3d_process_ep_req(u3d, i, curr_req);
 1660 			if (status)
 1661 				break;
 1662 			/* write back status to req */
 1663 			curr_req->req.status = status;
 1664 
 1665 			/* ep0 request completion */
 1666 			if (ep_num == 0) {
 1667 				mv_u3d_done(curr_ep, curr_req, 0);
 1668 				break;
 1669 			} else {
 1670 				mv_u3d_done(curr_ep, curr_req, status);
 1671 			}
 1672 		}
 1673 
 1674 		dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
 1675 		mv_u3d_start_queue(curr_ep);
 1676 	}
 1677 }
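
/*
 * The bit_pos computation above implies the endcomplete register keeps
 * OUT completions in bits 0..15 and IN completions in bits 16..31, one
 * bit per endpoint number; index 0 (ep0 out) is folded onto eps[1]
 * because ep0 shares a single endpoint structure for both directions.
 */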
 1678 
 1679 static irqreturn_t mv_u3d_irq(int irq, void *dev)
 1680 {
 1681 	struct mv_u3d *u3d = (struct mv_u3d *)dev;
 1682 	u32 status, intr;
 1683 	u32 bridgesetting;
 1684 	u32 trbunderrun;
 1685 
 1686 	spin_lock(&u3d->lock);
 1687 
 1688 	status = ioread32(&u3d->vuc_regs->intrcause);
 1689 	intr = ioread32(&u3d->vuc_regs->intrenable);
 1690 	status &= intr;
 1691 
 1692 	if (status == 0) {
 1693 		spin_unlock(&u3d->lock);
 1694 		dev_err(u3d->dev, "irq error!\n");
 1695 		return IRQ_NONE;
 1696 	}
 1697 
 1698 	if (status & MV_U3D_USBINT_VBUS_VALID) {
 1699 		bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
 1700 		if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
 1701 			/* write vbus valid bit of bridge setting to clear */
 1702 			bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
 1703 			iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
 1704 			dev_dbg(u3d->dev, "vbus valid\n");
 1705 
 1706 			u3d->usb_state = USB_STATE_POWERED;
 1707 			u3d->vbus_valid_detect = 0;
 1708 			/* if external vbus detect is not supported,
 1709 			 * we handle it here.
 1710 			 */
 1711 			if (!u3d->vbus) {
 1712 				spin_unlock(&u3d->lock);
 1713 				mv_u3d_vbus_session(&u3d->gadget, 1);
 1714 				spin_lock(&u3d->lock);
 1715 			}
 1716 		} else
 1717 			dev_err(u3d->dev, "vbus bit is not set\n");
 1718 	}
 1719 
 1720 	/* RX data is already in the 16KB FIFO. */
 1721 	if (status & MV_U3D_USBINT_UNDER_RUN) {
 1722 		trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
 1723 		dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
 1724 		iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
 1725 		mv_u3d_irq_process_error(u3d);
 1726 	}
 1727 
 1728 	if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
 1729 		/* write one to clear */
 1730 		iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
 1731 			| MV_U3D_USBINT_TXDESC_ERR),
 1732 			&u3d->vuc_regs->intrcause);
 1733 		dev_err(u3d->dev, "desc err 0x%x\n", status);
 1734 		mv_u3d_irq_process_error(u3d);
 1735 	}
 1736 
 1737 	if (status & MV_U3D_USBINT_LINK_CHG)
 1738 		mv_u3d_irq_process_link_change(u3d);
 1739 
 1740 	if (status & MV_U3D_USBINT_TX_COMPLETE)
 1741 		mv_u3d_irq_process_tr_complete(u3d);
 1742 
 1743 	if (status & MV_U3D_USBINT_RX_COMPLETE)
 1744 		mv_u3d_irq_process_tr_complete(u3d);
 1745 
 1746 	if (status & MV_U3D_USBINT_SETUP)
 1747 		mv_u3d_irq_process_setup(u3d);
 1748 
 1749 	spin_unlock(&u3d->lock);
 1750 	return IRQ_HANDLED;
 1751 }
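
/*
 * The IRQ line is requested with IRQF_SHARED, so returning IRQ_NONE
 * when no enabled cause bit is set lets other handlers on the shared
 * line claim the interrupt.
 */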
 1752 
 1753 static int mv_u3d_remove(struct platform_device *dev)
 1754 {
 1755 	struct mv_u3d *u3d = platform_get_drvdata(dev);
 1756 
 1757 	BUG_ON(u3d == NULL);
 1758 
 1759 	usb_del_gadget_udc(&u3d->gadget);
 1760 
 1761 	/* free memory allocated in probe */
 1762 	dma_pool_destroy(u3d->trb_pool);
 1763 
 1764 	if (u3d->ep_context)
 1765 		dma_free_coherent(&dev->dev, u3d->ep_context_size,
 1766 			u3d->ep_context, u3d->ep_context_dma);
 1767 
 1768 	kfree(u3d->eps);
 1769 
 1770 	if (u3d->irq)
 1771 		free_irq(u3d->irq, u3d);
 1772 
 1773 	if (u3d->cap_regs)
 1774 		iounmap(u3d->cap_regs);
 1775 	u3d->cap_regs = NULL;
 1776 
 1777 	kfree(u3d->status_req);
 1778 
 1779 	clk_put(u3d->clk);
 1780 
 1781 	kfree(u3d);
 1782 
 1783 	return 0;
 1784 }
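
/*
 * Teardown releases resources in roughly the reverse order of their
 * allocation in mv_u3d_probe() below: gadget, TRB pool, ep contexts,
 * eps array, irq, register mapping, status request, clock, and finally
 * the private structure itself.
 */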
 1785 
 1786 static int mv_u3d_probe(struct platform_device *dev)
 1787 {
 1788 	struct mv_u3d *u3d = NULL;
 1789 	struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
 1790 	int retval = 0;
 1791 	struct resource *r;
 1792 	size_t size;
 1793 
 1794 	if (!dev_get_platdata(&dev->dev)) {
 1795 		dev_err(&dev->dev, "missing platform_data\n");
 1796 		retval = -ENODEV;
 1797 		goto err_pdata;
 1798 	}
 1799 
 1800 	u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
 1801 	if (!u3d) {
 1802 		retval = -ENOMEM;
 1803 		goto err_alloc_private;
 1804 	}
 1805 
 1806 	spin_lock_init(&u3d->lock);
 1807 
 1808 	platform_set_drvdata(dev, u3d);
 1809 
 1810 	u3d->dev = &dev->dev;
 1811 	u3d->vbus = pdata->vbus;
 1812 
 1813 	u3d->clk = clk_get(&dev->dev, NULL);
 1814 	if (IS_ERR(u3d->clk)) {
 1815 		retval = PTR_ERR(u3d->clk);
 1816 		goto err_get_clk;
 1817 	}
 1818 
 1819 	r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
 1820 	if (!r) {
 1821 		dev_err(&dev->dev, "no I/O memory resource defined\n");
 1822 		retval = -ENODEV;
 1823 		goto err_get_cap_regs;
 1824 	}
 1825 
 1826 	u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
 1827 		ioremap(r->start, resource_size(r));
 1828 	if (!u3d->cap_regs) {
 1829 		dev_err(&dev->dev, "failed to map I/O memory\n");
 1830 		retval = -EBUSY;
 1831 		goto err_map_cap_regs;
 1832 	} else {
 1833 		dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n",
 1834 			(unsigned long) r->start,
 1835 			(unsigned long) u3d->cap_regs);
 1836 	}
 1837 
 1838 	/* we will access controller register, so enable the u3d controller */
 1839 	clk_enable(u3d->clk);
 1840 
 1841 	if (pdata->phy_init) {
 1842 		retval = pdata->phy_init(u3d->phy_regs);
 1843 		if (retval) {
 1844 			dev_err(&dev->dev, "init phy error %d\n", retval);
 1845 			goto err_u3d_enable;
 1846 		}
 1847 	}
 1848 
 1849 	u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs
 1850 		+ MV_U3D_USB3_OP_REGS_OFFSET);
 1851 
 1852 	u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs
 1853 		+ ioread32(&u3d->cap_regs->vuoff));
 1854 
 1855 	u3d->max_eps = 16;
 1856 
 1857 	/*
 1858 	 * Some platforms use USB to download the boot image and may not
 1859 	 * disconnect the USB gadget before loading the kernel; stop u3d first.
 1860 	 */
 1861 	mv_u3d_controller_stop(u3d);
 1862 	iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
 1863 
 1864 	if (pdata->phy_deinit)
 1865 		pdata->phy_deinit(u3d->phy_regs);
 1866 	clk_disable(u3d->clk);
 1867 
 1868 	size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
 1869 	size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
 1870 		& ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
 1871 	u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
 1872 					&u3d->ep_context_dma, GFP_KERNEL);
 1873 	if (!u3d->ep_context) {
 1874 		dev_err(&dev->dev, "allocate ep context memory failed\n");
 1875 		retval = -ENOMEM;
 1876 		goto err_alloc_ep_context;
 1877 	}
 1878 	u3d->ep_context_size = size;
 1879 
 1880 	/* create TRB dma_pool resource */
 1881 	u3d->trb_pool = dma_pool_create("u3d_trb",
 1882 			&dev->dev,
 1883 			sizeof(struct mv_u3d_trb_hw),
 1884 			MV_U3D_TRB_ALIGNMENT,
 1885 			MV_U3D_DMA_BOUNDARY);
 1886 
 1887 	if (!u3d->trb_pool) {
 1888 		retval = -ENOMEM;
 1889 		goto err_alloc_trb_pool;
 1890 	}
 1891 
 1892 	size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
 1893 	u3d->eps = kzalloc(size, GFP_KERNEL);
 1894 	if (!u3d->eps) {
 1895 		retval = -ENOMEM;
 1896 		goto err_alloc_eps;
 1897 	}
 1898 
 1899 	/* initialize ep0 status request structure */
 1900 	u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
 1901 	if (!u3d->status_req) {
 1902 		retval = -ENOMEM;
 1903 		goto err_alloc_status_req;
 1904 	}
 1905 	INIT_LIST_HEAD(&u3d->status_req->queue);
 1906 
 1907 	/* allocate a small amount of memory to get valid address */
 1908 	u3d->status_req->req.buf = (char *)u3d->status_req
 1909 					+ sizeof(struct mv_u3d_req);
 1910 	u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
 1911 
 1912 	u3d->resume_state = USB_STATE_NOTATTACHED;
 1913 	u3d->usb_state = USB_STATE_ATTACHED;
 1914 	u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
 1915 	u3d->remote_wakeup = 0;
 1916 
 1917 	r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
 1918 	if (!r) {
 1919 		dev_err(&dev->dev, "no IRQ resource defined\n");
 1920 		retval = -ENODEV;
 1921 		goto err_get_irq;
 1922 	}
 1923 	u3d->irq = r->start;
 1924 	if (request_irq(u3d->irq, mv_u3d_irq,
 1925 		IRQF_SHARED, driver_name, u3d)) {
 1926 		dev_err(&dev->dev, "Request irq %d for u3d failed\n",
 1927 			u3d->irq);
 1928 		u3d->irq = 0;
 1929 		retval = -ENODEV;
 1930 		goto err_request_irq;
 1931 	}
 1932 
 1933 	/* initialize gadget structure */
 1934 	u3d->gadget.ops = &mv_u3d_ops;	/* usb_gadget_ops */
 1935 	u3d->gadget.ep0 = &u3d->eps[1].ep;	/* gadget ep0 */
 1936 	INIT_LIST_HEAD(&u3d->gadget.ep_list);	/* ep_list */
 1937 	u3d->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
 1938 
 1939 	/* the "gadget" abstracts/virtualizes the controller */
 1940 	u3d->gadget.name = driver_name;		/* gadget name */
 1941 
 1942 	mv_u3d_eps_init(u3d);
 1943 
 1944 	/* external vbus detection */
 1945 	if (u3d->vbus) {
 1946 		u3d->clock_gating = 1;
 1947 		dev_err(&dev->dev, "external vbus detection\n");
 1948 	}
 1949 
 1950 	if (!u3d->clock_gating)
 1951 		u3d->vbus_active = 1;
 1952 
 1953 	/* enable usb3 controller vbus detection */
 1954 	u3d->vbus_valid_detect = 1;
 1955 
 1956 	retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
 1957 	if (retval)
 1958 		goto err_unregister;
 1959 
 1960 	dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n",
 1961 		u3d->clock_gating ? "with" : "without");
 1962 
 1963 	return 0;
 1964 
 1965 err_unregister:
 1966 	free_irq(u3d->irq, u3d);
 1967 err_request_irq:
 1968 err_get_irq:
 1969 	kfree(u3d->status_req);
 1970 err_alloc_status_req:
 1971 	kfree(u3d->eps);
 1972 err_alloc_eps:
 1973 	dma_pool_destroy(u3d->trb_pool);
 1974 err_alloc_trb_pool:
 1975 	dma_free_coherent(&dev->dev, u3d->ep_context_size,
 1976 		u3d->ep_context, u3d->ep_context_dma);
 1977 err_alloc_ep_context:
 1978 	if (pdata->phy_deinit)
 1979 		pdata->phy_deinit(u3d->phy_regs);
 1980 	clk_disable(u3d->clk);
 1981 err_u3d_enable:
 1982 	iounmap(u3d->cap_regs);
 1983 err_map_cap_regs:
 1984 err_get_cap_regs:
 1985 	clk_put(u3d->clk);
 1986 err_get_clk:
 1987 	kfree(u3d);
 1988 err_alloc_private:
 1989 err_pdata:
 1990 	return retval;
 1991 }
 1992 
 1993 #ifdef CONFIG_PM_SLEEP
 1994 static int mv_u3d_suspend(struct device *dev)
 1995 {
 1996 	struct mv_u3d *u3d = dev_get_drvdata(dev);
 1997 
 1998 	/*
 1999 	 * USB can suspend only once the cable is unplugged, so the
 2000 	 * clock_gating == 1 case needs no handling here; it is
 2001 	 * covered by the vbus session.
 2002 	 */
 2003 	if (!u3d->clock_gating) {
 2004 		mv_u3d_controller_stop(u3d);
 2005 
 2006 		spin_lock_irq(&u3d->lock);
 2007 		/* stop all usb activities */
 2008 		mv_u3d_stop_activity(u3d, u3d->driver);
 2009 		spin_unlock_irq(&u3d->lock);
 2010 
 2011 		mv_u3d_disable(u3d);
 2012 	}
 2013 
 2014 	return 0;
 2015 }
 2016 
 2017 static int mv_u3d_resume(struct device *dev)
 2018 {
 2019 	struct mv_u3d *u3d = dev_get_drvdata(dev);
 2020 	int retval;
 2021 
 2022 	if (!u3d->clock_gating) {
 2023 		retval = mv_u3d_enable(u3d);
 2024 		if (retval)
 2025 			return retval;
 2026 
 2027 		if (u3d->driver && u3d->softconnect) {
 2028 			mv_u3d_controller_reset(u3d);
 2029 			mv_u3d_ep0_reset(u3d);
 2030 			mv_u3d_controller_start(u3d);
 2031 		}
 2032 	}
 2033 
 2034 	return 0;
 2035 }
 2036 #endif
 2037 
 2038 static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
 2039 
 2040 static void mv_u3d_shutdown(struct platform_device *dev)
 2041 {
 2042 	struct mv_u3d *u3d = platform_get_drvdata(dev);
 2043 	u32 tmp;
 2044 
 2045 	tmp = ioread32(&u3d->op_regs->usbcmd);
 2046 	tmp &= ~MV_U3D_CMD_RUN_STOP;
 2047 	iowrite32(tmp, &u3d->op_regs->usbcmd);
 2048 }
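
/*
 * Shutdown only clears the RUN/STOP bit so the controller stops
 * processing (and any in-flight DMA) before the machine reboots;
 * full teardown is left to mv_u3d_remove().
 */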
 2049 
 2050 static struct platform_driver mv_u3d_driver = {
 2051 	.probe		= mv_u3d_probe,
 2052 	.remove		= mv_u3d_remove,
 2053 	.shutdown	= mv_u3d_shutdown,
 2054 	.driver		= {
 2055 		.name	= "mv-u3d",
 2056 		.pm	= &mv_u3d_pm_ops,
 2057 	},
 2058 };
 2059 
 2060 module_platform_driver(mv_u3d_driver);
 2061 MODULE_ALIAS("platform:mv-u3d");
 2062 MODULE_DESCRIPTION(DRIVER_DESC);
 2063 MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>");
 2064 MODULE_LICENSE("GPL");
 2065 
 2066 
 2067 
 2068 
 2069 
 2070 /* LDV_COMMENT_BEGIN_MAIN */
 2071 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 2072 
 2073 /*###########################################################################*/
 2074 
 2075 /*############## Driver Environment Generator 0.2 output ####################*/
 2076 
 2077 /*###########################################################################*/
 2078 
 2079 
 2080 
 2081 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
 2082 void ldv_check_final_state(void);
 2083 
 2084 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 2085 void ldv_check_return_value(int res);
 2086 
 2087 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 2088 void ldv_check_return_value_probe(int res);
 2089 
 2090 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 2091 void ldv_initialize(void);
 2092 
 2093 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 2094 void ldv_handler_precall(void);
 2095 
 2096 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
 2097 int nondet_int(void);
 2098 
 2099 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 2100 int LDV_IN_INTERRUPT;
 2101 
 2102 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 2103 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 2104 
 2105 
 2106 
 2107 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 2108 	/*============================= VARIABLE DECLARATION PART   =============================*/
 2109 	/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2110 	/* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
 2111 	/* LDV_COMMENT_BEGIN_PREP */
 2112 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2113 	/* LDV_COMMENT_END_PREP */
 2114 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
 2115 	struct usb_ep * var_group1;
 2116 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
 2117 	const struct usb_endpoint_descriptor * var_mv_u3d_ep_enable_8_p1;
 2118 	/* LDV_COMMENT_BEGIN_PREP */
 2119 	#ifdef CONFIG_PM_SLEEP
 2120 	#endif
 2121 	/* LDV_COMMENT_END_PREP */
 2122 	/* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
 2123 	/* LDV_COMMENT_BEGIN_PREP */
 2124 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2125 	/* LDV_COMMENT_END_PREP */
 2126 	/* LDV_COMMENT_BEGIN_PREP */
 2127 	#ifdef CONFIG_PM_SLEEP
 2128 	#endif
 2129 	/* LDV_COMMENT_END_PREP */
 2130 	/* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
 2131 	/* LDV_COMMENT_BEGIN_PREP */
 2132 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2133 	/* LDV_COMMENT_END_PREP */
 2134 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_alloc_request" */
 2135 	gfp_t  var_mv_u3d_alloc_request_10_p1;
 2136 	/* LDV_COMMENT_BEGIN_PREP */
 2137 	#ifdef CONFIG_PM_SLEEP
 2138 	#endif
 2139 	/* LDV_COMMENT_END_PREP */
 2140 	/* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
 2141 	/* LDV_COMMENT_BEGIN_PREP */
 2142 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2143 	/* LDV_COMMENT_END_PREP */
 2144 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_free_request" */
 2145 	struct usb_request * var_group2;
 2146 	/* LDV_COMMENT_BEGIN_PREP */
 2147 	#ifdef CONFIG_PM_SLEEP
 2148 	#endif
 2149 	/* LDV_COMMENT_END_PREP */
 2150 	/* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
 2151 	/* LDV_COMMENT_BEGIN_PREP */
 2152 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2153 	/* LDV_COMMENT_END_PREP */
 2154 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_queue" */
 2155 	gfp_t  var_mv_u3d_ep_queue_13_p2;
 2156 	/* LDV_COMMENT_BEGIN_PREP */
 2157 	#ifdef CONFIG_PM_SLEEP
 2158 	#endif
 2159 	/* LDV_COMMENT_END_PREP */
 2160 	/* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
 2161 	/* LDV_COMMENT_BEGIN_PREP */
 2162 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2163 	/* LDV_COMMENT_END_PREP */
 2164 	/* LDV_COMMENT_BEGIN_PREP */
 2165 	#ifdef CONFIG_PM_SLEEP
 2166 	#endif
 2167 	/* LDV_COMMENT_END_PREP */
 2168 	/* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
 2169 	/* LDV_COMMENT_BEGIN_PREP */
 2170 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2171 	/* LDV_COMMENT_END_PREP */
 2172 	/* LDV_COMMENT_BEGIN_PREP */
 2173 	#ifdef CONFIG_PM_SLEEP
 2174 	#endif
 2175 	/* LDV_COMMENT_END_PREP */
 2176 	/* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
 2177 	/* LDV_COMMENT_BEGIN_PREP */
 2178 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2179 	/* LDV_COMMENT_END_PREP */
 2180 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_set_halt" */
 2181 	int  var_mv_u3d_ep_set_halt_17_p1;
 2182 	/* LDV_COMMENT_BEGIN_PREP */
 2183 	#ifdef CONFIG_PM_SLEEP
 2184 	#endif
 2185 	/* LDV_COMMENT_END_PREP */
 2186 	/* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
 2187 	/* LDV_COMMENT_BEGIN_PREP */
 2188 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2189 	/* LDV_COMMENT_END_PREP */
 2190 	/* LDV_COMMENT_BEGIN_PREP */
 2191 	#ifdef CONFIG_PM_SLEEP
 2192 	#endif
 2193 	/* LDV_COMMENT_END_PREP */
 2194 
 2195 	/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2196 	/* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
 2197 	/* LDV_COMMENT_BEGIN_PREP */
 2198 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2199 	/* LDV_COMMENT_END_PREP */
 2200 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
 2201 	struct usb_gadget * var_group3;
 2202 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
 2203 	int  var_mv_u3d_vbus_session_24_p1;
 2204 	/* LDV_COMMENT_BEGIN_PREP */
 2205 	#ifdef CONFIG_PM_SLEEP
 2206 	#endif
 2207 	/* LDV_COMMENT_END_PREP */
 2208 	/* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
 2209 	/* LDV_COMMENT_BEGIN_PREP */
 2210 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2211 	/* LDV_COMMENT_END_PREP */
 2212 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_draw" */
 2213 	unsigned  var_mv_u3d_vbus_draw_25_p1;
 2214 	/* LDV_COMMENT_BEGIN_PREP */
 2215 	#ifdef CONFIG_PM_SLEEP
 2216 	#endif
 2217 	/* LDV_COMMENT_END_PREP */
 2218 	/* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
 2219 	/* LDV_COMMENT_BEGIN_PREP */
 2220 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2221 	/* LDV_COMMENT_END_PREP */
 2222 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_pullup" */
 2223 	int  var_mv_u3d_pullup_26_p1;
 2224 	/* LDV_COMMENT_BEGIN_PREP */
 2225 	#ifdef CONFIG_PM_SLEEP
 2226 	#endif
 2227 	/* LDV_COMMENT_END_PREP */
 2228 	/* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 2229 	/* LDV_COMMENT_BEGIN_PREP */
 2230 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2231 	/* LDV_COMMENT_END_PREP */
 2232 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_start" */
 2233 	struct usb_gadget_driver * var_group4;
 2234 	/* LDV_COMMENT_BEGIN_PREP */
 2235 	#ifdef CONFIG_PM_SLEEP
 2236 	#endif
 2237 	/* LDV_COMMENT_END_PREP */
 2238 	/* content: static int mv_u3d_stop(struct usb_gadget *g)*/
 2239 	/* LDV_COMMENT_BEGIN_PREP */
 2240 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2241 	/* LDV_COMMENT_END_PREP */
 2242 	/* LDV_COMMENT_BEGIN_PREP */
 2243 	#ifdef CONFIG_PM_SLEEP
 2244 	#endif
 2245 	/* LDV_COMMENT_END_PREP */
 2246 
 2247 	/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2248 	/* content: static int mv_u3d_probe(struct platform_device *dev)*/
 2249 	/* LDV_COMMENT_BEGIN_PREP */
 2250 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2251 	/* LDV_COMMENT_END_PREP */
 2252 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_probe" */
 2253 	struct platform_device * var_group5;
 2254 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mv_u3d_probe" */
 2255 	static int res_mv_u3d_probe_41;
 2256 	/* LDV_COMMENT_BEGIN_PREP */
 2257 	#ifdef CONFIG_PM_SLEEP
 2258 	#endif
 2259 	/* LDV_COMMENT_END_PREP */
 2260 	/* content: static int mv_u3d_remove(struct platform_device *dev)*/
 2261 	/* LDV_COMMENT_BEGIN_PREP */
 2262 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2263 	/* LDV_COMMENT_END_PREP */
 2264 	/* LDV_COMMENT_BEGIN_PREP */
 2265 	#ifdef CONFIG_PM_SLEEP
 2266 	#endif
 2267 	/* LDV_COMMENT_END_PREP */
 2268 	/* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
 2269 	/* LDV_COMMENT_BEGIN_PREP */
 2270 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2271 	#ifdef CONFIG_PM_SLEEP
 2272 	#endif
 2273 	/* LDV_COMMENT_END_PREP */
 2274 
 2275 	/** CALLBACK SECTION request_irq **/
 2276 	/* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
 2277 	/* LDV_COMMENT_BEGIN_PREP */
 2278 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2279 	/* LDV_COMMENT_END_PREP */
 2280 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
 2281 	int  var_mv_u3d_irq_39_p0;
 2282 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
 2283 	void * var_mv_u3d_irq_39_p1;
 2284 	/* LDV_COMMENT_BEGIN_PREP */
 2285 	#ifdef CONFIG_PM_SLEEP
 2286 	#endif
 2287 	/* LDV_COMMENT_END_PREP */
 2288 
 2289 
 2290 
 2291 
 2292 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 2293 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 2294 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 2295 	LDV_IN_INTERRUPT=1;
 2296 
 2297 
 2298 
 2299 
 2300 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 2301 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 2302 	/*============================= FUNCTION CALL SECTION       =============================*/
 2303 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 2304 	ldv_initialize();
 2305 	
 2306 
 2307 	
 2308 
 2309 	int ldv_s_mv_u3d_driver_platform_driver = 0;
 2310 
 2311 	
 2312 
 2313 
 2314 	while(  nondet_int()
 2315 		|| !(ldv_s_mv_u3d_driver_platform_driver == 0)
 2316 	) {
 2317 
 2318 		switch(nondet_int()) {
 2319 
 2320 			case 0: {
 2321 
 2322 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2323 				
 2324 
 2325 				/* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
 2326 				/* LDV_COMMENT_BEGIN_PREP */
 2327 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2328 				/* LDV_COMMENT_END_PREP */
 2329 				/* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "mv_u3d_ep_ops" */
 2330 				ldv_handler_precall();
 2331 				mv_u3d_ep_enable( var_group1, var_mv_u3d_ep_enable_8_p1);
 2332 				/* LDV_COMMENT_BEGIN_PREP */
 2333 				#ifdef CONFIG_PM_SLEEP
 2334 				#endif
 2335 				/* LDV_COMMENT_END_PREP */
 2336 				
 2337 
 2338 				
 2339 
 2340 			}
 2341 
 2342 			break;
 2343 			case 1: {
 2344 
 2345 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2346 				
 2347 
 2348 				/* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
 2349 				/* LDV_COMMENT_BEGIN_PREP */
 2350 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2351 				/* LDV_COMMENT_END_PREP */
 2352 				/* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "mv_u3d_ep_ops" */
 2353 				ldv_handler_precall();
 2354 				mv_u3d_ep_disable( var_group1);
 2355 				/* LDV_COMMENT_BEGIN_PREP */
 2356 				#ifdef CONFIG_PM_SLEEP
 2357 				#endif
 2358 				/* LDV_COMMENT_END_PREP */
 2359 				
 2360 
 2361 				
 2362 
 2363 			}
 2364 
 2365 			break;
 2366 			case 2: {
 2367 
 2368 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2369 				
 2370 
 2371 				/* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
 2372 				/* LDV_COMMENT_BEGIN_PREP */
 2373 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2374 				/* LDV_COMMENT_END_PREP */
 2375 				/* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "mv_u3d_ep_ops" */
 2376 				ldv_handler_precall();
 2377 				mv_u3d_alloc_request( var_group1, var_mv_u3d_alloc_request_10_p1);
 2378 				/* LDV_COMMENT_BEGIN_PREP */
 2379 				#ifdef CONFIG_PM_SLEEP
 2380 				#endif
 2381 				/* LDV_COMMENT_END_PREP */
 2382 				
 2383 
 2384 				
 2385 
 2386 			}
 2387 
 2388 			break;
 2389 			case 3: {
 2390 
 2391 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2392 				
 2393 
 2394 				/* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
 2395 				/* LDV_COMMENT_BEGIN_PREP */
 2396 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2397 				/* LDV_COMMENT_END_PREP */
 2398 				/* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "mv_u3d_ep_ops" */
 2399 				ldv_handler_precall();
 2400 				mv_u3d_free_request( var_group1, var_group2);
 2401 				/* LDV_COMMENT_BEGIN_PREP */
 2402 				#ifdef CONFIG_PM_SLEEP
 2403 				#endif
 2404 				/* LDV_COMMENT_END_PREP */
 2405 				
 2406 
 2407 				
 2408 
 2409 			}
 2410 
 2411 			break;
 2412 			case 4: {
 2413 
 2414 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2415 				
 2416 
 2417 				/* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
 2418 				/* LDV_COMMENT_BEGIN_PREP */
 2419 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2420 				/* LDV_COMMENT_END_PREP */
 2421 				/* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "mv_u3d_ep_ops" */
 2422 				ldv_handler_precall();
 2423 				mv_u3d_ep_queue( var_group1, var_group2, var_mv_u3d_ep_queue_13_p2);
 2424 				/* LDV_COMMENT_BEGIN_PREP */
 2425 				#ifdef CONFIG_PM_SLEEP
 2426 				#endif
 2427 				/* LDV_COMMENT_END_PREP */
 2428 				
 2429 
 2430 				
 2431 
 2432 			}
 2433 
 2434 			break;
 2435 			case 5: {
 2436 
 2437 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2438 				
 2439 
 2440 				/* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
 2441 				/* LDV_COMMENT_BEGIN_PREP */
 2442 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2443 				/* LDV_COMMENT_END_PREP */
 2444 				/* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "mv_u3d_ep_ops" */
 2445 				ldv_handler_precall();
 2446 				mv_u3d_ep_dequeue( var_group1, var_group2);
 2447 				/* LDV_COMMENT_BEGIN_PREP */
 2448 				#ifdef CONFIG_PM_SLEEP
 2449 				#endif
 2450 				/* LDV_COMMENT_END_PREP */
 2451 				
 2452 
 2453 				
 2454 
 2455 			}
 2456 
 2457 			break;
 2458 			case 6: {
 2459 
 2460 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2461 				
 2462 
 2463 				/* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
 2464 				/* LDV_COMMENT_BEGIN_PREP */
 2465 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2466 				/* LDV_COMMENT_END_PREP */
 2467 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "mv_u3d_ep_ops" */
 2468 				ldv_handler_precall();
 2469 				mv_u3d_ep_set_wedge( var_group1);
 2470 				/* LDV_COMMENT_BEGIN_PREP */
 2471 				#ifdef CONFIG_PM_SLEEP
 2472 				#endif
 2473 				/* LDV_COMMENT_END_PREP */
 2474 				
 2475 
 2476 				
 2477 
 2478 			}
 2479 
 2480 			break;
 2481 			case 7: {
 2482 
 2483 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2484 				
 2485 
 2486 				/* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
 2487 				/* LDV_COMMENT_BEGIN_PREP */
 2488 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2489 				/* LDV_COMMENT_END_PREP */
 2490 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "mv_u3d_ep_ops" */
 2491 				ldv_handler_precall();
 2492 				mv_u3d_ep_set_halt( var_group1, var_mv_u3d_ep_set_halt_17_p1);
 2493 				/* LDV_COMMENT_BEGIN_PREP */
 2494 				#ifdef CONFIG_PM_SLEEP
 2495 				#endif
 2496 				/* LDV_COMMENT_END_PREP */
 2497 				
 2498 
 2499 				
 2500 
 2501 			}
 2502 
 2503 			break;
 2504 			case 8: {
 2505 
 2506 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2507 				
 2508 
 2509 				/* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
 2510 				/* LDV_COMMENT_BEGIN_PREP */
 2511 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2512 				/* LDV_COMMENT_END_PREP */
 2513 				/* LDV_COMMENT_FUNCTION_CALL Function from field "fifo_flush" from driver structure with callbacks "mv_u3d_ep_ops" */
 2514 				ldv_handler_precall();
 2515 				mv_u3d_ep_fifo_flush( var_group1);
 2516 				/* LDV_COMMENT_BEGIN_PREP */
 2517 				#ifdef CONFIG_PM_SLEEP
 2518 				#endif
 2519 				/* LDV_COMMENT_END_PREP */
 2520 				
 2521 
 2522 				
 2523 
 2524 			}
 2525 
 2526 			break;
 2527 			case 9: {
 2528 
 2529 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2530 				
 2531 
 2532 				/* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
 2533 				/* LDV_COMMENT_BEGIN_PREP */
 2534 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2535 				/* LDV_COMMENT_END_PREP */
 2536 				/* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_session" from driver structure with callbacks "mv_u3d_ops" */
 2537 				ldv_handler_precall();
 2538 				mv_u3d_vbus_session( var_group3, var_mv_u3d_vbus_session_24_p1);
 2539 				/* LDV_COMMENT_BEGIN_PREP */
 2540 				#ifdef CONFIG_PM_SLEEP
 2541 				#endif
 2542 				/* LDV_COMMENT_END_PREP */
 2543 				
 2544 
 2545 				
 2546 
 2547 			}
 2548 
 2549 			break;
 2550 			case 10: {
 2551 
 2552 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2553 				
 2554 
 2555 				/* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
 2556 				/* LDV_COMMENT_BEGIN_PREP */
 2557 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2558 				/* LDV_COMMENT_END_PREP */
 2559 				/* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_draw" from driver structure with callbacks "mv_u3d_ops" */
 2560 				ldv_handler_precall();
 2561 				mv_u3d_vbus_draw( var_group3, var_mv_u3d_vbus_draw_25_p1);
 2562 				/* LDV_COMMENT_BEGIN_PREP */
 2563 				#ifdef CONFIG_PM_SLEEP
 2564 				#endif
 2565 				/* LDV_COMMENT_END_PREP */
 2566 				
 2567 
 2568 				
 2569 
 2570 			}
 2571 
 2572 			break;
 2573 			case 11: {
 2574 
 2575 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2576 				
 2577 
 2578 				/* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
 2579 				/* LDV_COMMENT_BEGIN_PREP */
 2580 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2581 				/* LDV_COMMENT_END_PREP */
 2582 				/* LDV_COMMENT_FUNCTION_CALL Function from field "pullup" from driver structure with callbacks "mv_u3d_ops" */
 2583 				ldv_handler_precall();
 2584 				mv_u3d_pullup( var_group3, var_mv_u3d_pullup_26_p1);
 2585 				/* LDV_COMMENT_BEGIN_PREP */
 2586 				#ifdef CONFIG_PM_SLEEP
 2587 				#endif
 2588 				/* LDV_COMMENT_END_PREP */
 2589 				
 2590 
 2591 				
 2592 
 2593 			}
 2594 
 2595 			break;
 2596 			case 12: {
 2597 
 2598 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2599 				
 2600 
 2601 				/* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 2602 				/* LDV_COMMENT_BEGIN_PREP */
 2603 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2604 				/* LDV_COMMENT_END_PREP */
 2605 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "mv_u3d_ops" */
 2606 				ldv_handler_precall();
 2607 				mv_u3d_start( var_group3, var_group4);
 2608 				/* LDV_COMMENT_BEGIN_PREP */
 2609 				#ifdef CONFIG_PM_SLEEP
 2610 				#endif
 2611 				/* LDV_COMMENT_END_PREP */
 2612 				
 2613 
 2614 				
 2615 
 2616 			}
 2617 
 2618 			break;
 2619 			case 13: {
 2620 
 2621 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2622 				
 2623 
 2624 				/* content: static int mv_u3d_stop(struct usb_gadget *g)*/
 2625 				/* LDV_COMMENT_BEGIN_PREP */
 2626 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2627 				/* LDV_COMMENT_END_PREP */
 2628 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "mv_u3d_ops" */
 2629 				ldv_handler_precall();
 2630 				mv_u3d_stop( var_group3);
 2631 				/* LDV_COMMENT_BEGIN_PREP */
 2632 				#ifdef CONFIG_PM_SLEEP
 2633 				#endif
 2634 				/* LDV_COMMENT_END_PREP */
 2635 				
 2636 
 2637 				
 2638 
 2639 			}
 2640 
 2641 			break;
 2642 			case 14: {
 2643 
 2644 				/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2645 				if(ldv_s_mv_u3d_driver_platform_driver==0) {
 2646 
 2647 				/* content: static int mv_u3d_probe(struct platform_device *dev)*/
 2648 				/* LDV_COMMENT_BEGIN_PREP */
 2649 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2650 				/* LDV_COMMENT_END_PREP */
 2651 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mv_u3d_driver". Standard function test for correct return result. */
 2652 				res_mv_u3d_probe_41 = mv_u3d_probe( var_group5);
 2653 				 ldv_check_return_value(res_mv_u3d_probe_41);
 2654 				 ldv_check_return_value_probe(res_mv_u3d_probe_41);
 2655 				 if(res_mv_u3d_probe_41) 
 2656 					goto ldv_module_exit;
 2657 				/* LDV_COMMENT_BEGIN_PREP */
 2658 				#ifdef CONFIG_PM_SLEEP
 2659 				#endif
 2660 				/* LDV_COMMENT_END_PREP */
 2661 				ldv_s_mv_u3d_driver_platform_driver++;
 2662 
 2663 				}
 2664 
 2665 			}
 2666 
 2667 			break;
 2668 			case 15: {
 2669 
 2670 				/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2671 				if(ldv_s_mv_u3d_driver_platform_driver==1) {
 2672 
 2673 				/* content: static int mv_u3d_remove(struct platform_device *dev)*/
 2674 				/* LDV_COMMENT_BEGIN_PREP */
 2675 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2676 				/* LDV_COMMENT_END_PREP */
 2677 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mv_u3d_driver" */
 2678 				ldv_handler_precall();
 2679 				mv_u3d_remove( var_group5);
 2680 				/* LDV_COMMENT_BEGIN_PREP */
 2681 				#ifdef CONFIG_PM_SLEEP
 2682 				#endif
 2683 				/* LDV_COMMENT_END_PREP */
 2684 				ldv_s_mv_u3d_driver_platform_driver++;
 2685 
 2686 				}
 2687 
 2688 			}
 2689 
 2690 			break;
 2691 			case 16: {
 2692 
 2693 				/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2694 				if(ldv_s_mv_u3d_driver_platform_driver==2) {
 2695 
 2696 				/* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
 2697 				/* LDV_COMMENT_BEGIN_PREP */
 2698 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2699 				#ifdef CONFIG_PM_SLEEP
 2700 				#endif
 2701 				/* LDV_COMMENT_END_PREP */
 2702 				/* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "mv_u3d_driver" */
 2703 				ldv_handler_precall();
 2704 				mv_u3d_shutdown( var_group5);
 2705 				ldv_s_mv_u3d_driver_platform_driver=0;
 2706 
 2707 				}
 2708 
 2709 			}
 2710 
 2711 			break;
 2712 			case 17: {
 2713 
 2714 				/** CALLBACK SECTION request_irq **/
 2715 				LDV_IN_INTERRUPT=2;
 2716 
 2717 				/* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
 2718 				/* LDV_COMMENT_BEGIN_PREP */
 2719 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2720 				/* LDV_COMMENT_END_PREP */
 2721 				/* LDV_COMMENT_FUNCTION_CALL */
 2722 				ldv_handler_precall();
 2723 				mv_u3d_irq( var_mv_u3d_irq_39_p0, var_mv_u3d_irq_39_p1);
 2724 				/* LDV_COMMENT_BEGIN_PREP */
 2725 				#ifdef CONFIG_PM_SLEEP
 2726 				#endif
 2727 				/* LDV_COMMENT_END_PREP */
 2728 				LDV_IN_INTERRUPT=1;
 2729 
 2730 				
 2731 
 2732 			}
 2733 
 2734 			break;
 2735 			default: break;
 2736 
 2737 		}
 2738 
 2739 	}
 2740 
 2741 	ldv_module_exit: 
 2742 
 2743 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
 2744 	ldv_final: ldv_check_final_state();
 2745 
 2746 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 2747 	return;
 2748 
 2749 }
 2750 #endif
 2751 
/* LDV_COMMENT_END_MAIN */
    1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 #include <linux/module.h>
    9 struct clk;
   10 
   11 extern void ldv_clk_disable_clk(struct clk *clk);
   12 extern int ldv_clk_enable_clk(void);
   13 extern void ldv_clk_disable_clk_of_mv_u3d(struct clk *clk);
   14 extern int ldv_clk_enable_clk_of_mv_u3d(void);
   15 #line 1 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/8743/dscv_tempdir/dscv/ri/320_7a/drivers/usb/gadget/udc/mv_u3d_core.c"
   16 
   17 /*
   18  * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
   19  *
   20  * This program is free software; you can redistribute it and/or modify it
   21  * under the terms and conditions of the GNU General Public License,
   22  * version 2, as published by the Free Software Foundation.
   23  */
   24 
   25 #include <linux/module.h>
   26 #include <linux/dma-mapping.h>
   27 #include <linux/dmapool.h>
   28 #include <linux/kernel.h>
   29 #include <linux/delay.h>
   30 #include <linux/ioport.h>
   31 #include <linux/sched.h>
   32 #include <linux/slab.h>
   33 #include <linux/errno.h>
   34 #include <linux/timer.h>
   35 #include <linux/list.h>
   36 #include <linux/notifier.h>
   37 #include <linux/interrupt.h>
   38 #include <linux/moduleparam.h>
   39 #include <linux/device.h>
   40 #include <linux/usb/ch9.h>
   41 #include <linux/usb/gadget.h>
   42 #include <linux/pm.h>
   43 #include <linux/io.h>
   44 #include <linux/irq.h>
   45 #include <linux/platform_device.h>
   46 #include <linux/platform_data/mv_usb.h>
   47 #include <linux/clk.h>
   48 
   49 #include "mv_u3d.h"
   50 
   51 #define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
   52 
   53 static const char driver_name[] = "mv_u3d";
   54 static const char driver_desc[] = DRIVER_DESC;
   55 
   56 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
   57 static void mv_u3d_stop_activity(struct mv_u3d *u3d,
   58 			struct usb_gadget_driver *driver);
   59 
   60 /* for endpoint 0 operations */
   61 static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
   62 	.bLength =		USB_DT_ENDPOINT_SIZE,
   63 	.bDescriptorType =	USB_DT_ENDPOINT,
   64 	.bEndpointAddress =	0,
   65 	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
   66 	.wMaxPacketSize =	MV_U3D_EP0_MAX_PKT_SIZE,
   67 };
   68 
   69 static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
   70 {
   71 	struct mv_u3d_ep *ep;
   72 	u32 epxcr;
   73 	int i;
   74 
   75 	for (i = 0; i < 2; i++) {
   76 		ep = &u3d->eps[i];
   77 		ep->u3d = u3d;
   78 
   79 		/* ep0 ep context, ep0 in and out share the same ep context */
   80 		ep->ep_context = &u3d->ep_context[1];
   81 	}
   82 
   83 	/* reset ep state machine */
   84 	/* reset ep0 out */
   85 	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
   86 	epxcr |= MV_U3D_EPXCR_EP_INIT;
   87 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
   88 	udelay(5);
   89 	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
   90 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
   91 
   92 	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
   93 		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
   94 		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
   95 		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
   96 		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
   97 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
   98 
   99 	/* reset ep0 in */
  100 	epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
  101 	epxcr |= MV_U3D_EPXCR_EP_INIT;
  102 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
  103 	udelay(5);
  104 	epxcr &= ~MV_U3D_EPXCR_EP_INIT;
  105 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
  106 
  107 	epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
  108 		<< MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
  109 		| (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
  110 		| (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  111 		| MV_U3D_EPXCR_EP_TYPE_CONTROL);
  112 	iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
  113 }
  114 
  115 static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
  116 {
  117 	u32 tmp;
  118 	dev_dbg(u3d->dev, "%s\n", __func__);
  119 
  120 	/* set TX and RX to stall */
  121 	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
  122 	tmp |= MV_U3D_EPXCR_EP_HALT;
  123 	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
  124 
  125 	tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
  126 	tmp |= MV_U3D_EPXCR_EP_HALT;
  127 	iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
  128 
  129 	/* update ep0 state */
  130 	u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
  131 	u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
  132 }
  133 
  134 static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
  135 	struct mv_u3d_req *curr_req)
  136 {
  137 	struct mv_u3d_trb	*curr_trb;
  138 	int actual, remaining_length = 0;
  139 	int direction, ep_num;
  140 	int retval = 0;
  141 	u32 tmp, status, length;
  142 
  143 	direction = index % 2;
  144 	ep_num = index / 2;
  145 
  146 	actual = curr_req->req.length;
  147 
  148 	while (!list_empty(&curr_req->trb_list)) {
  149 		curr_trb = list_entry(curr_req->trb_list.next,
  150 					struct mv_u3d_trb, trb_list);
  151 		if (!curr_trb->trb_hw->ctrl.own) {
  152 			dev_err(u3d->dev, "%s, TRB own error!\n",
  153 				u3d->eps[index].name);
  154 			return 1;
  155 		}
  156 
  157 		curr_trb->trb_hw->ctrl.own = 0;
  158 		if (direction == MV_U3D_EP_DIR_OUT)
  159 			tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
  160 		else
  161 			tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
  162 
  163 		status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
  164 		length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
  165 
  166 		if (status == MV_U3D_COMPLETE_SUCCESS ||
  167 			(status == MV_U3D_COMPLETE_SHORT_PACKET &&
  168 			direction == MV_U3D_EP_DIR_OUT)) {
  169 			remaining_length += length;
  170 			actual -= remaining_length;
  171 		} else {
  172 			dev_err(u3d->dev,
  173 				"complete_tr error: ep=%d %s: error = 0x%x\n",
  174 				index >> 1, direction ? "SEND" : "RECV",
  175 				status);
  176 			retval = -EPROTO;
  177 		}
  178 
  179 		list_del_init(&curr_trb->trb_list);
  180 	}
  181 	if (retval)
  182 		return retval;
  183 
  184 	curr_req->req.actual = actual;
  185 	return 0;
  186 }
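
/*
 * Byte accounting sketch for the loop above: 'actual' starts at the
 * requested length and each completed TRB's reported remaining length
 * is subtracted, so a 1024-byte request completed by a single TRB that
 * reports 100 bytes remaining yields req.actual == 924.
 */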
  187 
   188 /*
   189  * mv_u3d_done() - retire a request; the caller has blocked irqs
   190  * @status : request status to be set; it only takes effect while
   191  * the request is still in progress.
   192  */
  193 static
  194 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
  195 	__releases(&ep->udc->lock)
  196 	__acquires(&ep->udc->lock)
  197 {
  198 	struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
  199 
  200 	dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
  201 	/* Removed the req from ep queue */
  202 	list_del_init(&req->queue);
  203 
  204 	/* req.status should be set as -EINPROGRESS in ep_queue() */
  205 	if (req->req.status == -EINPROGRESS)
  206 		req->req.status = status;
  207 	else
  208 		status = req->req.status;
  209 
  210 	/* Free trb for the request */
  211 	if (!req->chain)
  212 		dma_pool_free(u3d->trb_pool,
  213 			req->trb_head->trb_hw, req->trb_head->trb_dma);
  214 	else {
  215 		dma_unmap_single(ep->u3d->gadget.dev.parent,
  216 			(dma_addr_t)req->trb_head->trb_dma,
  217 			req->trb_count * sizeof(struct mv_u3d_trb_hw),
  218 			DMA_BIDIRECTIONAL);
  219 		kfree(req->trb_head->trb_hw);
  220 	}
  221 	kfree(req->trb_head);
  222 
  223 	usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
  224 
  225 	if (status && (status != -ESHUTDOWN)) {
  226 		dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
  227 			ep->ep.name, &req->req, status,
  228 			req->req.actual, req->req.length);
  229 	}
  230 
  231 	spin_unlock(&ep->u3d->lock);
  232 
  233 	usb_gadget_giveback_request(&ep->ep, &req->req);
  234 
  235 	spin_lock(&ep->u3d->lock);
  236 }
  237 
  238 static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
  239 {
  240 	u32 tmp, direction;
  241 	struct mv_u3d *u3d;
  242 	struct mv_u3d_ep_context *ep_context;
  243 	int retval = 0;
  244 
  245 	u3d = ep->u3d;
  246 	direction = mv_u3d_ep_dir(ep);
  247 
   248 	/* ep0 in and out share the same ep context slot 1 */
  249 	if (ep->ep_num == 0)
  250 		ep_context = &(u3d->ep_context[1]);
  251 	else
  252 		ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
  253 
  254 	/* check if the pipe is empty or not */
  255 	if (!list_empty(&ep->queue)) {
  256 		dev_err(u3d->dev, "add trb to non-empty queue!\n");
  257 		retval = -ENOMEM;
  258 		WARN_ON(1);
  259 	} else {
  260 		ep_context->rsvd0 = cpu_to_le32(1);
  261 		ep_context->rsvd1 = 0;
  262 
  263 		/* Configure the trb address and set the DCS bit.
  264 		 * Both DCS bit and own bit in trb should be set.
  265 		 */
  266 		ep_context->trb_addr_lo =
  267 			cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
  268 		ep_context->trb_addr_hi = 0;
  269 
   270 		/* Ensure that updates to the EP context
   271 		 * occur before ringing the doorbell.
   272 		 */
  273 		wmb();
  274 
  275 		/* ring bell the ep */
  276 		if (ep->ep_num == 0)
  277 			tmp = 0x1;
  278 		else
  279 			tmp = ep->ep_num * 2
  280 				+ ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);
  281 
  282 		iowrite32(tmp, &u3d->op_regs->doorbell);
  283 	}
  284 	return retval;
  285 }
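
/*
 * Doorbell encoding used above: value 0x1 rings ep0; otherwise the
 * value is ep_num * 2 + direction with OUT = 0 and IN = 1, matching
 * the ep_context[] indexing scheme.
 */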
  286 
  287 static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
  288 				unsigned *length, dma_addr_t *dma)
  289 {
  290 	u32 temp;
  291 	unsigned int direction;
  292 	struct mv_u3d_trb *trb;
  293 	struct mv_u3d_trb_hw *trb_hw;
  294 	struct mv_u3d *u3d;
  295 
  296 	/* how big will this transfer be? */
  297 	*length = req->req.length - req->req.actual;
  298 	BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
  299 
  300 	u3d = req->ep->u3d;
  301 
  302 	trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
  303 	if (!trb)
  304 		return NULL;
  305 
   306 	/*
   307 	 * Be careful that __GFP_HIGHMEM is not set, or we cannot use
   308 	 * dma_to_virt; GFP_KERNEL cannot be used while holding a
   309 	 * spinlock, hence GFP_ATOMIC here.
   310 	 */
  311 	trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
  312 	if (!trb_hw) {
  313 		kfree(trb);
  314 		dev_err(u3d->dev,
  315 			"%s, dma_pool_alloc fail\n", __func__);
  316 		return NULL;
  317 	}
  318 	trb->trb_dma = *dma;
  319 	trb->trb_hw = trb_hw;
  320 
  321 	/* initialize buffer page pointers */
  322 	temp = (u32)(req->req.dma + req->req.actual);
  323 
  324 	trb_hw->buf_addr_lo = cpu_to_le32(temp);
  325 	trb_hw->buf_addr_hi = 0;
  326 	trb_hw->trb_len = cpu_to_le32(*length);
  327 	trb_hw->ctrl.own = 1;
  328 
  329 	if (req->ep->ep_num == 0)
  330 		trb_hw->ctrl.type = TYPE_DATA;
  331 	else
  332 		trb_hw->ctrl.type = TYPE_NORMAL;
  333 
  334 	req->req.actual += *length;
  335 
  336 	direction = mv_u3d_ep_dir(req->ep);
  337 	if (direction == MV_U3D_EP_DIR_IN)
  338 		trb_hw->ctrl.dir = 1;
  339 	else
  340 		trb_hw->ctrl.dir = 0;
  341 
  342 	/* Enable interrupt for the last trb of a request */
  343 	if (!req->req.no_interrupt)
  344 		trb_hw->ctrl.ioc = 1;
  345 
  346 	trb_hw->ctrl.chain = 0;
  347 
  348 	wmb();
  349 	return trb;
  350 }
  351 
  352 static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
  353 		struct mv_u3d_trb *trb, int *is_last)
  354 {
  355 	u32 temp;
  356 	unsigned int direction;
  357 	struct mv_u3d *u3d;
  358 
  359 	/* how big will this transfer be? */
  360 	*length = min(req->req.length - req->req.actual,
  361 			(unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
  362 
  363 	u3d = req->ep->u3d;
  364 
  365 	trb->trb_dma = 0;
  366 
  367 	/* initialize buffer page pointers */
  368 	temp = (u32)(req->req.dma + req->req.actual);
  369 
  370 	trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
  371 	trb->trb_hw->buf_addr_hi = 0;
  372 	trb->trb_hw->trb_len = cpu_to_le32(*length);
  373 	trb->trb_hw->ctrl.own = 1;
  374 
  375 	if (req->ep->ep_num == 0)
  376 		trb->trb_hw->ctrl.type = TYPE_DATA;
  377 	else
  378 		trb->trb_hw->ctrl.type = TYPE_NORMAL;
  379 
  380 	req->req.actual += *length;
  381 
  382 	direction = mv_u3d_ep_dir(req->ep);
  383 	if (direction == MV_U3D_EP_DIR_IN)
  384 		trb->trb_hw->ctrl.dir = 1;
  385 	else
  386 		trb->trb_hw->ctrl.dir = 0;
  387 
  388 	/* zlp is needed if req->req.zero is set */
  389 	if (req->req.zero) {
  390 		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
  391 			*is_last = 1;
  392 		else
  393 			*is_last = 0;
  394 	} else if (req->req.length == req->req.actual)
  395 		*is_last = 1;
  396 	else
  397 		*is_last = 0;
  398 
  399 	/* Enable interrupt for the last trb of a request */
  400 	if (*is_last && !req->req.no_interrupt)
  401 		trb->trb_hw->ctrl.ioc = 1;
  402 
  403 	if (*is_last)
  404 		trb->trb_hw->ctrl.chain = 0;
  405 	else {
  406 		trb->trb_hw->ctrl.chain = 1;
  407 		dev_dbg(u3d->dev, "chain trb\n");
  408 	}
  409 
  410 	wmb();
  411 
  412 	return 0;
  413 }
  414 
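/*
 * The is_last decision in mv_u3d_build_trb_chain() above: with req.zero
 * set, the chain only terminates on a short (or zero-length) segment, so
 * a transfer that is an exact multiple of maxpacket gets one extra
 * zero-length TRB as a ZLP; otherwise it terminates once all bytes are
 * consumed.  A condensed sketch (hypothetical helper; "actual" already
 * includes the current segment, as in the driver):
 */
static inline int sketch_is_last(unsigned int seg_len, unsigned int maxpacket,
			unsigned int length, unsigned int actual, int zero)
{
	if (zero)
		return seg_len == 0 || (seg_len % maxpacket) != 0;
	return length == actual;
}
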
  415 /* generate TRB linked list for a request
  416  * the usb controller only supports a contiguous trb chain,
  417  * i.e. the trb structures' physical addresses must be contiguous.
  418  */
  419 static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
  420 {
  421 	unsigned count;
  422 	int is_last;
  423 	struct mv_u3d_trb *trb;
  424 	struct mv_u3d_trb_hw *trb_hw;
  425 	struct mv_u3d *u3d;
  426 	dma_addr_t dma;
  427 	unsigned length;
  428 	unsigned trb_num;
  429 
  430 	u3d = req->ep->u3d;
  431 
  432 	INIT_LIST_HEAD(&req->trb_list);
  433 
  434 	length = req->req.length - req->req.actual;
  435 	/* normally the request transfer length is less than 16KB.
  436 	 * we use build_trb_one() to optimize it.
  437 	 */
  438 	if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
  439 		trb = mv_u3d_build_trb_one(req, &count, &dma);
      		if (!trb)	/* allocation can fail under GFP_ATOMIC */
      			return -ENOMEM;
  440 		list_add_tail(&trb->trb_list, &req->trb_list);
  441 		req->trb_head = trb;
  442 		req->trb_count = 1;
  443 		req->chain = 0;
  444 	} else {
  445 		trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
  446 		if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
  447 			trb_num++;
  448 
  449 		trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
  450 		if (!trb)
  451 			return -ENOMEM;
  452 
  453 		trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
  454 		if (!trb_hw) {
  455 			kfree(trb);
  456 			return -ENOMEM;
  457 		}
  458 
      		/* remember the array bases so the error path can free them */
      		req->trb_head = trb;
  459 		do {
  460 			trb->trb_hw = trb_hw;
  461 			if (mv_u3d_build_trb_chain(req, &count,
  462 						trb, &is_last)) {
  463 				dev_err(u3d->dev,
  464 					"%s, mv_u3d_build_trb_chain fail\n",
  465 					__func__);
      				kfree(req->trb_head->trb_hw);
      				kfree(req->trb_head);
  466 				return -EIO;
  467 			}
  468 
  469 			list_add_tail(&trb->trb_list, &req->trb_list);
  470 			req->trb_count++;
  471 			trb++;
  472 			trb_hw++;
  473 		} while (!is_last);
  474 
  475 		req->trb_head = list_entry(req->trb_list.next,
  476 					struct mv_u3d_trb, trb_list);
  477 		req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
  478 					req->trb_head->trb_hw,
  479 					trb_num * sizeof(*trb_hw),
  480 					DMA_BIDIRECTIONAL);
  481 		if (dma_mapping_error(u3d->gadget.dev.parent,
  482 					req->trb_head->trb_dma)) {
  483 			kfree(req->trb_head->trb_hw);
  484 			kfree(req->trb_head);
  485 			return -EFAULT;
  486 		}
  487 
  488 		req->chain = 1;
  489 	}
  490 
  491 	return 0;
  492 }
  493 
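/*
 * The TRB-count computation in mv_u3d_req_to_trb() is a round-up
 * division: length / max plus one if there is a remainder.  A sketch of
 * the equivalent arithmetic (hypothetical helper); e.g. 16KB + 1 bytes
 * with a 16KB max transfer yields 2 TRBs:
 */
static inline unsigned int sketch_trb_num(unsigned int length,
			unsigned int max_xfer)
{
	return (length + max_xfer - 1) / max_xfer;	/* DIV_ROUND_UP */
}
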
  494 static int
  495 mv_u3d_start_queue(struct mv_u3d_ep *ep)
  496 {
  497 	struct mv_u3d *u3d = ep->u3d;
  498 	struct mv_u3d_req *req;
  499 	int ret;
  500 
  501 	if (!list_empty(&ep->req_list) && !ep->processing)
  502 		req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
  503 	else
  504 		return 0;
  505 
  506 	ep->processing = 1;
  507 
  508 	/* set up dma mapping */
  509 	ret = usb_gadget_map_request(&u3d->gadget, &req->req,
  510 					mv_u3d_ep_dir(ep));
  511 	if (ret)
  512 		goto break_processing;
  513 
  514 	req->req.status = -EINPROGRESS;
  515 	req->req.actual = 0;
  516 	req->trb_count = 0;
  517 
  518 	/* build trbs */
  519 	ret = mv_u3d_req_to_trb(req);
  520 	if (ret) {
  521 		dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
  522 		goto break_processing;
  523 	}
  524 
  525 	/* and push them to device queue */
  526 	ret = mv_u3d_queue_trb(ep, req);
  527 	if (ret)
  528 		goto break_processing;
  529 
  530 	/* irq handler advances the queue */
  531 	list_add_tail(&req->queue, &ep->queue);
  532 
  533 	return 0;
  534 
  535 break_processing:
  536 	ep->processing = 0;
  537 	return ret;
  538 }
  539 
  540 static int mv_u3d_ep_enable(struct usb_ep *_ep,
  541 		const struct usb_endpoint_descriptor *desc)
  542 {
  543 	struct mv_u3d *u3d;
  544 	struct mv_u3d_ep *ep;
  545 	u16 max = 0;
  546 	unsigned maxburst = 0;
  547 	u32 epxcr, direction;
  548 
  549 	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
  550 		return -EINVAL;
  551 
  552 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  553 	u3d = ep->u3d;
  554 
  555 	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
  556 		return -ESHUTDOWN;
  557 
  558 	direction = mv_u3d_ep_dir(ep);
  559 	max = le16_to_cpu(desc->wMaxPacketSize);
  560 
  561 	if (!_ep->maxburst)
  562 		_ep->maxburst = 1;
  563 	maxburst = _ep->maxburst;
  564 
  565 	/* Set the max burst size */
  566 	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
  567 	case USB_ENDPOINT_XFER_BULK:
  568 		if (maxburst > 16) {
  569 			dev_dbg(u3d->dev,
  570 				"max burst should not be greater "
  571 				"than 16 on bulk ep\n");
  572 			maxburst = 1;
  573 			_ep->maxburst = maxburst;
  574 		}
  575 		dev_dbg(u3d->dev,
  576 			"maxburst: %d on bulk %s\n", maxburst, ep->name);
  577 		break;
  578 	case USB_ENDPOINT_XFER_CONTROL:
  579 		/* control transfer only supports maxburst as one */
  580 		maxburst = 1;
  581 		_ep->maxburst = maxburst;
  582 		break;
  583 	case USB_ENDPOINT_XFER_INT:
  584 		if (maxburst != 1) {
  585 			dev_dbg(u3d->dev,
  586 				"max burst should be 1 on int ep "
  587 				"if transfer size is not 1024\n");
  588 			maxburst = 1;
  589 			_ep->maxburst = maxburst;
  590 		}
  591 		break;
  592 	case USB_ENDPOINT_XFER_ISOC:
  593 		if (maxburst != 1) {
  594 			dev_dbg(u3d->dev,
  595 				"max burst should be 1 on isoc ep "
  596 				"if transfer size is not 1024\n");
  597 			maxburst = 1;
  598 			_ep->maxburst = maxburst;
  599 		}
  600 		break;
  601 	default:
  602 		goto en_done;
  603 	}
  604 
  605 	ep->ep.maxpacket = max;
  606 	ep->ep.desc = desc;
  607 	ep->enabled = 1;
  608 
  609 	/* Enable the endpoint for Rx or Tx and set the endpoint type */
  610 	if (direction == MV_U3D_EP_DIR_OUT) {
  611 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  612 		epxcr |= MV_U3D_EPXCR_EP_INIT;
  613 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  614 		udelay(5);
  615 		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
  616 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  617 
  618 		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
  619 		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
  620 		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  621 		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
  622 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
  623 	} else {
  624 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  625 		epxcr |= MV_U3D_EPXCR_EP_INIT;
  626 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  627 		udelay(5);
  628 		epxcr &= ~MV_U3D_EPXCR_EP_INIT;
  629 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  630 
  631 		epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
  632 		      | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
  633 		      | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  634 		      | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
  635 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
  636 	}
  637 
  638 	return 0;
  639 en_done:
  640 	return -EINVAL;
  641 }
  642 
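/*
 * Both directions in mv_u3d_ep_enable() program the same epxcr1 layout:
 * max packet size, burst size minus one, an enable bit and the transfer
 * type, each at its own shift.  A sketch of the packing (hypothetical
 * helper reusing the driver's shift macros):
 */
static inline u32 sketch_epxcr1(u16 max, unsigned int maxburst, u8 bmattr)
{
	return (max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
	     | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
	     | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
	     | (bmattr & USB_ENDPOINT_XFERTYPE_MASK);
}
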
  643 static int  mv_u3d_ep_disable(struct usb_ep *_ep)
  644 {
  645 	struct mv_u3d *u3d;
  646 	struct mv_u3d_ep *ep;
  647 	u32 epxcr, direction;
  648 	unsigned long flags;
  649 
  650 	if (!_ep)
  651 		return -EINVAL;
  652 
  653 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  654 	if (!ep->ep.desc)
  655 		return -EINVAL;
  656 
  657 	u3d = ep->u3d;
  658 
  659 	direction = mv_u3d_ep_dir(ep);
  660 
  661 	/* nuke all pending requests (does flush) */
  662 	spin_lock_irqsave(&u3d->lock, flags);
  663 	mv_u3d_nuke(ep, -ESHUTDOWN);
  664 	spin_unlock_irqrestore(&u3d->lock, flags);
  665 
  666 	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
  667 	if (direction == MV_U3D_EP_DIR_OUT) {
  668 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
  669 		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  670 		      | USB_ENDPOINT_XFERTYPE_MASK);
  671 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
  672 	} else {
  673 		epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
  674 		epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
  675 		      | USB_ENDPOINT_XFERTYPE_MASK);
  676 		iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
  677 	}
  678 
  679 	ep->enabled = 0;
  680 
  681 	ep->ep.desc = NULL;
  682 	return 0;
  683 }
  684 
  685 static struct usb_request *
  686 mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
  687 {
  688 	struct mv_u3d_req *req = NULL;
  689 
  690 	req = kzalloc(sizeof *req, gfp_flags);
  691 	if (!req)
  692 		return NULL;
  693 
  694 	INIT_LIST_HEAD(&req->queue);
  695 
  696 	return &req->req;
  697 }
  698 
  699 static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
  700 {
  701 	struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
  702 
  703 	kfree(req);
  704 }
  705 
  706 static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
  707 {
  708 	struct mv_u3d *u3d;
  709 	u32 direction;
  710 	struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
  711 	unsigned int loops;
  712 	u32 tmp;
  713 
  714 	/* if endpoint is not enabled, cannot flush endpoint */
  715 	if (!ep->enabled)
  716 		return;
  717 
  718 	u3d = ep->u3d;
  719 	direction = mv_u3d_ep_dir(ep);
  720 
  721 	/* ep0 needs the flush bit cleared manually after flushing the fifo. */
  722 	if (!ep->ep_num) {
  723 		if (direction == MV_U3D_EP_DIR_OUT) {
  724 			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
  725 			tmp |= MV_U3D_EPXCR_EP_FLUSH;
  726 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
  727 			udelay(10);
  728 			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
  729 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
  730 		} else {
  731 			tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
  732 			tmp |= MV_U3D_EPXCR_EP_FLUSH;
  733 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
  734 			udelay(10);
  735 			tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
  736 			iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
  737 		}
  738 		return;
  739 	}
  740 
  741 	if (direction == MV_U3D_EP_DIR_OUT) {
  742 		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  743 		tmp |= MV_U3D_EPXCR_EP_FLUSH;
  744 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
  745 
  746 		/* Wait until flushing completed */
  747 		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
  748 		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
  749 			MV_U3D_EPXCR_EP_FLUSH) {
  750 			/*
  751 			 * EP_FLUSH bit should be cleared to indicate this
  752 			 * operation is complete
  753 			 */
  754 			if (loops == 0) {
  755 				dev_dbg(u3d->dev,
  756 				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
  757 				    direction ? "in" : "out");
  758 				return;
  759 			}
  760 			loops--;
  761 			udelay(LOOPS_USEC);
  762 		}
  763 	} else {	/* EP_DIR_IN */
  764 		tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  765 		tmp |= MV_U3D_EPXCR_EP_FLUSH;
  766 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
  767 
  768 		/* Wait until flushing completed */
  769 		loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
  770 		while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
  771 			MV_U3D_EPXCR_EP_FLUSH) {
  772 			/*
  773 			 * EP_FLUSH bit should be cleared to indicate this
  774 			 * operation is complete
  775 			 */
  776 			if (loops == 0) {
  777 				dev_dbg(u3d->dev,
  778 				    "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
  779 				    direction ? "in" : "out");
  780 				return;
  781 			}
  782 			loops--;
  783 			udelay(LOOPS_USEC);
  784 		}
  785 	}
  786 }
  787 
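/*
 * All four flush paths in mv_u3d_ep_fifo_flush() repeat one idiom: set
 * the flush bit, then poll until the hardware clears it or a bounded
 * number of delay steps elapses.  A condensed sketch (hypothetical
 * helper, assuming the driver's LOOPS()/LOOPS_USEC timeout convention):
 */
static inline int sketch_poll_bit_clear(void __iomem *reg, u32 bit,
			unsigned int loops)
{
	while (ioread32(reg) & bit) {
		if (!loops--)
			return -ETIMEDOUT;	/* bit never cleared */
		udelay(LOOPS_USEC);
	}
	return 0;
}
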
  788 /* queues (submits) an I/O request to an endpoint */
  789 static int
  790 mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
  791 {
  792 	struct mv_u3d_ep *ep;
  793 	struct mv_u3d_req *req;
  794 	struct mv_u3d *u3d;
  795 	unsigned long flags;
  796 	int is_first_req = 0;
  797 
  798 	if (unlikely(!_ep || !_req))
  799 		return -EINVAL;
  800 
  801 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  802 	u3d = ep->u3d;
  803 
  804 	req = container_of(_req, struct mv_u3d_req, req);
  805 
  806 	if (!ep->ep_num
  807 		&& u3d->ep0_state == MV_U3D_STATUS_STAGE
  808 		&& !_req->length) {
  809 		dev_dbg(u3d->dev, "ep0 status stage\n");
  810 		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
  811 		return 0;
  812 	}
  813 
  814 	dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n",
  815 			__func__, _ep->name, req);
  816 
  817 	/* catch various bogus parameters */
  818 	if (!req->req.complete || !req->req.buf
  819 			|| !list_empty(&req->queue)) {
  820 		dev_err(u3d->dev,
  821 			"%s, bad params, _req: 0x%p, "
  822 			"req->req.complete: 0x%p, req->req.buf: 0x%p, "
  823 			"list_empty: 0x%x\n",
  824 			__func__, _req,
  825 			req->req.complete, req->req.buf,
  826 			list_empty(&req->queue));
  827 		return -EINVAL;
  828 	}
  829 	if (unlikely(!ep->ep.desc)) {
  830 		dev_err(u3d->dev, "%s, bad ep\n", __func__);
  831 		return -EINVAL;
  832 	}
  833 	if ((ep->ep.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
      			== USB_ENDPOINT_XFER_ISOC) {
  834 		if (req->req.length > ep->ep.maxpacket)
  835 			return -EMSGSIZE;
  836 	}
  837 
  838 	if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
  839 		dev_err(u3d->dev,
  840 			"bad params of driver/speed\n");
  841 		return -ESHUTDOWN;
  842 	}
  843 
  844 	req->ep = ep;
  845 
  846 	/* Software list handles usb request. */
  847 	spin_lock_irqsave(&ep->req_lock, flags);
  848 	is_first_req = list_empty(&ep->req_list);
  849 	list_add_tail(&req->list, &ep->req_list);
  850 	spin_unlock_irqrestore(&ep->req_lock, flags);
  851 	if (!is_first_req) {
  852 		dev_dbg(u3d->dev, "list is not empty\n");
  853 		return 0;
  854 	}
  855 
  856 	dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
  857 	spin_lock_irqsave(&u3d->lock, flags);
  858 	mv_u3d_start_queue(ep);
  859 	spin_unlock_irqrestore(&u3d->lock, flags);
  860 	return 0;
  861 }
  862 
  863 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
  864 static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
  865 {
  866 	struct mv_u3d_ep *ep;
  867 	struct mv_u3d_req *req;
  868 	struct mv_u3d *u3d;
  869 	struct mv_u3d_ep_context *ep_context;
  870 	struct mv_u3d_req *next_req;
  871 
  872 	unsigned long flags;
  873 	int ret = 0;
  874 
  875 	if (!_ep || !_req)
  876 		return -EINVAL;
  877 
  878 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  879 	u3d = ep->u3d;
  880 
  881 	spin_lock_irqsave(&ep->u3d->lock, flags);
  882 
  883 	/* make sure it's actually queued on this endpoint */
  884 	list_for_each_entry(req, &ep->queue, queue) {
  885 		if (&req->req == _req)
  886 			break;
  887 	}
  888 	if (&req->req != _req) {
  889 		ret = -EINVAL;
  890 		goto out;
  891 	}
  892 
  893 	/* The request is in progress, or completed but not dequeued */
  894 	if (ep->queue.next == &req->queue) {
  895 		_req->status = -ECONNRESET;
  896 		mv_u3d_ep_fifo_flush(_ep);
  897 
  898 		/* The request isn't the last request in this ep queue */
  899 		if (req->queue.next != &ep->queue) {
  900 			dev_dbg(u3d->dev,
  901 				"it is not the last request in this ep queue\n");
  902 			ep_context = ep->ep_context;
  903 			next_req = list_entry(req->queue.next,
  904 					struct mv_u3d_req, queue);
  905 
  906 			/* Point the EP context at the next request's first TRB. */
  907 			iowrite32((u32) next_req->trb_head->trb_dma,
  908 					&ep_context->trb_addr_lo);
  909 		} else {
  910 			/* no shadowing local: reuse the outer ep_context */
  911 			ep_context = ep->ep_context;
  912 			ep_context->trb_addr_lo = 0;
  913 			ep_context->trb_addr_hi = 0;
  914 		}
  915 
  916 	} else
  917 		WARN_ON(1);
  918 
  919 	mv_u3d_done(ep, req, -ECONNRESET);
  920 
  921 	/* remove the req from the ep req list */
  922 	if (!list_empty(&ep->req_list)) {
  923 		struct mv_u3d_req *curr_req;
  924 		curr_req = list_entry(ep->req_list.next,
  925 					struct mv_u3d_req, list);
  926 		if (curr_req == req) {
  927 			list_del_init(&req->list);
  928 			ep->processing = 0;
  929 		}
  930 	}
  931 
  932 out:
  933 	spin_unlock_irqrestore(&ep->u3d->lock, flags);
  934 	return ret;
  935 }
  936 
  937 static void
  938 mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
  939 {
  940 	u32 tmp;
      	/* use the ep_num argument directly; indexing via u3d->eps[0]
      	 * would always target endpoint 0 */
  942 
  943 	dev_dbg(u3d->dev, "%s\n", __func__);
  944 	if (direction == MV_U3D_EP_DIR_OUT) {
  945 		tmp = ioread32(&u3d->vuc_regs->epcr[ep_num].epxoutcr0);
  946 		if (stall)
  947 			tmp |= MV_U3D_EPXCR_EP_HALT;
  948 		else
  949 			tmp &= ~MV_U3D_EPXCR_EP_HALT;
  950 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep_num].epxoutcr0);
  951 	} else {
  952 		tmp = ioread32(&u3d->vuc_regs->epcr[ep_num].epxincr0);
  953 		if (stall)
  954 			tmp |= MV_U3D_EPXCR_EP_HALT;
  955 		else
  956 			tmp &= ~MV_U3D_EPXCR_EP_HALT;
  957 		iowrite32(tmp, &u3d->vuc_regs->epcr[ep_num].epxincr0);
  958 	}
  959 }
  960 
  961 static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
  962 {
  963 	struct mv_u3d_ep *ep;
  964 	unsigned long flags = 0;
  965 	int status = 0;
  966 	struct mv_u3d *u3d;
  967 
  968 	ep = container_of(_ep, struct mv_u3d_ep, ep);
  969 	u3d = ep->u3d;
  970 	if (!ep->ep.desc) {
  971 		status = -EINVAL;
  972 		goto out;
  973 	}
  974 
  975 	if ((ep->ep.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
      			== USB_ENDPOINT_XFER_ISOC) {
  976 		status = -EOPNOTSUPP;
  977 		goto out;
  978 	}
  979 
  980 	/*
  981 	 * Attempting to halt an IN ep will fail if any transfer requests
  982 	 * are still queued
  983 	 */
  984 	if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
  985 			&& !list_empty(&ep->queue)) {
  986 		status = -EAGAIN;
  987 		goto out;
  988 	}
  989 
  990 	spin_lock_irqsave(&ep->u3d->lock, flags);
  991 	mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
  992 	if (halt && wedge)
  993 		ep->wedge = 1;
  994 	else if (!halt)
  995 		ep->wedge = 0;
  996 	spin_unlock_irqrestore(&ep->u3d->lock, flags);
  997 
  998 	if (ep->ep_num == 0)
  999 		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
 1000 out:
 1001 	return status;
 1002 }
 1003 
 1004 static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
 1005 {
 1006 	return mv_u3d_ep_set_halt_wedge(_ep, halt, 0);
 1007 }
 1008 
 1009 static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
 1010 {
 1011 	return mv_u3d_ep_set_halt_wedge(_ep, 1, 1);
 1012 }
 1013 
 1014 static const struct usb_ep_ops mv_u3d_ep_ops = {
 1015 	.enable		= mv_u3d_ep_enable,
 1016 	.disable	= mv_u3d_ep_disable,
 1017 
 1018 	.alloc_request	= mv_u3d_alloc_request,
 1019 	.free_request	= mv_u3d_free_request,
 1020 
 1021 	.queue		= mv_u3d_ep_queue,
 1022 	.dequeue	= mv_u3d_ep_dequeue,
 1023 
 1024 	.set_wedge	= mv_u3d_ep_set_wedge,
 1025 	.set_halt	= mv_u3d_ep_set_halt,
 1026 	.fifo_flush	= mv_u3d_ep_fifo_flush,
 1027 };
 1028 
 1029 static void mv_u3d_controller_stop(struct mv_u3d *u3d)
 1030 {
 1031 	u32 tmp;
 1032 
 1033 	if (!u3d->clock_gating && u3d->vbus_valid_detect)
 1034 		iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
 1035 				&u3d->vuc_regs->intrenable);
 1036 	else
 1037 		iowrite32(0, &u3d->vuc_regs->intrenable);
 1038 	iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
 1039 	iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
 1040 	iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
 1041 	iowrite32(~0x0, &u3d->vuc_regs->linkchange);
 1042 	iowrite32(0x1, &u3d->vuc_regs->setuplock);
 1043 
 1044 	/* Reset the RUN bit in the command register to stop USB */
 1045 	tmp = ioread32(&u3d->op_regs->usbcmd);
 1046 	tmp &= ~MV_U3D_CMD_RUN_STOP;
 1047 	iowrite32(tmp, &u3d->op_regs->usbcmd);
 1048 	dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
 1049 		ioread32(&u3d->op_regs->usbcmd));
 1050 }
 1051 
 1052 static void mv_u3d_controller_start(struct mv_u3d *u3d)
 1053 {
 1054 	u32 usbintr;
 1055 	u32 temp;
 1056 
 1057 	/* enable link LTSSM state machine */
 1058 	temp = ioread32(&u3d->vuc_regs->ltssm);
 1059 	temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
 1060 	iowrite32(temp, &u3d->vuc_regs->ltssm);
 1061 
 1062 	/* Enable interrupts */
 1063 	usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
 1064 		MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
 1065 		MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
 1066 		(u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
 1067 	iowrite32(usbintr, &u3d->vuc_regs->intrenable);
 1068 
 1069 	/* Enable ctrl ep */
 1070 	iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
 1071 
 1072 	/* Set the Run bit in the command register */
 1073 	iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
 1074 	dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
 1075 		ioread32(&u3d->op_regs->usbcmd));
 1076 }
 1077 
 1078 static int mv_u3d_controller_reset(struct mv_u3d *u3d)
 1079 {
 1080 	unsigned int loops;
 1081 	u32 tmp;
 1082 
 1083 	/* Stop the controller */
 1084 	tmp = ioread32(&u3d->op_regs->usbcmd);
 1085 	tmp &= ~MV_U3D_CMD_RUN_STOP;
 1086 	iowrite32(tmp, &u3d->op_regs->usbcmd);
 1087 
 1088 	/* Reset the controller to get default values */
 1089 	iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
 1090 
 1091 	/* wait for reset to complete */
 1092 	loops = LOOPS(MV_U3D_RESET_TIMEOUT);
 1093 	while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
 1094 		if (loops == 0) {
 1095 			dev_err(u3d->dev,
 1096 				"Wait for RESET completed TIMEOUT\n");
 1097 			return -ETIMEDOUT;
 1098 		}
 1099 		loops--;
 1100 		udelay(LOOPS_USEC);
 1101 	}
 1102 
 1103 	/* Configure the Endpoint Context Address */
 1104 	iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
 1105 	iowrite32(0, &u3d->op_regs->dcbaaph);
 1106 
 1107 	return 0;
 1108 }
 1109 
 1110 static int mv_u3d_enable(struct mv_u3d *u3d)
 1111 {
 1112 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1113 	int retval;
 1114 
 1115 	if (u3d->active)
 1116 		return 0;
 1117 
 1118 	if (!u3d->clock_gating) {
 1119 		u3d->active = 1;
 1120 		return 0;
 1121 	}
 1122 
 1123 	dev_dbg(u3d->dev, "enable u3d\n");
 1124 	clk_enable(u3d->clk);
 1125 	if (pdata->phy_init) {
 1126 		retval = pdata->phy_init(u3d->phy_regs);
 1127 		if (retval) {
 1128 			dev_err(u3d->dev,
 1129 				"init phy error %d\n", retval);
 1130 			clk_disable(u3d->clk);
 1131 			return retval;
 1132 		}
 1133 	}
 1134 	u3d->active = 1;
 1135 
 1136 	return 0;
 1137 }
 1138 
 1139 static void mv_u3d_disable(struct mv_u3d *u3d)
 1140 {
 1141 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1142 	if (u3d->clock_gating && u3d->active) {
 1143 		dev_dbg(u3d->dev, "disable u3d\n");
 1144 		if (pdata->phy_deinit)
 1145 			pdata->phy_deinit(u3d->phy_regs);
 1146 		clk_disable(u3d->clk);
 1147 		u3d->active = 0;
 1148 	}
 1149 }
 1150 
 1151 static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
 1152 {
 1153 	struct mv_u3d *u3d;
 1154 	unsigned long flags;
 1155 	int retval = 0;
 1156 
 1157 	u3d = container_of(gadget, struct mv_u3d, gadget);
 1158 
 1159 	spin_lock_irqsave(&u3d->lock, flags);
 1160 
 1161 	u3d->vbus_active = (is_active != 0);
 1162 	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
 1163 		__func__, u3d->softconnect, u3d->vbus_active);
 1164 	/*
 1165 	 * 1. external VBUS detect: we can disable/enable clock on demand.
 1166 	 * 2. UDC VBUS detect: we have to enable clock all the time.
 1167 	 * 3. No VBUS detect: we have to enable clock all the time.
 1168 	 */
 1169 	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
 1170 		retval = mv_u3d_enable(u3d);
 1171 		if (retval == 0) {
 1172 			/*
 1173 			 * after the clock is disabled, all register context
 1174 			 * is lost, so we have to re-init the registers
 1175 			 */
 1176 			mv_u3d_controller_reset(u3d);
 1177 			mv_u3d_ep0_reset(u3d);
 1178 			mv_u3d_controller_start(u3d);
 1179 		}
 1180 	} else if (u3d->driver && u3d->softconnect) {
 1181 		if (!u3d->active)
 1182 			goto out;
 1183 
 1184 		/* stop all the transfers in the queue */
 1185 		mv_u3d_stop_activity(u3d, u3d->driver);
 1186 		mv_u3d_controller_stop(u3d);
 1187 		mv_u3d_disable(u3d);
 1188 	}
 1189 
 1190 out:
 1191 	spin_unlock_irqrestore(&u3d->lock, flags);
 1192 	return retval;
 1193 }
 1194 
 1195 /* constrain controller's VBUS power usage
 1196  * This call is used by gadget drivers during SET_CONFIGURATION calls,
 1197  * reporting how much power the device may consume.  For example, this
 1198  * could affect how quickly batteries are recharged.
 1199  *
 1200  * Returns zero on success, else negative errno.
 1201  */
 1202 static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
 1203 {
 1204 	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
 1205 
 1206 	u3d->power = mA;
 1207 
 1208 	return 0;
 1209 }
 1210 
 1211 static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
 1212 {
 1213 	struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
 1214 	unsigned long flags;
 1215 	int retval = 0;
 1216 
 1217 	spin_lock_irqsave(&u3d->lock, flags);
 1218 
 1219 	dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
 1220 		__func__, u3d->softconnect, u3d->vbus_active);
 1221 	u3d->softconnect = (is_on != 0);
 1222 	if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
 1223 		retval = mv_u3d_enable(u3d);
 1224 		if (retval == 0) {
 1225 			/*
 1226 			 * after the clock is disabled, all register context
 1227 			 * is lost, so we have to re-init the registers
 1228 			 */
 1229 			mv_u3d_controller_reset(u3d);
 1230 			mv_u3d_ep0_reset(u3d);
 1231 			mv_u3d_controller_start(u3d);
 1232 		}
 1233 	} else if (u3d->driver && u3d->vbus_active) {
 1234 		/* stop all the transfers in the queue */
 1235 		mv_u3d_stop_activity(u3d, u3d->driver);
 1236 		mv_u3d_controller_stop(u3d);
 1237 		mv_u3d_disable(u3d);
 1238 	}
 1239 
 1240 	spin_unlock_irqrestore(&u3d->lock, flags);
 1241 
 1242 	return retval;
 1243 }
 1244 
 1245 static int mv_u3d_start(struct usb_gadget *g,
 1246 		struct usb_gadget_driver *driver)
 1247 {
 1248 	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
 1249 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1250 	unsigned long flags;
 1251 
 1252 	if (u3d->driver)
 1253 		return -EBUSY;
 1254 
 1255 	spin_lock_irqsave(&u3d->lock, flags);
 1256 
 1257 	if (!u3d->clock_gating) {
 1258 		clk_enable(u3d->clk);
 1259 		if (pdata->phy_init)
 1260 			pdata->phy_init(u3d->phy_regs);
 1261 	}
 1262 
 1263 	/* hook up the driver ... */
 1264 	driver->driver.bus = NULL;
 1265 	u3d->driver = driver;
 1266 
 1267 	u3d->ep0_dir = USB_DIR_OUT;
 1268 
 1269 	spin_unlock_irqrestore(&u3d->lock, flags);
 1270 
 1271 	u3d->vbus_valid_detect = 1;
 1272 
 1273 	return 0;
 1274 }
 1275 
 1276 static int mv_u3d_stop(struct usb_gadget *g)
 1277 {
 1278 	struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
 1279 	struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
 1280 	unsigned long flags;
 1281 
 1282 	u3d->vbus_valid_detect = 0;
 1283 	spin_lock_irqsave(&u3d->lock, flags);
 1284 
 1285 	/* enable clock to access controller register */
 1286 	clk_enable(u3d->clk);
 1287 	if (pdata->phy_init)
 1288 		pdata->phy_init(u3d->phy_regs);
 1289 
 1290 	mv_u3d_controller_stop(u3d);
 1291 	/* stop all usb activities */
 1292 	u3d->gadget.speed = USB_SPEED_UNKNOWN;
 1293 	mv_u3d_stop_activity(u3d, NULL);
 1294 	mv_u3d_disable(u3d);
 1295 
 1296 	if (pdata->phy_deinit)
 1297 		pdata->phy_deinit(u3d->phy_regs);
 1298 	clk_disable(u3d->clk);
 1299 
 1300 	spin_unlock_irqrestore(&u3d->lock, flags);
 1301 
 1302 	u3d->driver = NULL;
 1303 
 1304 	return 0;
 1305 }
 1306 
 1307 /* device controller usb_gadget_ops structure */
 1308 static const struct usb_gadget_ops mv_u3d_ops = {
 1309 	/* notify controller that VBUS is powered or not */
 1310 	.vbus_session	= mv_u3d_vbus_session,
 1311 
 1312 	/* constrain controller's VBUS power usage */
 1313 	.vbus_draw	= mv_u3d_vbus_draw,
 1314 
 1315 	.pullup		= mv_u3d_pullup,
 1316 	.udc_start	= mv_u3d_start,
 1317 	.udc_stop	= mv_u3d_stop,
 1318 };
 1319 
 1320 static int mv_u3d_eps_init(struct mv_u3d *u3d)
 1321 {
 1322 	struct mv_u3d_ep	*ep;
 1323 	char name[14];
 1324 	int i;
 1325 
 1326 	/* initialize ep0, ep0 in/out use eps[1] */
 1327 	ep = &u3d->eps[1];
 1328 	ep->u3d = u3d;
 1329 	strncpy(ep->name, "ep0", sizeof(ep->name));
 1330 	ep->ep.name = ep->name;
 1331 	ep->ep.ops = &mv_u3d_ep_ops;
 1332 	ep->wedge = 0;
 1333 	usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
 1334 	ep->ep.caps.type_control = true;
 1335 	ep->ep.caps.dir_in = true;
 1336 	ep->ep.caps.dir_out = true;
 1337 	ep->ep_num = 0;
 1338 	ep->ep.desc = &mv_u3d_ep0_desc;
 1339 	INIT_LIST_HEAD(&ep->queue);
 1340 	INIT_LIST_HEAD(&ep->req_list);
 1341 	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
 1342 
 1343 	/* add ep0 ep_context */
 1344 	ep->ep_context = &u3d->ep_context[1];
 1345 
 1346 	/* initialize other endpoints */
 1347 	for (i = 2; i < u3d->max_eps * 2; i++) {
 1348 		ep = &u3d->eps[i];
 1349 		if (i & 1) {
 1350 			snprintf(name, sizeof(name), "ep%din", i >> 1);
 1351 			ep->direction = MV_U3D_EP_DIR_IN;
 1352 			ep->ep.caps.dir_in = true;
 1353 		} else {
 1354 			snprintf(name, sizeof(name), "ep%dout", i >> 1);
 1355 			ep->direction = MV_U3D_EP_DIR_OUT;
 1356 			ep->ep.caps.dir_out = true;
 1357 		}
 1358 		ep->u3d = u3d;
 1359 		strncpy(ep->name, name, sizeof(ep->name));
 1360 		ep->ep.name = ep->name;
 1361 
 1362 		ep->ep.caps.type_iso = true;
 1363 		ep->ep.caps.type_bulk = true;
 1364 		ep->ep.caps.type_int = true;
 1365 
 1366 		ep->ep.ops = &mv_u3d_ep_ops;
 1367 		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
 1368 		ep->ep_num = i / 2;
 1369 
 1370 		INIT_LIST_HEAD(&ep->queue);
 1371 		list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
 1372 
 1373 		INIT_LIST_HEAD(&ep->req_list);
 1374 		spin_lock_init(&ep->req_lock);
 1375 		ep->ep_context = &u3d->ep_context[i];
 1376 	}
 1377 
 1378 	return 0;
 1379 }
 1380 
 1381 /* delete all endpoint requests, called with spinlock held */
 1382 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
 1383 {
 1384 	/* endpoint fifo flush */
 1385 	mv_u3d_ep_fifo_flush(&ep->ep);
 1386 
 1387 	while (!list_empty(&ep->queue)) {
 1388 		struct mv_u3d_req *req = NULL;
 1389 		req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
 1390 		mv_u3d_done(ep, req, status);
 1391 	}
 1392 }
 1393 
 1394 /* stop all USB activities */
 1395 static
 1396 void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
 1397 {
 1398 	struct mv_u3d_ep	*ep;
 1399 
 1400 	mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);
 1401 
 1402 	list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
 1403 		mv_u3d_nuke(ep, -ESHUTDOWN);
 1404 	}
 1405 
 1406 	/* report disconnect; the driver is already quiesced */
 1407 	if (driver) {
 1408 		spin_unlock(&u3d->lock);
 1409 		driver->disconnect(&u3d->gadget);
 1410 		spin_lock(&u3d->lock);
 1411 	}
 1412 }
 1413 
 1414 static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
 1415 {
 1416 	/* Increment the error count */
 1417 	u3d->errors++;
 1418 	dev_err(u3d->dev, "%s\n", __func__);
 1419 }
 1420 
 1421 static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
 1422 {
 1423 	u32 linkchange;
 1424 
 1425 	linkchange = ioread32(&u3d->vuc_regs->linkchange);
 1426 	iowrite32(linkchange, &u3d->vuc_regs->linkchange);
 1427 
 1428 	dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
 1429 
 1430 	if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
 1431 		dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
 1432 			ioread32(&u3d->vuc_regs->ltssmstate));
 1433 
 1434 		u3d->usb_state = USB_STATE_DEFAULT;
 1435 		u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
 1436 		u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
 1437 
 1438 		/* set speed */
 1439 		u3d->gadget.speed = USB_SPEED_SUPER;
 1440 	}
 1441 
 1442 	if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
 1443 		dev_dbg(u3d->dev, "link suspend\n");
 1444 		u3d->resume_state = u3d->usb_state;
 1445 		u3d->usb_state = USB_STATE_SUSPENDED;
 1446 	}
 1447 
 1448 	if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
 1449 		dev_dbg(u3d->dev, "link resume\n");
 1450 		u3d->usb_state = u3d->resume_state;
 1451 		u3d->resume_state = 0;
 1452 	}
 1453 
 1454 	if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
 1455 		dev_dbg(u3d->dev, "warm reset\n");
 1456 		u3d->usb_state = USB_STATE_POWERED;
 1457 	}
 1458 
 1459 	if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
 1460 		dev_dbg(u3d->dev, "hot reset\n");
 1461 		u3d->usb_state = USB_STATE_DEFAULT;
 1462 	}
 1463 
 1464 	if (linkchange & MV_U3D_LINK_CHANGE_INACT)
 1465 		dev_dbg(u3d->dev, "inactive\n");
 1466 
 1467 	if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
 1468 		dev_dbg(u3d->dev, "ss.disabled\n");
 1469 
 1470 	if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
 1471 		dev_dbg(u3d->dev, "vbus invalid\n");
 1472 		u3d->usb_state = USB_STATE_ATTACHED;
 1473 		u3d->vbus_valid_detect = 1;
 1474 		/* if external vbus detect is not supported,
 1475 		 * we handle it here.
 1476 		 */
 1477 		if (!u3d->vbus) {
 1478 			spin_unlock(&u3d->lock);
 1479 			mv_u3d_vbus_session(&u3d->gadget, 0);
 1480 			spin_lock(&u3d->lock);
 1481 		}
 1482 	}
 1483 }
 1484 
 1485 static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
 1486 				struct usb_ctrlrequest *setup)
 1487 {
 1488 	u32 tmp;
 1489 
 1490 	if (u3d->usb_state != USB_STATE_DEFAULT) {
 1491 		dev_err(u3d->dev,
 1492 			"%s, cannot setaddr in this state (%d)\n",
 1493 			__func__, u3d->usb_state);
 1494 		goto err;
 1495 	}
 1496 
 1497 	u3d->dev_addr = (u8)setup->wValue;
 1498 
 1499 	dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
 1500 
 1501 	if (u3d->dev_addr > 127) {
 1502 		dev_err(u3d->dev,
 1503 			"%s, u3d address is wrong (out of range)\n", __func__);
 1504 		u3d->dev_addr = 0;
 1505 		goto err;
 1506 	}
 1507 
 1508 	/* update usb state */
 1509 	u3d->usb_state = USB_STATE_ADDRESS;
 1510 
 1511 	/* set the new address */
 1512 	tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
 1513 	tmp &= ~0x7F;
 1514 	tmp |= (u32)u3d->dev_addr;
 1515 	iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
 1516 
 1517 	return;
 1518 err:
 1519 	mv_u3d_ep0_stall(u3d);
 1520 }
 1521 
 1522 static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
 1523 {
 1524 	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
 1525 		if (setup->bRequest == USB_REQ_SET_CONFIGURATION)
 1526 			return 1;
 1527 
 1528 	return 0;
 1529 }
 1530 
 1531 static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
 1532 	struct usb_ctrlrequest *setup)
 1533 	__releases(&u3d->lock)
 1534 	__acquires(&u3d->lock)
 1535 {
 1536 	bool delegate = false;
 1537 
 1538 	mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);
 1539 
 1540 	dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
 1541 			setup->bRequestType, setup->bRequest,
 1542 			setup->wValue, setup->wIndex, setup->wLength);
 1543 
 1544 	/* We process some standard setup requests here */
 1545 	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
 1546 		switch (setup->bRequest) {
 1547 		case USB_REQ_GET_STATUS:
 1548 			delegate = true;
 1549 			break;
 1550 
 1551 		case USB_REQ_SET_ADDRESS:
 1552 			mv_u3d_ch9setaddress(u3d, setup);
 1553 			break;
 1554 
 1555 		case USB_REQ_CLEAR_FEATURE:
 1556 			delegate = true;
 1557 			break;
 1558 
 1559 		case USB_REQ_SET_FEATURE:
 1560 			delegate = true;
 1561 			break;
 1562 
 1563 		default:
 1564 			delegate = true;
 1565 		}
 1566 	} else
 1567 		delegate = true;
 1568 
 1569 	/* delegate USB standard requests to the gadget driver */
 1570 	if (delegate) {
 1571 		/* USB requests handled by gadget */
 1572 		if (setup->wLength) {
 1573 			/* DATA phase from gadget, STATUS phase from u3d */
 1574 			u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
 1575 					? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
 1576 			spin_unlock(&u3d->lock);
 1577 			if (u3d->driver->setup(&u3d->gadget,
 1578 				&u3d->local_setup_buff) < 0) {
 1579 				dev_err(u3d->dev, "setup error!\n");
 1580 				mv_u3d_ep0_stall(u3d);
 1581 			}
 1582 			spin_lock(&u3d->lock);
 1583 		} else {
 1584 			/* no DATA phase, STATUS phase from gadget */
 1585 			u3d->ep0_dir = MV_U3D_EP_DIR_IN;
 1586 			u3d->ep0_state = MV_U3D_STATUS_STAGE;
 1587 			spin_unlock(&u3d->lock);
 1588 			if (u3d->driver->setup(&u3d->gadget,
 1589 				&u3d->local_setup_buff) < 0)
 1590 				mv_u3d_ep0_stall(u3d);
 1591 			spin_lock(&u3d->lock);
 1592 		}
 1593 
 1594 		if (mv_u3d_is_set_configuration(setup)) {
 1595 			dev_dbg(u3d->dev, "u3d configured\n");
 1596 			u3d->usb_state = USB_STATE_CONFIGURED;
 1597 		}
 1598 	}
 1599 }
 1600 
 1601 static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
 1602 {
 1603 	struct mv_u3d_ep_context *epcontext;
 1604 
 1605 	epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
 1606 
 1607 	/* Copy the setup packet to local buffer */
 1608 	memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
 1609 }
 1610 
 1611 static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
 1612 {
 1613 	u32 tmp, i;
 1614 	/* Process all Setup packet received interrupts */
 1615 	tmp = ioread32(&u3d->vuc_regs->setuplock);
 1616 	if (tmp) {
 1617 		for (i = 0; i < u3d->max_eps; i++) {
 1618 			if (tmp & (1 << i)) {
 1619 				mv_u3d_get_setup_data(u3d, i,
 1620 					(u8 *)(&u3d->local_setup_buff));
 1621 				mv_u3d_handle_setup_packet(u3d, i,
 1622 					&u3d->local_setup_buff);
 1623 			}
 1624 		}
 1625 	}
 1626 
 1627 	iowrite32(tmp, &u3d->vuc_regs->setuplock);
 1628 }
 1629 
 1630 static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
 1631 {
 1632 	u32 tmp, bit_pos;
 1633 	int i, ep_num = 0, direction = 0;
 1634 	struct mv_u3d_ep	*curr_ep;
 1635 	struct mv_u3d_req *curr_req, *temp_req;
 1636 	int status;
 1637 
 1638 	tmp = ioread32(&u3d->vuc_regs->endcomplete);
 1639 
 1640 	dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
 1641 	if (!tmp)
 1642 		return;
 1643 	iowrite32(tmp, &u3d->vuc_regs->endcomplete);
 1644 
 1645 	for (i = 0; i < u3d->max_eps * 2; i++) {
 1646 		ep_num = i >> 1;
 1647 		direction = i % 2;
 1648 
 1649 		bit_pos = 1 << (ep_num + 16 * direction);
 1650 
 1651 		if (!(bit_pos & tmp))
 1652 			continue;
 1653 
 1654 		if (i == 0)
 1655 			curr_ep = &u3d->eps[1];
 1656 		else
 1657 			curr_ep = &u3d->eps[i];
 1658 
 1659 		/* remove req out of ep request list after completion */
 1660 		dev_dbg(u3d->dev, "tr comp: check req_list\n");
 1661 		spin_lock(&curr_ep->req_lock);
 1662 		if (!list_empty(&curr_ep->req_list)) {
 1663 			struct mv_u3d_req *req;
 1664 			req = list_entry(curr_ep->req_list.next,
 1665 						struct mv_u3d_req, list);
 1666 			list_del_init(&req->list);
 1667 			curr_ep->processing = 0;
 1668 		}
 1669 		spin_unlock(&curr_ep->req_lock);
 1670 
 1671 		/* process the req queue until an uncomplete request */
 1672 		list_for_each_entry_safe(curr_req, temp_req,
 1673 			&curr_ep->queue, queue) {
 1674 			status = mv_u3d_process_ep_req(u3d, i, curr_req);
 1675 			if (status)
 1676 				break;
 1677 			/* write back status to req */
 1678 			curr_req->req.status = status;
 1679 
 1680 			/* ep0 request completion */
 1681 			if (ep_num == 0) {
 1682 				mv_u3d_done(curr_ep, curr_req, 0);
 1683 				break;
 1684 			} else {
 1685 				mv_u3d_done(curr_ep, curr_req, status);
 1686 			}
 1687 		}
 1688 
 1689 		dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
 1690 		mv_u3d_start_queue(curr_ep);
 1691 	}
 1692 }
 1693 
 1694 static irqreturn_t mv_u3d_irq(int irq, void *dev)
 1695 {
 1696 	struct mv_u3d *u3d = (struct mv_u3d *)dev;
 1697 	u32 status, intr;
 1698 	u32 bridgesetting;
 1699 	u32 trbunderrun;
 1700 
 1701 	spin_lock(&u3d->lock);
 1702 
 1703 	status = ioread32(&u3d->vuc_regs->intrcause);
 1704 	intr = ioread32(&u3d->vuc_regs->intrenable);
 1705 	status &= intr;
 1706 
 1707 	if (status == 0) {
 1708 		spin_unlock(&u3d->lock);
 1709 		dev_err(u3d->dev, "irq error!\n");
 1710 		return IRQ_NONE;
 1711 	}
 1712 
 1713 	if (status & MV_U3D_USBINT_VBUS_VALID) {
 1714 		bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
 1715 		if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
 1716 			/* write vbus valid bit of bridge setting to clear */
 1717 			bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
 1718 			iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
 1719 			dev_dbg(u3d->dev, "vbus valid\n");
 1720 
 1721 			u3d->usb_state = USB_STATE_POWERED;
 1722 			u3d->vbus_valid_detect = 0;
 1723 			/* if external vbus detect is not supported,
 1724 			 * we handle it here.
 1725 			 */
 1726 			if (!u3d->vbus) {
 1727 				spin_unlock(&u3d->lock);
 1728 				mv_u3d_vbus_session(&u3d->gadget, 1);
 1729 				spin_lock(&u3d->lock);
 1730 			}
 1731 		} else
 1732 			dev_err(u3d->dev, "vbus bit is not set\n");
 1733 	}
 1734 
 1735 	/* RX data is already in the 16KB FIFO.*/
 1736 	if (status & MV_U3D_USBINT_UNDER_RUN) {
 1737 		trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
 1738 		dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
 1739 		iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
 1740 		mv_u3d_irq_process_error(u3d);
 1741 	}
 1742 
 1743 	if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
 1744 		/* write one to clear */
 1745 		iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
 1746 			| MV_U3D_USBINT_TXDESC_ERR),
 1747 			&u3d->vuc_regs->intrcause);
 1748 		dev_err(u3d->dev, "desc err 0x%x\n", status);
 1749 		mv_u3d_irq_process_error(u3d);
 1750 	}
 1751 
 1752 	if (status & MV_U3D_USBINT_LINK_CHG)
 1753 		mv_u3d_irq_process_link_change(u3d);
 1754 
 1755 	if (status & MV_U3D_USBINT_TX_COMPLETE)
 1756 		mv_u3d_irq_process_tr_complete(u3d);
 1757 
 1758 	if (status & MV_U3D_USBINT_RX_COMPLETE)
 1759 		mv_u3d_irq_process_tr_complete(u3d);
 1760 
 1761 	if (status & MV_U3D_USBINT_SETUP)
 1762 		mv_u3d_irq_process_setup(u3d);
 1763 
 1764 	spin_unlock(&u3d->lock);
 1765 	return IRQ_HANDLED;
 1766 }
 1767 
 1768 static int mv_u3d_remove(struct platform_device *dev)
 1769 {
 1770 	struct mv_u3d *u3d = platform_get_drvdata(dev);
 1771 
 1772 	BUG_ON(u3d == NULL);
 1773 
 1774 	usb_del_gadget_udc(&u3d->gadget);
 1775 
 1776 	/* free memory allocated in probe */
 1777 	dma_pool_destroy(u3d->trb_pool);
 1778 
 1779 	if (u3d->ep_context)
 1780 		dma_free_coherent(&dev->dev, u3d->ep_context_size,
 1781 			u3d->ep_context, u3d->ep_context_dma);
 1782 
 1783 	kfree(u3d->eps);
 1784 
 1785 	if (u3d->irq)
 1786 		free_irq(u3d->irq, u3d);
 1787 
 1788 	if (u3d->cap_regs)
 1789 		iounmap(u3d->cap_regs);
 1790 	u3d->cap_regs = NULL;
 1791 
 1792 	kfree(u3d->status_req);
 1793 
 1794 	clk_put(u3d->clk);
 1795 
 1796 	kfree(u3d);
 1797 
 1798 	return 0;
 1799 }
 1800 
 1801 static int mv_u3d_probe(struct platform_device *dev)
 1802 {
 1803 	struct mv_u3d *u3d = NULL;
 1804 	struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
 1805 	int retval = 0;
 1806 	struct resource *r;
 1807 	size_t size;
 1808 
 1809 	if (!dev_get_platdata(&dev->dev)) {
 1810 		dev_err(&dev->dev, "missing platform_data\n");
 1811 		retval = -ENODEV;
 1812 		goto err_pdata;
 1813 	}
 1814 
 1815 	u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
 1816 	if (!u3d) {
 1817 		retval = -ENOMEM;
 1818 		goto err_alloc_private;
 1819 	}
 1820 
 1821 	spin_lock_init(&u3d->lock);
 1822 
 1823 	platform_set_drvdata(dev, u3d);
 1824 
 1825 	u3d->dev = &dev->dev;
 1826 	u3d->vbus = pdata->vbus;
 1827 
 1828 	u3d->clk = clk_get(&dev->dev, NULL);
 1829 	if (IS_ERR(u3d->clk)) {
 1830 		retval = PTR_ERR(u3d->clk);
 1831 		goto err_get_clk;
 1832 	}
 1833 
 1834 	r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
 1835 	if (!r) {
 1836 		dev_err(&dev->dev, "no I/O memory resource defined\n");
 1837 		retval = -ENODEV;
 1838 		goto err_get_cap_regs;
 1839 	}
 1840 
 1841 	u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
 1842 		ioremap(r->start, resource_size(r));
 1843 	if (!u3d->cap_regs) {
 1844 		dev_err(&dev->dev, "failed to map I/O memory\n");
 1845 		retval = -EBUSY;
 1846 		goto err_map_cap_regs;
 1847 	} else {
 1848 		dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n",
 1849 			(unsigned long) r->start,
 1850 			(unsigned long) u3d->cap_regs);
 1851 	}
 1852 
 1853 	/* we will access controller register, so enable the u3d controller */
 1854 	clk_enable(u3d->clk);
 1855 
 1856 	if (pdata->phy_init) {
 1857 		retval = pdata->phy_init(u3d->phy_regs);
 1858 		if (retval) {
 1859 			dev_err(&dev->dev, "init phy error %d\n", retval);
 1860 			goto err_u3d_enable;
 1861 		}
 1862 	}
 1863 
 1864 	u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs
 1865 		+ MV_U3D_USB3_OP_REGS_OFFSET);
 1866 
 1867 	u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs
 1868 		+ ioread32(&u3d->cap_regs->vuoff));
 1869 
 1870 	u3d->max_eps = 16;
 1871 
 1872 	/*
 1873 	 * some platforms use usb to download the boot image and may not
 1874 	 * disconnect the usb gadget before loading the kernel, so stop u3d here.
 1875 	 */
 1876 	mv_u3d_controller_stop(u3d);
 1877 	iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
 1878 
 1879 	if (pdata->phy_deinit)
 1880 		pdata->phy_deinit(u3d->phy_regs);
 1881 	clk_disable(u3d->clk);
 1882 
 1883 	size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
 1884 	size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
 1885 		& ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
 1886 	u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
 1887 					&u3d->ep_context_dma, GFP_KERNEL);
 1888 	if (!u3d->ep_context) {
 1889 		dev_err(&dev->dev, "allocate ep context memory failed\n");
 1890 		retval = -ENOMEM;
 1891 		goto err_alloc_ep_context;
 1892 	}
 1893 	u3d->ep_context_size = size;
 1894 
 1895 	/* create TRB dma_pool resource */
 1896 	u3d->trb_pool = dma_pool_create("u3d_trb",
 1897 			&dev->dev,
 1898 			sizeof(struct mv_u3d_trb_hw),
 1899 			MV_U3D_TRB_ALIGNMENT,
 1900 			MV_U3D_DMA_BOUNDARY);
 1901 
 1902 	if (!u3d->trb_pool) {
 1903 		retval = -ENOMEM;
 1904 		goto err_alloc_trb_pool;
 1905 	}
 1906 
 1907 	size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
 1908 	u3d->eps = kzalloc(size, GFP_KERNEL);
 1909 	if (!u3d->eps) {
 1910 		retval = -ENOMEM;
 1911 		goto err_alloc_eps;
 1912 	}
 1913 
 1914 	/* initialize ep0 status request structure */
 1915 	u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
 1916 	if (!u3d->status_req) {
 1917 		retval = -ENOMEM;
 1918 		goto err_alloc_status_req;
 1919 	}
 1920 	INIT_LIST_HEAD(&u3d->status_req->queue);
 1921 
 1922 	/* allocate a small amount of memory to get valid address */
 1923 	u3d->status_req->req.buf = (char *)u3d->status_req
 1924 					+ sizeof(struct mv_u3d_req);
 1925 	u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
 1926 
 1927 	u3d->resume_state = USB_STATE_NOTATTACHED;
 1928 	u3d->usb_state = USB_STATE_ATTACHED;
 1929 	u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
 1930 	u3d->remote_wakeup = 0;
 1931 
 1932 	r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
 1933 	if (!r) {
 1934 		dev_err(&dev->dev, "no IRQ resource defined\n");
 1935 		retval = -ENODEV;
 1936 		goto err_get_irq;
 1937 	}
 1938 	u3d->irq = r->start;
 1939 	if (request_irq(u3d->irq, mv_u3d_irq,
 1940 		IRQF_SHARED, driver_name, u3d)) {
 1941 		dev_err(&dev->dev, "Request irq %d for u3d failed\n",
 1942 			u3d->irq);
 1943 		u3d->irq = 0;	/* don't free_irq() an irq we never got */
 1944 		retval = -ENODEV;
 1945 		goto err_request_irq;
 1946 	}
 1947 
 1948 	/* initialize gadget structure */
 1949 	u3d->gadget.ops = &mv_u3d_ops;	/* usb_gadget_ops */
 1950 	u3d->gadget.ep0 = &u3d->eps[1].ep;	/* gadget ep0 */
 1951 	INIT_LIST_HEAD(&u3d->gadget.ep_list);	/* ep_list */
 1952 	u3d->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
 1953 
 1954 	/* the "gadget" abstracts/virtualizes the controller */
 1955 	u3d->gadget.name = driver_name;		/* gadget name */
 1956 
 1957 	mv_u3d_eps_init(u3d);
 1958 
 1959 	/* external vbus detection */
 1960 	if (u3d->vbus) {
 1961 		u3d->clock_gating = 1;
 1962 		dev_err(&dev->dev, "external vbus detection\n");
 1963 	}
 1964 
 1965 	if (!u3d->clock_gating)
 1966 		u3d->vbus_active = 1;
 1967 
 1968 	/* enable usb3 controller vbus detection */
 1969 	u3d->vbus_valid_detect = 1;
 1970 
 1971 	retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
 1972 	if (retval)
 1973 		goto err_unregister;
 1974 
 1975 	dev_dbg(&dev->dev, "successfully probed usb3 device %s clock gating\n",
 1976 		u3d->clock_gating ? "with" : "without");
 1977 
 1978 	return 0;
 1979 
 1980 err_unregister:
 1981 	free_irq(u3d->irq, u3d);
 1982 err_request_irq:
 1983 err_get_irq:
 1984 	kfree(u3d->status_req);
 1985 err_alloc_status_req:
 1986 	kfree(u3d->eps);
 1987 err_alloc_eps:
 1988 	dma_pool_destroy(u3d->trb_pool);
 1989 err_alloc_trb_pool:
 1990 	dma_free_coherent(&dev->dev, u3d->ep_context_size,
 1991 		u3d->ep_context, u3d->ep_context_dma);
 1992 err_alloc_ep_context:
 1993 	if (pdata->phy_deinit)
 1994 		pdata->phy_deinit(u3d->phy_regs);
 1995 	clk_disable(u3d->clk);
 1996 err_u3d_enable:
 1997 	iounmap(u3d->cap_regs);
 1998 err_map_cap_regs:
 1999 err_get_cap_regs:
 2000 err_get_clk:
 2001 	clk_put(u3d->clk);
 2002 	kfree(u3d);
 2003 err_alloc_private:
 2004 err_pdata:
 2005 	return retval;
 2006 }
 2007 
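/*
 * The ep-context sizing in mv_u3d_probe() rounds the allocation up to
 * MV_U3D_EP_CONTEXT_ALIGNMENT with the usual power-of-two align-up
 * idiom.  A sketch (hypothetical helper, assuming the alignment is a
 * power of two):
 */
static inline size_t sketch_align_up(size_t size, size_t align)
{
	return (size + align - 1) & ~(align - 1);
}
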
 2008 #ifdef CONFIG_PM_SLEEP
 2009 static int mv_u3d_suspend(struct device *dev)
 2010 {
 2011 	struct mv_u3d *u3d = dev_get_drvdata(dev);
 2012 
 2013 	/*
 2014 	 * usb can suspend only when the cable is unplugged.
 2015 	 * So do not care about clock_gating == 1; that case is handled
 2016 	 * by the vbus session.
 2017 	 */
 2018 	if (!u3d->clock_gating) {
 2019 		mv_u3d_controller_stop(u3d);
 2020 
 2021 		spin_lock_irq(&u3d->lock);
 2022 		/* stop all usb activities */
 2023 		mv_u3d_stop_activity(u3d, u3d->driver);
 2024 		spin_unlock_irq(&u3d->lock);
 2025 
 2026 		mv_u3d_disable(u3d);
 2027 	}
 2028 
 2029 	return 0;
 2030 }
 2031 
 2032 static int mv_u3d_resume(struct device *dev)
 2033 {
 2034 	struct mv_u3d *u3d = dev_get_drvdata(dev);
 2035 	int retval;
 2036 
 2037 	if (!u3d->clock_gating) {
 2038 		retval = mv_u3d_enable(u3d);
 2039 		if (retval)
 2040 			return retval;
 2041 
 2042 		if (u3d->driver && u3d->softconnect) {
 2043 			mv_u3d_controller_reset(u3d);
 2044 			mv_u3d_ep0_reset(u3d);
 2045 			mv_u3d_controller_start(u3d);
 2046 		}
 2047 	}
 2048 
 2049 	return 0;
 2050 }
 2051 #endif
 2052 
 2053 static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
 2054 
 2055 static void mv_u3d_shutdown(struct platform_device *dev)
 2056 {
 2057 	struct mv_u3d *u3d = platform_get_drvdata(dev);
 2058 	u32 tmp;
 2059 
 2060 	tmp = ioread32(&u3d->op_regs->usbcmd);
 2061 	tmp &= ~MV_U3D_CMD_RUN_STOP;
 2062 	iowrite32(tmp, &u3d->op_regs->usbcmd);
 2063 }
 2064 
 2065 static struct platform_driver mv_u3d_driver = {
 2066 	.probe		= mv_u3d_probe,
 2067 	.remove		= mv_u3d_remove,
 2068 	.shutdown	= mv_u3d_shutdown,
 2069 	.driver		= {
 2070 		.name	= "mv-u3d",
 2071 		.pm	= &mv_u3d_pm_ops,
 2072 	},
 2073 };
 2074 
 2075 module_platform_driver(mv_u3d_driver);
 2076 MODULE_ALIAS("platform:mv-u3d");
 2077 MODULE_DESCRIPTION(DRIVER_DESC);
 2078 MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>");
 2079 MODULE_LICENSE("GPL");
 2080 
 2081 
 2082 
 2083 
 2084 
 2085 /* LDV_COMMENT_BEGIN_MAIN */
 2086 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 2087 
 2088 /*###########################################################################*/
 2089 
 2090 /*############## Driver Environment Generator 0.2 output ####################*/
 2091 
 2092 /*###########################################################################*/
 2093 
 2094 
 2095 
/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before the driver is unloaded. */
 2097 void ldv_check_final_state(void);
 2098 
 2099 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 2100 void ldv_check_return_value(int res);
 2101 
 2102 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 2103 void ldv_check_return_value_probe(int res);
 2104 
 2105 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 2106 void ldv_initialize(void);
 2107 
 2108 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 2109 void ldv_handler_precall(void);
 2110 
/* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
 2112 int nondet_int(void);
 2113 
 2114 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 2115 int LDV_IN_INTERRUPT;
 2116 
 2117 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 2118 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 2119 
 2120 
 2121 
 2122 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 2123 	/*============================= VARIABLE DECLARATION PART   =============================*/
 2124 	/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2125 	/* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
 2126 	/* LDV_COMMENT_BEGIN_PREP */
 2127 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2128 	/* LDV_COMMENT_END_PREP */
 2129 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
 2130 	struct usb_ep * var_group1;
 2131 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
 2132 	const struct usb_endpoint_descriptor * var_mv_u3d_ep_enable_8_p1;
 2133 	/* LDV_COMMENT_BEGIN_PREP */
 2134 	#ifdef CONFIG_PM_SLEEP
 2135 	#endif
 2136 	/* LDV_COMMENT_END_PREP */
 2137 	/* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
 2138 	/* LDV_COMMENT_BEGIN_PREP */
 2139 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2140 	/* LDV_COMMENT_END_PREP */
 2141 	/* LDV_COMMENT_BEGIN_PREP */
 2142 	#ifdef CONFIG_PM_SLEEP
 2143 	#endif
 2144 	/* LDV_COMMENT_END_PREP */
 2145 	/* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
 2146 	/* LDV_COMMENT_BEGIN_PREP */
 2147 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2148 	/* LDV_COMMENT_END_PREP */
 2149 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_alloc_request" */
 2150 	gfp_t  var_mv_u3d_alloc_request_10_p1;
 2151 	/* LDV_COMMENT_BEGIN_PREP */
 2152 	#ifdef CONFIG_PM_SLEEP
 2153 	#endif
 2154 	/* LDV_COMMENT_END_PREP */
 2155 	/* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
 2156 	/* LDV_COMMENT_BEGIN_PREP */
 2157 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2158 	/* LDV_COMMENT_END_PREP */
 2159 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_free_request" */
 2160 	struct usb_request * var_group2;
 2161 	/* LDV_COMMENT_BEGIN_PREP */
 2162 	#ifdef CONFIG_PM_SLEEP
 2163 	#endif
 2164 	/* LDV_COMMENT_END_PREP */
 2165 	/* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
 2166 	/* LDV_COMMENT_BEGIN_PREP */
 2167 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2168 	/* LDV_COMMENT_END_PREP */
 2169 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_queue" */
 2170 	gfp_t  var_mv_u3d_ep_queue_13_p2;
 2171 	/* LDV_COMMENT_BEGIN_PREP */
 2172 	#ifdef CONFIG_PM_SLEEP
 2173 	#endif
 2174 	/* LDV_COMMENT_END_PREP */
 2175 	/* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
 2176 	/* LDV_COMMENT_BEGIN_PREP */
 2177 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2178 	/* LDV_COMMENT_END_PREP */
 2179 	/* LDV_COMMENT_BEGIN_PREP */
 2180 	#ifdef CONFIG_PM_SLEEP
 2181 	#endif
 2182 	/* LDV_COMMENT_END_PREP */
 2183 	/* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
 2184 	/* LDV_COMMENT_BEGIN_PREP */
 2185 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2186 	/* LDV_COMMENT_END_PREP */
 2187 	/* LDV_COMMENT_BEGIN_PREP */
 2188 	#ifdef CONFIG_PM_SLEEP
 2189 	#endif
 2190 	/* LDV_COMMENT_END_PREP */
 2191 	/* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
 2192 	/* LDV_COMMENT_BEGIN_PREP */
 2193 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2194 	/* LDV_COMMENT_END_PREP */
 2195 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_set_halt" */
 2196 	int  var_mv_u3d_ep_set_halt_17_p1;
 2197 	/* LDV_COMMENT_BEGIN_PREP */
 2198 	#ifdef CONFIG_PM_SLEEP
 2199 	#endif
 2200 	/* LDV_COMMENT_END_PREP */
 2201 	/* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
 2202 	/* LDV_COMMENT_BEGIN_PREP */
 2203 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2204 	/* LDV_COMMENT_END_PREP */
 2205 	/* LDV_COMMENT_BEGIN_PREP */
 2206 	#ifdef CONFIG_PM_SLEEP
 2207 	#endif
 2208 	/* LDV_COMMENT_END_PREP */
 2209 
 2210 	/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2211 	/* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
 2212 	/* LDV_COMMENT_BEGIN_PREP */
 2213 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2214 	/* LDV_COMMENT_END_PREP */
 2215 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
 2216 	struct usb_gadget * var_group3;
 2217 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
 2218 	int  var_mv_u3d_vbus_session_24_p1;
 2219 	/* LDV_COMMENT_BEGIN_PREP */
 2220 	#ifdef CONFIG_PM_SLEEP
 2221 	#endif
 2222 	/* LDV_COMMENT_END_PREP */
 2223 	/* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
 2224 	/* LDV_COMMENT_BEGIN_PREP */
 2225 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2226 	/* LDV_COMMENT_END_PREP */
 2227 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_draw" */
 2228 	unsigned  var_mv_u3d_vbus_draw_25_p1;
 2229 	/* LDV_COMMENT_BEGIN_PREP */
 2230 	#ifdef CONFIG_PM_SLEEP
 2231 	#endif
 2232 	/* LDV_COMMENT_END_PREP */
 2233 	/* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
 2234 	/* LDV_COMMENT_BEGIN_PREP */
 2235 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2236 	/* LDV_COMMENT_END_PREP */
 2237 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_pullup" */
 2238 	int  var_mv_u3d_pullup_26_p1;
 2239 	/* LDV_COMMENT_BEGIN_PREP */
 2240 	#ifdef CONFIG_PM_SLEEP
 2241 	#endif
 2242 	/* LDV_COMMENT_END_PREP */
 2243 	/* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 2244 	/* LDV_COMMENT_BEGIN_PREP */
 2245 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2246 	/* LDV_COMMENT_END_PREP */
 2247 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_start" */
 2248 	struct usb_gadget_driver * var_group4;
 2249 	/* LDV_COMMENT_BEGIN_PREP */
 2250 	#ifdef CONFIG_PM_SLEEP
 2251 	#endif
 2252 	/* LDV_COMMENT_END_PREP */
 2253 	/* content: static int mv_u3d_stop(struct usb_gadget *g)*/
 2254 	/* LDV_COMMENT_BEGIN_PREP */
 2255 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2256 	/* LDV_COMMENT_END_PREP */
 2257 	/* LDV_COMMENT_BEGIN_PREP */
 2258 	#ifdef CONFIG_PM_SLEEP
 2259 	#endif
 2260 	/* LDV_COMMENT_END_PREP */
 2261 
 2262 	/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2263 	/* content: static int mv_u3d_probe(struct platform_device *dev)*/
 2264 	/* LDV_COMMENT_BEGIN_PREP */
 2265 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2266 	/* LDV_COMMENT_END_PREP */
 2267 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_probe" */
 2268 	struct platform_device * var_group5;
 2269 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mv_u3d_probe" */
 2270 	static int res_mv_u3d_probe_41;
 2271 	/* LDV_COMMENT_BEGIN_PREP */
 2272 	#ifdef CONFIG_PM_SLEEP
 2273 	#endif
 2274 	/* LDV_COMMENT_END_PREP */
 2275 	/* content: static int mv_u3d_remove(struct platform_device *dev)*/
 2276 	/* LDV_COMMENT_BEGIN_PREP */
 2277 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2278 	/* LDV_COMMENT_END_PREP */
 2279 	/* LDV_COMMENT_BEGIN_PREP */
 2280 	#ifdef CONFIG_PM_SLEEP
 2281 	#endif
 2282 	/* LDV_COMMENT_END_PREP */
 2283 	/* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
 2284 	/* LDV_COMMENT_BEGIN_PREP */
 2285 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2286 	#ifdef CONFIG_PM_SLEEP
 2287 	#endif
 2288 	/* LDV_COMMENT_END_PREP */
 2289 
 2290 	/** CALLBACK SECTION request_irq **/
 2291 	/* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
 2292 	/* LDV_COMMENT_BEGIN_PREP */
 2293 	#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2294 	/* LDV_COMMENT_END_PREP */
 2295 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
 2296 	int  var_mv_u3d_irq_39_p0;
 2297 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
 2298 	void * var_mv_u3d_irq_39_p1;
 2299 	/* LDV_COMMENT_BEGIN_PREP */
 2300 	#ifdef CONFIG_PM_SLEEP
 2301 	#endif
 2302 	/* LDV_COMMENT_END_PREP */
 2303 
 2304 
 2305 
 2306 
 2307 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 2308 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 2309 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 2310 	LDV_IN_INTERRUPT=1;
 2311 
 2312 
 2313 
 2314 
 2315 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 2316 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 2317 	/*============================= FUNCTION CALL SECTION       =============================*/
 2318 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 2319 	ldv_initialize();
 2320 	
 2321 
 2322 	
 2323 
 2324 	int ldv_s_mv_u3d_driver_platform_driver = 0;
 2325 
 2326 	
 2327 
 2328 
 2329 	while (nondet_int()
 2330 		|| ldv_s_mv_u3d_driver_platform_driver != 0
 2331 	) {
 2332 
 2333 		switch(nondet_int()) {
 2334 
 2335 			case 0: {
 2336 
 2337 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2338 				
 2339 
 2340 				/* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
 2341 				/* LDV_COMMENT_BEGIN_PREP */
 2342 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2343 				/* LDV_COMMENT_END_PREP */
 2344 				/* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "mv_u3d_ep_ops" */
 2345 				ldv_handler_precall();
 2346 				mv_u3d_ep_enable( var_group1, var_mv_u3d_ep_enable_8_p1);
 2347 				/* LDV_COMMENT_BEGIN_PREP */
 2348 				#ifdef CONFIG_PM_SLEEP
 2349 				#endif
 2350 				/* LDV_COMMENT_END_PREP */
 2351 				
 2352 
 2353 				
 2354 
 2355 			}
 2356 
 2357 			break;
 2358 			case 1: {
 2359 
 2360 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2361 				
 2362 
 2363 				/* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
 2364 				/* LDV_COMMENT_BEGIN_PREP */
 2365 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2366 				/* LDV_COMMENT_END_PREP */
 2367 				/* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "mv_u3d_ep_ops" */
 2368 				ldv_handler_precall();
 2369 				mv_u3d_ep_disable( var_group1);
 2370 				/* LDV_COMMENT_BEGIN_PREP */
 2371 				#ifdef CONFIG_PM_SLEEP
 2372 				#endif
 2373 				/* LDV_COMMENT_END_PREP */
 2374 				
 2375 
 2376 				
 2377 
 2378 			}
 2379 
 2380 			break;
 2381 			case 2: {
 2382 
 2383 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2384 				
 2385 
 2386 				/* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
 2387 				/* LDV_COMMENT_BEGIN_PREP */
 2388 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2389 				/* LDV_COMMENT_END_PREP */
 2390 				/* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "mv_u3d_ep_ops" */
 2391 				ldv_handler_precall();
 2392 				mv_u3d_alloc_request( var_group1, var_mv_u3d_alloc_request_10_p1);
 2393 				/* LDV_COMMENT_BEGIN_PREP */
 2394 				#ifdef CONFIG_PM_SLEEP
 2395 				#endif
 2396 				/* LDV_COMMENT_END_PREP */
 2397 				
 2398 
 2399 				
 2400 
 2401 			}
 2402 
 2403 			break;
 2404 			case 3: {
 2405 
 2406 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2407 				
 2408 
 2409 				/* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
 2410 				/* LDV_COMMENT_BEGIN_PREP */
 2411 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2412 				/* LDV_COMMENT_END_PREP */
 2413 				/* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "mv_u3d_ep_ops" */
 2414 				ldv_handler_precall();
 2415 				mv_u3d_free_request( var_group1, var_group2);
 2416 				/* LDV_COMMENT_BEGIN_PREP */
 2417 				#ifdef CONFIG_PM_SLEEP
 2418 				#endif
 2419 				/* LDV_COMMENT_END_PREP */
 2420 				
 2421 
 2422 				
 2423 
 2424 			}
 2425 
 2426 			break;
 2427 			case 4: {
 2428 
 2429 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2430 				
 2431 
 2432 				/* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
 2433 				/* LDV_COMMENT_BEGIN_PREP */
 2434 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2435 				/* LDV_COMMENT_END_PREP */
 2436 				/* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "mv_u3d_ep_ops" */
 2437 				ldv_handler_precall();
 2438 				mv_u3d_ep_queue( var_group1, var_group2, var_mv_u3d_ep_queue_13_p2);
 2439 				/* LDV_COMMENT_BEGIN_PREP */
 2440 				#ifdef CONFIG_PM_SLEEP
 2441 				#endif
 2442 				/* LDV_COMMENT_END_PREP */
 2443 				
 2444 
 2445 				
 2446 
 2447 			}
 2448 
 2449 			break;
 2450 			case 5: {
 2451 
 2452 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2453 				
 2454 
 2455 				/* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
 2456 				/* LDV_COMMENT_BEGIN_PREP */
 2457 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2458 				/* LDV_COMMENT_END_PREP */
 2459 				/* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "mv_u3d_ep_ops" */
 2460 				ldv_handler_precall();
 2461 				mv_u3d_ep_dequeue( var_group1, var_group2);
 2462 				/* LDV_COMMENT_BEGIN_PREP */
 2463 				#ifdef CONFIG_PM_SLEEP
 2464 				#endif
 2465 				/* LDV_COMMENT_END_PREP */
 2466 				
 2467 
 2468 				
 2469 
 2470 			}
 2471 
 2472 			break;
 2473 			case 6: {
 2474 
 2475 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2476 				
 2477 
 2478 				/* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
 2479 				/* LDV_COMMENT_BEGIN_PREP */
 2480 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2481 				/* LDV_COMMENT_END_PREP */
 2482 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "mv_u3d_ep_ops" */
 2483 				ldv_handler_precall();
 2484 				mv_u3d_ep_set_wedge( var_group1);
 2485 				/* LDV_COMMENT_BEGIN_PREP */
 2486 				#ifdef CONFIG_PM_SLEEP
 2487 				#endif
 2488 				/* LDV_COMMENT_END_PREP */
 2489 				
 2490 
 2491 				
 2492 
 2493 			}
 2494 
 2495 			break;
 2496 			case 7: {
 2497 
 2498 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2499 				
 2500 
 2501 				/* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
 2502 				/* LDV_COMMENT_BEGIN_PREP */
 2503 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2504 				/* LDV_COMMENT_END_PREP */
 2505 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "mv_u3d_ep_ops" */
 2506 				ldv_handler_precall();
 2507 				mv_u3d_ep_set_halt( var_group1, var_mv_u3d_ep_set_halt_17_p1);
 2508 				/* LDV_COMMENT_BEGIN_PREP */
 2509 				#ifdef CONFIG_PM_SLEEP
 2510 				#endif
 2511 				/* LDV_COMMENT_END_PREP */
 2512 				
 2513 
 2514 				
 2515 
 2516 			}
 2517 
 2518 			break;
 2519 			case 8: {
 2520 
 2521 				/** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
 2522 				
 2523 
 2524 				/* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
 2525 				/* LDV_COMMENT_BEGIN_PREP */
 2526 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2527 				/* LDV_COMMENT_END_PREP */
 2528 				/* LDV_COMMENT_FUNCTION_CALL Function from field "fifo_flush" from driver structure with callbacks "mv_u3d_ep_ops" */
 2529 				ldv_handler_precall();
 2530 				mv_u3d_ep_fifo_flush( var_group1);
 2531 				/* LDV_COMMENT_BEGIN_PREP */
 2532 				#ifdef CONFIG_PM_SLEEP
 2533 				#endif
 2534 				/* LDV_COMMENT_END_PREP */
 2535 				
 2536 
 2537 				
 2538 
 2539 			}
 2540 
 2541 			break;
 2542 			case 9: {
 2543 
 2544 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2545 				
 2546 
 2547 				/* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
 2548 				/* LDV_COMMENT_BEGIN_PREP */
 2549 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2550 				/* LDV_COMMENT_END_PREP */
 2551 				/* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_session" from driver structure with callbacks "mv_u3d_ops" */
 2552 				ldv_handler_precall();
 2553 				mv_u3d_vbus_session( var_group3, var_mv_u3d_vbus_session_24_p1);
 2554 				/* LDV_COMMENT_BEGIN_PREP */
 2555 				#ifdef CONFIG_PM_SLEEP
 2556 				#endif
 2557 				/* LDV_COMMENT_END_PREP */
 2558 				
 2559 
 2560 				
 2561 
 2562 			}
 2563 
 2564 			break;
 2565 			case 10: {
 2566 
 2567 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2568 				
 2569 
 2570 				/* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
 2571 				/* LDV_COMMENT_BEGIN_PREP */
 2572 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2573 				/* LDV_COMMENT_END_PREP */
 2574 				/* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_draw" from driver structure with callbacks "mv_u3d_ops" */
 2575 				ldv_handler_precall();
 2576 				mv_u3d_vbus_draw( var_group3, var_mv_u3d_vbus_draw_25_p1);
 2577 				/* LDV_COMMENT_BEGIN_PREP */
 2578 				#ifdef CONFIG_PM_SLEEP
 2579 				#endif
 2580 				/* LDV_COMMENT_END_PREP */
 2581 				
 2582 
 2583 				
 2584 
 2585 			}
 2586 
 2587 			break;
 2588 			case 11: {
 2589 
 2590 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2591 				
 2592 
 2593 				/* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
 2594 				/* LDV_COMMENT_BEGIN_PREP */
 2595 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2596 				/* LDV_COMMENT_END_PREP */
 2597 				/* LDV_COMMENT_FUNCTION_CALL Function from field "pullup" from driver structure with callbacks "mv_u3d_ops" */
 2598 				ldv_handler_precall();
 2599 				mv_u3d_pullup( var_group3, var_mv_u3d_pullup_26_p1);
 2600 				/* LDV_COMMENT_BEGIN_PREP */
 2601 				#ifdef CONFIG_PM_SLEEP
 2602 				#endif
 2603 				/* LDV_COMMENT_END_PREP */
 2604 				
 2605 
 2606 				
 2607 
 2608 			}
 2609 
 2610 			break;
 2611 			case 12: {
 2612 
 2613 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2614 				
 2615 
 2616 				/* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 2617 				/* LDV_COMMENT_BEGIN_PREP */
 2618 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2619 				/* LDV_COMMENT_END_PREP */
 2620 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "mv_u3d_ops" */
 2621 				ldv_handler_precall();
 2622 				mv_u3d_start( var_group3, var_group4);
 2623 				/* LDV_COMMENT_BEGIN_PREP */
 2624 				#ifdef CONFIG_PM_SLEEP
 2625 				#endif
 2626 				/* LDV_COMMENT_END_PREP */
 2627 				
 2628 
 2629 				
 2630 
 2631 			}
 2632 
 2633 			break;
 2634 			case 13: {
 2635 
 2636 				/** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
 2637 				
 2638 
 2639 				/* content: static int mv_u3d_stop(struct usb_gadget *g)*/
 2640 				/* LDV_COMMENT_BEGIN_PREP */
 2641 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2642 				/* LDV_COMMENT_END_PREP */
 2643 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "mv_u3d_ops" */
 2644 				ldv_handler_precall();
 2645 				mv_u3d_stop( var_group3);
 2646 				/* LDV_COMMENT_BEGIN_PREP */
 2647 				#ifdef CONFIG_PM_SLEEP
 2648 				#endif
 2649 				/* LDV_COMMENT_END_PREP */
 2650 				
 2651 
 2652 				
 2653 
 2654 			}
 2655 
 2656 			break;
 2657 			case 14: {
 2658 
 2659 				/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2660 				if(ldv_s_mv_u3d_driver_platform_driver==0) {
 2661 
 2662 				/* content: static int mv_u3d_probe(struct platform_device *dev)*/
 2663 				/* LDV_COMMENT_BEGIN_PREP */
 2664 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2665 				/* LDV_COMMENT_END_PREP */
 2666 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mv_u3d_driver". Standard function test for correct return result. */
 2667 				res_mv_u3d_probe_41 = mv_u3d_probe( var_group5);
 2668 				 ldv_check_return_value(res_mv_u3d_probe_41);
 2669 				 ldv_check_return_value_probe(res_mv_u3d_probe_41);
 2670 				 if(res_mv_u3d_probe_41) 
 2671 					goto ldv_module_exit;
 2672 				/* LDV_COMMENT_BEGIN_PREP */
 2673 				#ifdef CONFIG_PM_SLEEP
 2674 				#endif
 2675 				/* LDV_COMMENT_END_PREP */
 2676 				ldv_s_mv_u3d_driver_platform_driver++;
 2677 
 2678 				}
 2679 
 2680 			}
 2681 
 2682 			break;
 2683 			case 15: {
 2684 
 2685 				/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2686 				if(ldv_s_mv_u3d_driver_platform_driver==1) {
 2687 
 2688 				/* content: static int mv_u3d_remove(struct platform_device *dev)*/
 2689 				/* LDV_COMMENT_BEGIN_PREP */
 2690 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2691 				/* LDV_COMMENT_END_PREP */
 2692 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mv_u3d_driver" */
 2693 				ldv_handler_precall();
 2694 				mv_u3d_remove( var_group5);
 2695 				/* LDV_COMMENT_BEGIN_PREP */
 2696 				#ifdef CONFIG_PM_SLEEP
 2697 				#endif
 2698 				/* LDV_COMMENT_END_PREP */
 2699 				ldv_s_mv_u3d_driver_platform_driver++;
 2700 
 2701 				}
 2702 
 2703 			}
 2704 
 2705 			break;
 2706 			case 16: {
 2707 
 2708 				/** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
 2709 				if(ldv_s_mv_u3d_driver_platform_driver==2) {
 2710 
 2711 				/* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
 2712 				/* LDV_COMMENT_BEGIN_PREP */
 2713 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2714 				#ifdef CONFIG_PM_SLEEP
 2715 				#endif
 2716 				/* LDV_COMMENT_END_PREP */
 2717 				/* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "mv_u3d_driver" */
 2718 				ldv_handler_precall();
 2719 				mv_u3d_shutdown( var_group5);
 2720 				ldv_s_mv_u3d_driver_platform_driver=0;
 2721 
 2722 				}
 2723 
 2724 			}
 2725 
 2726 			break;
 2727 			case 17: {
 2728 
 2729 				/** CALLBACK SECTION request_irq **/
 2730 				LDV_IN_INTERRUPT=2;
 2731 
 2732 				/* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
 2733 				/* LDV_COMMENT_BEGIN_PREP */
 2734 				#define DRIVER_DESC		"Marvell PXA USB3.0 Device Controller driver"
 2735 				/* LDV_COMMENT_END_PREP */
 2736 				/* LDV_COMMENT_FUNCTION_CALL */
 2737 				ldv_handler_precall();
 2738 				mv_u3d_irq( var_mv_u3d_irq_39_p0, var_mv_u3d_irq_39_p1);
 2739 				/* LDV_COMMENT_BEGIN_PREP */
 2740 				#ifdef CONFIG_PM_SLEEP
 2741 				#endif
 2742 				/* LDV_COMMENT_END_PREP */
 2743 				LDV_IN_INTERRUPT=1;
 2744 
 2745 				
 2746 
 2747 			}
 2748 
 2749 			break;
 2750 			default: break;
 2751 
 2752 		}
 2753 
 2754 	}
 2755 
 2756 	ldv_module_exit: 
 2757 
 2758 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 2759 	ldv_final: ldv_check_final_state();
 2760 
 2761 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 2762 	return;
 2763 
 2764 }
 2765 #endif
 2766 
 2767 /* LDV_COMMENT_END_MAIN */
 2768 
 2769 #line 15 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/8743/dscv_tempdir/dscv/ri/320_7a/drivers/usb/gadget/udc/mv_u3d_core.o.c.prepared"
    1 
    2 #include <verifier/rcv.h>
    3 #include <kernel-model/ERR.inc>
    4 
    5 struct clk;
    6 
    7 
    8 /* LDV_COMMENT_CHANGE_STATE Initialize counter to zero. */
    9 int ldv_counter_clk = 0;
   10 
   11 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_disable_clk') Release. */
   12 void ldv_clk_disable_clk(struct clk *clk)
   13 {
    14     /* LDV_COMMENT_CHANGE_STATE Reset counter to zero. */
   15     ldv_counter_clk = 0;
   16 }
   17 
    18 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_enable_clk') Acquire. */
   19 int ldv_clk_enable_clk(void)
   20 {
   21  int retval = ldv_undef_int();
   22  if (!retval)
   23  {
   24   /* LDV_COMMENT_CHANGE_STATE Increase counter. */
   25   ldv_counter_clk = 1;
   26  }
   27  return retval;
   28 }
   29 
   30 
   31 /* LDV_COMMENT_CHANGE_STATE Initialize counter to zero. */
   32 int ldv_counter_clk_of_mv_u3d = 0;
   33 
   34 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_disable_clk_of_mv_u3d') Release. */
   35 void ldv_clk_disable_clk_of_mv_u3d(struct clk *clk)
   36 {
    37     /* LDV_COMMENT_CHANGE_STATE Reset counter to zero. */
   38     ldv_counter_clk_of_mv_u3d = 0;
   39 }
   40 
    41 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_enable_clk_of_mv_u3d') Acquire. */
   42 int ldv_clk_enable_clk_of_mv_u3d(void)
   43 {
   44  int retval = ldv_undef_int();
   45  if (!retval)
   46  {
   47   /* LDV_COMMENT_CHANGE_STATE Increase counter. */
   48   ldv_counter_clk_of_mv_u3d = 1;
   49  }
   50  return retval;
   51 }
   52 
   53 
    54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all clocks are released at the end */
   55 void ldv_check_final_state(void)
   56 {
    57   /* LDV_COMMENT_ASSERT Clock 'clk' must be disabled at the end */
   58   ldv_assert(ldv_counter_clk == 0);
    59   /* LDV_COMMENT_ASSERT Clock 'clk_of_mv_u3d' must be disabled at the end */
   60   ldv_assert(ldv_counter_clk_of_mv_u3d == 0);
    61 }
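/*
 * Editor's sketch (assumption-labelled, not part of the verified sources):
 * how the counter model above catches a leaked clock. ldv_clk_enable_clk()
 * returns 0 on success and sets the counter to one; a path that never calls
 * ldv_clk_disable_clk() reaches ldv_check_final_state() with the counter
 * still 1 and fails the ldv_assert(). Both functions below are hypothetical.
 */
static void ldv_example_balanced(void)
{
	if (ldv_clk_enable_clk())		/* nondeterministic failure: counter stays 0 */
		return;
	/* ... use the clock ... */
	ldv_clk_disable_clk((struct clk *)0);	/* counter back to 0: final check passes */
}

static void ldv_example_leaky(void)
{
	if (ldv_clk_enable_clk())
		return;
	/* missing ldv_clk_disable_clk(): ldv_check_final_state() would report
	 * the error, since ldv_counter_clk == 1 at the end. */
}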
     1 #ifndef _LDV_ERR_
     2 #define _LDV_ERR_
    3 
    4 #include <linux/kernel.h>
    5 
     6 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err') This function returns the result of checking whether the pointer is an error value. */
    7 bool ldv_is_err(const void *ptr)
    8 {
     9 /*LDV_COMMENT_RETURN Return 1 if the pointer lies above LDV_PTR_MAX.*/
   10 	return ((unsigned long)ptr > LDV_PTR_MAX);
   11 }
   12 
    13 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_err_ptr') This function returns an error pointer encoding the given error code. */
   14 void* ldv_err_ptr(long error)
   15 {
   16 /*LDV_COMMENT_RETURN Return error pointer.*/
   17 	return (void *)(LDV_PTR_MAX - error);
   18 }
   19 
    20 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_ptr_err') This function returns the error code encoded in an error pointer. */
   21 long ldv_ptr_err(const void *ptr)
   22 {
   23 /*LDV_COMMENT_RETURN Return error code.*/
   24 	return (long)(LDV_PTR_MAX - (unsigned long)ptr);
   25 }
   26 
    27 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err_or_null') This function checks whether the pointer is an error value or NULL. */
   28 bool ldv_is_err_or_null(const void *ptr)
   29 {
    30 /*LDV_COMMENT_RETURN Return 0 if the pointer is valid and non-NULL, and 1 otherwise*/
    31 	return !ptr || ldv_is_err(ptr);
   32 }
   33 
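/*
 * Editor's sketch (not part of the original model): a round trip through the
 * error-pointer encoding above, mirroring the kernel's ERR_PTR()/PTR_ERR().
 * The function name and the use of -12 (-ENOMEM) are illustrative assumptions.
 */
static long ldv_err_ptr_example(void)
{
	void *p = ldv_err_ptr(-12);	/* encodes to LDV_PTR_MAX + 12 */

	if (ldv_is_err(p))		/* true: the value lies above LDV_PTR_MAX */
		return ldv_ptr_err(p);	/* decodes back to -12 */
	return 0;
}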
    34 #endif /* _LDV_ERR_ */
     1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
     4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
     8 /* The error label wrapper. It is used because some static verifiers (like
     9    BLAST) do not accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
    19 /* Infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
   47 
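/*
 * Editor's sketch (hypothetical helper): typical use of the nondeterminism
 * primitives above. ldv_assume() prunes paths a verifier should ignore;
 * ldv_assert() marks states that must be unreachable.
 */
static inline int ldv_nondet_example(void)
{
	int x = ldv_undef_int();	/* any value the verifier chooses */

	ldv_assume(x > 0);		/* paths with x <= 0 are discarded */
	ldv_assert(x != 0);		/* can never fail after the assumption */
	return x;
}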
    48 /* Add an explicit model for the __builtin_expect GCC builtin. Without the model a
    49    return value would be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
    65 /* The constant is used to simulate an error return from the ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
    68 #endif /* _LDV_RCV_H_ */
     1 /*
    2  * device.h - generic, centralized driver model
    3  *
    4  * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
    5  * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
    6  * Copyright (c) 2008-2009 Novell Inc.
    7  *
    8  * This file is released under the GPLv2
    9  *
   10  * See Documentation/driver-model/ for more information.
   11  */
   12 
   13 #ifndef _DEVICE_H_
   14 #define _DEVICE_H_
   15 
   16 #include <linux/ioport.h>
   17 #include <linux/kobject.h>
   18 #include <linux/klist.h>
   19 #include <linux/list.h>
   20 #include <linux/lockdep.h>
   21 #include <linux/compiler.h>
   22 #include <linux/types.h>
   23 #include <linux/mutex.h>
   24 #include <linux/pinctrl/devinfo.h>
   25 #include <linux/pm.h>
   26 #include <linux/atomic.h>
   27 #include <linux/ratelimit.h>
   28 #include <linux/uidgid.h>
   29 #include <linux/gfp.h>
   30 #include <asm/device.h>
   31 
   32 struct device;
   33 struct device_private;
   34 struct device_driver;
   35 struct driver_private;
   36 struct module;
   37 struct class;
   38 struct subsys_private;
   39 struct bus_type;
   40 struct device_node;
   41 struct fwnode_handle;
   42 struct iommu_ops;
   43 struct iommu_group;
   44 struct iommu_fwspec;
   45 
   46 struct bus_attribute {
   47 	struct attribute	attr;
   48 	ssize_t (*show)(struct bus_type *bus, char *buf);
   49 	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
   50 };
   51 
   52 #define BUS_ATTR(_name, _mode, _show, _store)	\
   53 	struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
   54 #define BUS_ATTR_RW(_name) \
   55 	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
   56 #define BUS_ATTR_RO(_name) \
   57 	struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
   58 
   59 extern int __must_check bus_create_file(struct bus_type *,
   60 					struct bus_attribute *);
   61 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
   62 
   63 /**
   64  * struct bus_type - The bus type of the device
   65  *
   66  * @name:	The name of the bus.
   67  * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
   68  * @dev_root:	Default device to use as the parent.
   69  * @dev_attrs:	Default attributes of the devices on the bus.
   70  * @bus_groups:	Default attributes of the bus.
   71  * @dev_groups:	Default attributes of the devices on the bus.
   72  * @drv_groups: Default attributes of the device drivers on the bus.
   73  * @match:	Called, perhaps multiple times, whenever a new device or driver
   74  *		is added for this bus. It should return a positive value if the
   75  *		given device can be handled by the given driver and zero
   76  *		otherwise. It may also return error code if determining that
   77  *		the driver supports the device is not possible. In case of
   78  *		-EPROBE_DEFER it will queue the device for deferred probing.
   79  * @uevent:	Called when a device is added, removed, or a few other things
   80  *		that generate uevents to add the environment variables.
    81  * @probe:	Called when a new device or driver is added to this bus; calls
    82  *		the specific driver's probe to initialize the matched device.
    83  * @remove:	Called when a device is removed from this bus.
   84  * @shutdown:	Called at shut-down time to quiesce the device.
   85  *
   86  * @online:	Called to put the device back online (after offlining it).
   87  * @offline:	Called to put the device offline for hot-removal. May fail.
   88  *
   89  * @suspend:	Called when a device on this bus wants to go to sleep mode.
   90  * @resume:	Called to bring a device on this bus out of sleep mode.
   91  * @num_vf:	Called to find out how many virtual functions a device on this
   92  *		bus supports.
   93  * @pm:		Power management operations of this bus, callback the specific
   94  *		device driver's pm-ops.
   95  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
   96  *              driver implementations to a bus and allow the driver to do
   97  *              bus-specific setup
   98  * @p:		The private data of the driver core, only the driver core can
   99  *		touch this.
  100  * @lock_key:	Lock class key for use by the lock validator
  101  *
  102  * A bus is a channel between the processor and one or more devices. For the
  103  * purposes of the device model, all devices are connected via a bus, even if
  104  * it is an internal, virtual, "platform" bus. Buses can plug into each other.
  105  * A USB controller is usually a PCI device, for example. The device model
  106  * represents the actual connections between buses and the devices they control.
  107  * A bus is represented by the bus_type structure. It contains the name, the
  108  * default attributes, the bus' methods, PM operations, and the driver core's
  109  * private data.
  110  */
  111 struct bus_type {
  112 	const char		*name;
  113 	const char		*dev_name;
  114 	struct device		*dev_root;
  115 	struct device_attribute	*dev_attrs;	/* use dev_groups instead */
  116 	const struct attribute_group **bus_groups;
  117 	const struct attribute_group **dev_groups;
  118 	const struct attribute_group **drv_groups;
  119 
  120 	int (*match)(struct device *dev, struct device_driver *drv);
  121 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  122 	int (*probe)(struct device *dev);
  123 	int (*remove)(struct device *dev);
  124 	void (*shutdown)(struct device *dev);
  125 
  126 	int (*online)(struct device *dev);
  127 	int (*offline)(struct device *dev);
  128 
  129 	int (*suspend)(struct device *dev, pm_message_t state);
  130 	int (*resume)(struct device *dev);
  131 
  132 	int (*num_vf)(struct device *dev);
  133 
  134 	const struct dev_pm_ops *pm;
  135 
  136 	const struct iommu_ops *iommu_ops;
  137 
  138 	struct subsys_private *p;
  139 	struct lock_class_key lock_key;
  140 };
  141 
  142 extern int __must_check bus_register(struct bus_type *bus);
  143 
  144 extern void bus_unregister(struct bus_type *bus);
  145 
  146 extern int __must_check bus_rescan_devices(struct bus_type *bus);
  147 
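/*
 * Editor's sketch (hypothetical names, not from the original header): a
 * minimal bus built from the struct and helpers declared above. A real
 * match() would compare device and driver identifiers instead of accepting
 * everything.
 */
static int example_match(struct device *dev, struct device_driver *drv)
{
	return 1;	/* claim every device on this bus */
}

static struct bus_type example_bus = {
	.name	= "example",
	.match	= example_match,
};

static int example_bus_setup(void)
{
	return bus_register(&example_bus);	/* __must_check: propagate errors */
}

static void example_bus_teardown(void)
{
	bus_unregister(&example_bus);
}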
  148 /* iterator helpers for buses */
  149 struct subsys_dev_iter {
  150 	struct klist_iter		ki;
  151 	const struct device_type	*type;
  152 };
  153 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
  154 			 struct bus_type *subsys,
  155 			 struct device *start,
  156 			 const struct device_type *type);
  157 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
  158 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
  159 
  160 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
  161 		     int (*fn)(struct device *dev, void *data));
  162 struct device *bus_find_device(struct bus_type *bus, struct device *start,
  163 			       void *data,
  164 			       int (*match)(struct device *dev, void *data));
  165 struct device *bus_find_device_by_name(struct bus_type *bus,
  166 				       struct device *start,
  167 				       const char *name);
  168 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
  169 					struct device *hint);
  170 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
  171 		     void *data, int (*fn)(struct device_driver *, void *));
  172 void bus_sort_breadthfirst(struct bus_type *bus,
  173 			   int (*compare)(const struct device *a,
  174 					  const struct device *b));
  175 /*
  176  * Bus notifiers: Get notified of addition/removal of devices
  177  * and binding/unbinding of drivers to devices.
  178  * In the long run, it should be a replacement for the platform
  179  * notify hooks.
  180  */
  181 struct notifier_block;
  182 
  183 extern int bus_register_notifier(struct bus_type *bus,
  184 				 struct notifier_block *nb);
  185 extern int bus_unregister_notifier(struct bus_type *bus,
  186 				   struct notifier_block *nb);
  187 
   188 /* All 4 notifiers below get called with the target struct device *
  189  * as an argument. Note that those functions are likely to be called
  190  * with the device lock held in the core, so be careful.
  191  */
  192 #define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
  193 #define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
  194 #define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
  195 #define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
  196 						      bound */
  197 #define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
  198 #define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
  199 						      unbound */
  200 #define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
  201 						      from the device */
  202 #define BUS_NOTIFY_DRIVER_NOT_BOUND	0x00000008 /* driver fails to be bound */
  203 
  204 extern struct kset *bus_get_kset(struct bus_type *bus);
  205 extern struct klist *bus_get_device_klist(struct bus_type *bus);
  206 
  207 /**
  208  * enum probe_type - device driver probe type to try
  209  *	Device drivers may opt in for special handling of their
  210  *	respective probe routines. This tells the core what to
  211  *	expect and prefer.
  212  *
  213  * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
  214  *	whether probed synchronously or asynchronously.
   215  * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
   216  *	probing order is not essential for booting the system may
  217  *	opt into executing their probes asynchronously.
  218  * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
  219  *	their probe routines to run synchronously with driver and
  220  *	device registration (with the exception of -EPROBE_DEFER
  221  *	handling - re-probing always ends up being done asynchronously).
  222  *
  223  * Note that the end goal is to switch the kernel to use asynchronous
  224  * probing by default, so annotating drivers with
  225  * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
  226  * to speed up boot process while we are validating the rest of the
  227  * drivers.
  228  */
  229 enum probe_type {
  230 	PROBE_DEFAULT_STRATEGY,
  231 	PROBE_PREFER_ASYNCHRONOUS,
  232 	PROBE_FORCE_SYNCHRONOUS,
  233 };
  234 
  235 /**
  236  * struct device_driver - The basic device driver structure
  237  * @name:	Name of the device driver.
  238  * @bus:	The bus which the device of this driver belongs to.
  239  * @owner:	The module owner.
  240  * @mod_name:	Used for built-in modules.
  241  * @suppress_bind_attrs: Disables bind/unbind via sysfs.
  242  * @probe_type:	Type of the probe (synchronous or asynchronous) to use.
  243  * @of_match_table: The open firmware table.
  244  * @acpi_match_table: The ACPI match table.
  245  * @probe:	Called to query the existence of a specific device,
  246  *		whether this driver can work with it, and bind the driver
  247  *		to a specific device.
  248  * @remove:	Called when the device is removed from the system to
  249  *		unbind a device from this driver.
  250  * @shutdown:	Called at shut-down time to quiesce the device.
  251  * @suspend:	Called to put the device to sleep mode. Usually to a
  252  *		low power state.
  253  * @resume:	Called to bring a device from sleep mode.
  254  * @groups:	Default attributes that get created by the driver core
  255  *		automatically.
  256  * @pm:		Power management operations of the device which matched
  257  *		this driver.
  258  * @p:		Driver core's private data, no one other than the driver
  259  *		core can touch this.
  260  *
  261  * The device driver-model tracks all of the drivers known to the system.
  262  * The main reason for this tracking is to enable the driver core to match
  263  * up drivers with new devices. Once drivers are known objects within the
  264  * system, however, a number of other things become possible. Device drivers
  265  * can export information and configuration variables that are independent
  266  * of any specific device.
  267  */
  268 struct device_driver {
  269 	const char		*name;
  270 	struct bus_type		*bus;
  271 
  272 	struct module		*owner;
  273 	const char		*mod_name;	/* used for built-in modules */
  274 
  275 	bool suppress_bind_attrs;	/* disables bind/unbind via sysfs */
  276 	enum probe_type probe_type;
  277 
  278 	const struct of_device_id	*of_match_table;
  279 	const struct acpi_device_id	*acpi_match_table;
  280 
  281 	int (*probe) (struct device *dev);
  282 	int (*remove) (struct device *dev);
  283 	void (*shutdown) (struct device *dev);
  284 	int (*suspend) (struct device *dev, pm_message_t state);
  285 	int (*resume) (struct device *dev);
  286 	const struct attribute_group **groups;
  287 
  288 	const struct dev_pm_ops *pm;
  289 
  290 	struct driver_private *p;
  291 };
  292 
  293 
  294 extern int __must_check driver_register(struct device_driver *drv);
  295 extern void driver_unregister(struct device_driver *drv);
  296 
  297 extern struct device_driver *driver_find(const char *name,
  298 					 struct bus_type *bus);
  299 extern int driver_probe_done(void);
  300 extern void wait_for_device_probe(void);
  301 
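/*
 * Editor's sketch (hypothetical driver): a minimal device_driver that opts in
 * to asynchronous probing through the probe_type field documented above, and
 * is registered with driver_register(). "example_bus" refers to the earlier
 * sketch.
 */
static int example_drv_probe(struct device *dev)
{
	return 0;	/* bind unconditionally; real drivers verify the device */
}

static struct device_driver example_drv = {
	.name		= "example-drv",
	.bus		= &example_bus,
	.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	.probe		= example_drv_probe,
};

static int example_drv_setup(void)
{
	return driver_register(&example_drv);	/* paired with driver_unregister() */
}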
  302 
  303 /* sysfs interface for exporting driver attributes */
  304 
  305 struct driver_attribute {
  306 	struct attribute attr;
  307 	ssize_t (*show)(struct device_driver *driver, char *buf);
  308 	ssize_t (*store)(struct device_driver *driver, const char *buf,
  309 			 size_t count);
  310 };
  311 
  312 #define DRIVER_ATTR(_name, _mode, _show, _store) \
  313 	struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
  314 #define DRIVER_ATTR_RW(_name) \
  315 	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
  316 #define DRIVER_ATTR_RO(_name) \
  317 	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
  318 #define DRIVER_ATTR_WO(_name) \
  319 	struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
  320 
  321 extern int __must_check driver_create_file(struct device_driver *driver,
  322 					const struct driver_attribute *attr);
  323 extern void driver_remove_file(struct device_driver *driver,
  324 			       const struct driver_attribute *attr);
  325 
  326 extern int __must_check driver_for_each_device(struct device_driver *drv,
  327 					       struct device *start,
  328 					       void *data,
  329 					       int (*fn)(struct device *dev,
  330 							 void *));
  331 struct device *driver_find_device(struct device_driver *drv,
  332 				  struct device *start, void *data,
  333 				  int (*match)(struct device *dev, void *data));
  334 
  335 /**
  336  * struct subsys_interface - interfaces to device functions
  337  * @name:       name of the device function
   338  * @subsys:     subsystem of the devices to attach to
  339  * @node:       the list of functions registered at the subsystem
  340  * @add_dev:    device hookup to device function handler
  341  * @remove_dev: device hookup to device function handler
  342  *
  343  * Simple interfaces attached to a subsystem. Multiple interfaces can
  344  * attach to a subsystem and its devices. Unlike drivers, they do not
  345  * exclusively claim or control devices. Interfaces usually represent
  346  * a specific functionality of a subsystem/class of devices.
  347  */
  348 struct subsys_interface {
  349 	const char *name;
  350 	struct bus_type *subsys;
  351 	struct list_head node;
  352 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
  353 	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
  354 };
  355 
  356 int subsys_interface_register(struct subsys_interface *sif);
  357 void subsys_interface_unregister(struct subsys_interface *sif);
  358 
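/*
 * Editor's sketch (hypothetical): a subsys_interface that attaches to the
 * earlier "example_bus" sketch and receives add_dev/remove_dev callbacks for
 * every device on that subsystem, without claiming the devices like a driver
 * would.
 */
static int example_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return 0;	/* observe the device; no exclusive binding */
}

static struct subsys_interface example_sif = {
	.name	 = "example-sif",
	.subsys	 = &example_bus,
	.add_dev = example_add_dev,
};
/* registered with subsys_interface_register(&example_sif) */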
  359 int subsys_system_register(struct bus_type *subsys,
  360 			   const struct attribute_group **groups);
  361 int subsys_virtual_register(struct bus_type *subsys,
  362 			    const struct attribute_group **groups);
  363 
  364 /**
  365  * struct class - device classes
  366  * @name:	Name of the class.
  367  * @owner:	The module owner.
  368  * @class_attrs: Default attributes of this class.
  369  * @class_groups: Default attributes of this class.
  370  * @dev_groups:	Default attributes of the devices that belong to the class.
  371  * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
  372  * @dev_uevent:	Called when a device is added, removed from this class, or a
  373  *		few other things that generate uevents to add the environment
  374  *		variables.
  375  * @devnode:	Callback to provide the devtmpfs.
  376  * @class_release: Called to release this class.
  377  * @dev_release: Called to release the device.
  378  * @suspend:	Used to put the device to sleep mode, usually to a low power
  379  *		state.
  380  * @resume:	Used to bring the device from the sleep mode.
   381  * @ns_type:	Callbacks so sysfs can determine namespaces.
   382  * @namespace:	Namespace of the device that belongs to this class.
  383  * @pm:		The default device power management operations of this class.
  384  * @p:		The private data of the driver core, no one other than the
  385  *		driver core can touch this.
  386  *
  387  * A class is a higher-level view of a device that abstracts out low-level
  388  * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
  389  * at the class level, they are all simply disks. Classes allow user space
  390  * to work with devices based on what they do, rather than how they are
  391  * connected or how they work.
  392  */
  393 struct class {
  394 	const char		*name;
  395 	struct module		*owner;
  396 
  397 	struct class_attribute		*class_attrs;
  398 	const struct attribute_group	**class_groups;
  399 	const struct attribute_group	**dev_groups;
  400 	struct kobject			*dev_kobj;
  401 
  402 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
  403 	char *(*devnode)(struct device *dev, umode_t *mode);
  404 
  405 	void (*class_release)(struct class *class);
  406 	void (*dev_release)(struct device *dev);
  407 
  408 	int (*suspend)(struct device *dev, pm_message_t state);
  409 	int (*resume)(struct device *dev);
  410 
  411 	const struct kobj_ns_type_operations *ns_type;
  412 	const void *(*namespace)(struct device *dev);
  413 
  414 	const struct dev_pm_ops *pm;
  415 
  416 	struct subsys_private *p;
  417 };
  418 
  419 struct class_dev_iter {
  420 	struct klist_iter		ki;
  421 	const struct device_type	*type;
  422 };
  423 
  424 extern struct kobject *sysfs_dev_block_kobj;
  425 extern struct kobject *sysfs_dev_char_kobj;
  426 extern int __must_check __class_register(struct class *class,
  427 					 struct lock_class_key *key);
  428 extern void class_unregister(struct class *class);
  429 
  430 /* This is a #define to keep the compiler from merging different
  431  * instances of the __key variable */
  432 #define class_register(class)			\
  433 ({						\
  434 	static struct lock_class_key __key;	\
  435 	__class_register(class, &__key);	\
  436 })
  437 
  438 struct class_compat;
  439 struct class_compat *class_compat_register(const char *name);
  440 void class_compat_unregister(struct class_compat *cls);
  441 int class_compat_create_link(struct class_compat *cls, struct device *dev,
  442 			     struct device *device_link);
  443 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
  444 			      struct device *device_link);
  445 
  446 extern void class_dev_iter_init(struct class_dev_iter *iter,
  447 				struct class *class,
  448 				struct device *start,
  449 				const struct device_type *type);
  450 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
  451 extern void class_dev_iter_exit(struct class_dev_iter *iter);
  452 
  453 extern int class_for_each_device(struct class *class, struct device *start,
  454 				 void *data,
  455 				 int (*fn)(struct device *dev, void *data));
  456 extern struct device *class_find_device(struct class *class,
  457 					struct device *start, const void *data,
  458 					int (*match)(struct device *, const void *));
  459 
  460 struct class_attribute {
  461 	struct attribute attr;
  462 	ssize_t (*show)(struct class *class, struct class_attribute *attr,
  463 			char *buf);
  464 	ssize_t (*store)(struct class *class, struct class_attribute *attr,
  465 			const char *buf, size_t count);
  466 };
  467 
  468 #define CLASS_ATTR(_name, _mode, _show, _store) \
  469 	struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
  470 #define CLASS_ATTR_RW(_name) \
  471 	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
  472 #define CLASS_ATTR_RO(_name) \
  473 	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
  474 #define CLASS_ATTR_WO(_name) \
  475 	struct class_attribute class_attr_##_name = __ATTR_WO(_name)
  476 
  477 extern int __must_check class_create_file_ns(struct class *class,
  478 					     const struct class_attribute *attr,
  479 					     const void *ns);
  480 extern void class_remove_file_ns(struct class *class,
  481 				 const struct class_attribute *attr,
  482 				 const void *ns);
  483 
  484 static inline int __must_check class_create_file(struct class *class,
  485 					const struct class_attribute *attr)
  486 {
  487 	return class_create_file_ns(class, attr, NULL);
  488 }
  489 
  490 static inline void class_remove_file(struct class *class,
  491 				     const struct class_attribute *attr)
  492 {
  493 	return class_remove_file_ns(class, attr, NULL);
  494 }
  495 
  496 /* Simple class attribute that is just a static string */
  497 struct class_attribute_string {
  498 	struct class_attribute attr;
  499 	char *str;
  500 };
  501 
   502 /* Currently only read-only */
  503 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
  504 	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
  505 #define CLASS_ATTR_STRING(_name, _mode, _str) \
  506 	struct class_attribute_string class_attr_##_name = \
  507 		_CLASS_ATTR_STRING(_name, _mode, _str)
  508 
  509 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
  510                         char *buf);
  511 
  512 struct class_interface {
  513 	struct list_head	node;
  514 	struct class		*class;
  515 
  516 	int (*add_dev)		(struct device *, struct class_interface *);
  517 	void (*remove_dev)	(struct device *, struct class_interface *);
  518 };
  519 
  520 extern int __must_check class_interface_register(struct class_interface *);
  521 extern void class_interface_unregister(struct class_interface *);
  522 
  523 extern struct class * __must_check __class_create(struct module *owner,
  524 						  const char *name,
  525 						  struct lock_class_key *key);
  526 extern void class_destroy(struct class *cls);
  527 
  528 /* This is a #define to keep the compiler from merging different
  529  * instances of the __key variable */
  530 #define class_create(owner, name)		\
  531 ({						\
  532 	static struct lock_class_key __key;	\
  533 	__class_create(owner, name, &__key);	\
  534 })
  535 
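/*
 * Editor's sketch: class_create()/class_destroy() as declared above.
 * THIS_MODULE, IS_ERR() and PTR_ERR() come from other kernel headers and are
 * assumed here; "example" is a made-up class name.
 */
static struct class *example_class;

static int example_class_setup(void)
{
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))	/* class_create() returns ERR_PTR() on failure */
		return PTR_ERR(example_class);
	return 0;
}

static void example_class_teardown(void)
{
	class_destroy(example_class);
}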
  536 /*
  537  * The type of device, "struct device" is embedded in. A class
  538  * or bus can contain devices of different types
  539  * like "partitions" and "disks", "mouse" and "event".
  540  * This identifies the device type and carries type-specific
  541  * information, equivalent to the kobj_type of a kobject.
  542  * If "name" is specified, the uevent will contain it in
  543  * the DEVTYPE variable.
  544  */
  545 struct device_type {
  546 	const char *name;
  547 	const struct attribute_group **groups;
  548 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  549 	char *(*devnode)(struct device *dev, umode_t *mode,
  550 			 kuid_t *uid, kgid_t *gid);
  551 	void (*release)(struct device *dev);
  552 
  553 	const struct dev_pm_ops *pm;
  554 };
  555 
  556 /* interface for exporting device attributes */
  557 struct device_attribute {
  558 	struct attribute	attr;
  559 	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
  560 			char *buf);
  561 	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
  562 			 const char *buf, size_t count);
  563 };
  564 
  565 struct dev_ext_attribute {
  566 	struct device_attribute attr;
  567 	void *var;
  568 };
  569 
  570 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
  571 			  char *buf);
  572 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
  573 			   const char *buf, size_t count);
  574 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
  575 			char *buf);
  576 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
  577 			 const char *buf, size_t count);
  578 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
  579 			char *buf);
  580 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
  581 			 const char *buf, size_t count);
  582 
  583 #define DEVICE_ATTR(_name, _mode, _show, _store) \
  584 	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
  585 #define DEVICE_ATTR_RW(_name) \
  586 	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
  587 #define DEVICE_ATTR_RO(_name) \
  588 	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
  589 #define DEVICE_ATTR_WO(_name) \
  590 	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
  591 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
  592 	struct dev_ext_attribute dev_attr_##_name = \
  593 		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
  594 #define DEVICE_INT_ATTR(_name, _mode, _var) \
  595 	struct dev_ext_attribute dev_attr_##_name = \
  596 		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
  597 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
  598 	struct dev_ext_attribute dev_attr_##_name = \
  599 		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
  600 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
  601 	struct device_attribute dev_attr_##_name =		\
  602 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
  603 
  604 extern int device_create_file(struct device *device,
  605 			      const struct device_attribute *entry);
  606 extern void device_remove_file(struct device *dev,
  607 			       const struct device_attribute *attr);
  608 extern bool device_remove_file_self(struct device *dev,
  609 				    const struct device_attribute *attr);
  610 extern int __must_check device_create_bin_file(struct device *dev,
  611 					const struct bin_attribute *attr);
  612 extern void device_remove_bin_file(struct device *dev,
  613 				   const struct bin_attribute *attr);
  614 
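/*
 * Editor's sketch: a read-only device attribute built from the DEVICE_ATTR_RO()
 * macro and the file helpers declared above. The _RO variant expects a show
 * callback named <attr>_show; sprintf() is assumed from <linux/kernel.h>.
 */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "hello\n");	/* sysfs hands buf a full page */
}
static DEVICE_ATTR_RO(example);

/* In probe:  device_create_file(dev, &dev_attr_example);
 * In remove: device_remove_file(dev, &dev_attr_example); */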
  615 /* device resource management */
  616 typedef void (*dr_release_t)(struct device *dev, void *res);
  617 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
  618 
  619 #ifdef CONFIG_DEBUG_DEVRES
  620 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
  621 				 int nid, const char *name) __malloc;
  622 #define devres_alloc(release, size, gfp) \
  623 	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
  624 #define devres_alloc_node(release, size, gfp, nid) \
  625 	__devres_alloc_node(release, size, gfp, nid, #release)
  626 #else
  627 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
  628 			       int nid) __malloc;
  629 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
  630 {
  631 	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
  632 }
  633 #endif
  634 
  635 extern void devres_for_each_res(struct device *dev, dr_release_t release,
  636 				dr_match_t match, void *match_data,
  637 				void (*fn)(struct device *, void *, void *),
  638 				void *data);
  639 extern void devres_free(void *res);
  640 extern void devres_add(struct device *dev, void *res);
  641 extern void *devres_find(struct device *dev, dr_release_t release,
  642 			 dr_match_t match, void *match_data);
  643 extern void *devres_get(struct device *dev, void *new_res,
  644 			dr_match_t match, void *match_data);
  645 extern void *devres_remove(struct device *dev, dr_release_t release,
  646 			   dr_match_t match, void *match_data);
  647 extern int devres_destroy(struct device *dev, dr_release_t release,
  648 			  dr_match_t match, void *match_data);
  649 extern int devres_release(struct device *dev, dr_release_t release,
  650 			  dr_match_t match, void *match_data);
  651 
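/*
 * Editor's sketch: the canonical devres pattern built from the primitives
 * above -- allocate a tracked record, acquire the underlying resource, then
 * devres_add() it so the release callback runs on driver detach.  struct
 * my_res and my_res_release() are hypothetical.
 */
struct my_res { void __iomem *base; };

static void my_res_release(struct device *dev, void *res)
{
	/* undo the acquisition recorded in *res */
}

static int my_claim(struct device *dev)
{
	struct my_res *r;

	r = devres_alloc(my_res_release, sizeof(*r), GFP_KERNEL);
	if (!r)
		return -ENOMEM;
	/* ... acquire the resource and fill in *r ... */
	devres_add(dev, r);
	return 0;
}
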
  652 /* devres group */
  653 extern void * __must_check devres_open_group(struct device *dev, void *id,
  654 					     gfp_t gfp);
  655 extern void devres_close_group(struct device *dev, void *id);
  656 extern void devres_remove_group(struct device *dev, void *id);
  657 extern int devres_release_group(struct device *dev, void *id);
  658 
  659 /* managed devm_k.alloc/kfree for device drivers */
  660 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
  661 extern __printf(3, 0)
  662 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
  663 		      va_list ap) __malloc;
  664 extern __printf(3, 4)
  665 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
  666 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
  667 {
  668 	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
  669 }
  670 static inline void *devm_kmalloc_array(struct device *dev,
  671 				       size_t n, size_t size, gfp_t flags)
  672 {
  673 	if (size != 0 && n > SIZE_MAX / size)
  674 		return NULL;
  675 	return devm_kmalloc(dev, n * size, flags);
  676 }
  677 static inline void *devm_kcalloc(struct device *dev,
  678 				 size_t n, size_t size, gfp_t flags)
  679 {
  680 	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
  681 }
  682 extern void devm_kfree(struct device *dev, void *p);
  683 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
  684 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
  685 			  gfp_t gfp);
  686 
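/*
 * Editor's sketch: managed allocations are released automatically on driver
 * detach, so probe() error paths need no matching kfree().  struct my_priv
 * is hypothetical.
 */
struct my_priv { void *regs; };

static int my_alloc_priv(struct device *dev)
{
	struct my_priv *priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	return 0;	/* devm_kfree() only needed for early manual release */
}
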
  687 extern unsigned long devm_get_free_pages(struct device *dev,
  688 					 gfp_t gfp_mask, unsigned int order);
  689 extern void devm_free_pages(struct device *dev, unsigned long addr);
  690 
  691 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
  692 
  693 /* allows to add/remove a custom action to devres stack */
  694 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
  695 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
  696 
  697 static inline int devm_add_action_or_reset(struct device *dev,
  698 					   void (*action)(void *), void *data)
  699 {
  700 	int ret;
  701 
  702 	ret = devm_add_action(dev, action, data);
  703 	if (ret)
  704 		action(data);
  705 
  706 	return ret;
  707 }
  708 
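/*
 * Editor's sketch: devm_add_action_or_reset(), defined just above, queues a
 * teardown callback on the devres stack and invokes it immediately if the
 * registration itself fails.  my_disable_hw() and the clk argument are
 * hypothetical.
 */
static void my_disable_hw(void *data)
{
	/* e.g. clk_disable_unprepare(data); */
}

/*
 * in probe(), right after enabling the hardware:
 *	err = devm_add_action_or_reset(dev, my_disable_hw, clk);
 *	if (err)
 *		return err;
 */
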
  709 /**
  710  * devm_alloc_percpu - Resource-managed alloc_percpu
  711  * @dev: Device to allocate per-cpu memory for
  712  * @type: Type to allocate per-cpu memory for
  713  *
  714  * Managed alloc_percpu. Per-cpu memory allocated with this function is
  715  * automatically freed on driver detach.
  716  *
  717  * RETURNS:
  718  * Pointer to allocated memory on success, NULL on failure.
  719  */
  720 #define devm_alloc_percpu(dev, type)      \
  721 	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
  722 						      __alignof__(type)))
  723 
  724 void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
  725 				   size_t align);
  726 void devm_free_percpu(struct device *dev, void __percpu *pdata);
  727 
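/*
 * Editor's sketch: per-cpu counters tied to the device via the
 * devm_alloc_percpu() macro above.  struct my_stats is hypothetical.
 */
struct my_stats { u64 rx_packets; };

static int my_init_stats(struct device *dev)
{
	struct my_stats __percpu *stats;

	stats = devm_alloc_percpu(dev, struct my_stats);
	if (!stats)
		return -ENOMEM;
	/* hot paths then use this_cpu_inc(stats->rx_packets) etc. */
	return 0;
}
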
  728 struct device_dma_parameters {
  729 	/*
  730 	 * a low level driver may set these to teach IOMMU code about
  731 	 * sg limitations.
  732 	 */
  733 	unsigned int max_segment_size;
  734 	unsigned long segment_boundary_mask;
  735 };
  736 
  737 /**
  738  * enum device_link_state - Device link states.
  739  * @DL_STATE_NONE: The presence of the drivers is not being tracked.
  740  * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
  741  * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
  742  * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
  743  * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
  744  * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
  745  */
  746 enum device_link_state {
  747 	DL_STATE_NONE = -1,
  748 	DL_STATE_DORMANT = 0,
  749 	DL_STATE_AVAILABLE,
  750 	DL_STATE_CONSUMER_PROBE,
  751 	DL_STATE_ACTIVE,
  752 	DL_STATE_SUPPLIER_UNBIND,
  753 };
  754 
  755 /*
  756  * Device link flags.
  757  *
  758  * STATELESS: The core won't track the presence of supplier/consumer drivers.
  759  * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
  760  * PM_RUNTIME: If set, the runtime PM framework will use this link.
  761  * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
  762  */
  763 #define DL_FLAG_STATELESS	BIT(0)
  764 #define DL_FLAG_AUTOREMOVE	BIT(1)
  765 #define DL_FLAG_PM_RUNTIME	BIT(2)
  766 #define DL_FLAG_RPM_ACTIVE	BIT(3)
  767 
  768 /**
  769  * struct device_link - Device link representation.
  770  * @supplier: The device on the supplier end of the link.
  771  * @s_node: Hook to the supplier device's list of links to consumers.
  772  * @consumer: The device on the consumer end of the link.
  773  * @c_node: Hook to the consumer device's list of links to suppliers.
  774  * @status: The state of the link (with respect to the presence of drivers).
  775  * @flags: Link flags.
  776  * @rpm_active: Whether or not the consumer device is runtime-PM-active.
  777  * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
  778  */
  779 struct device_link {
  780 	struct device *supplier;
  781 	struct list_head s_node;
  782 	struct device *consumer;
  783 	struct list_head c_node;
  784 	enum device_link_state status;
  785 	u32 flags;
  786 	bool rpm_active;
  787 #ifdef CONFIG_SRCU
  788 	struct rcu_head rcu_head;
  789 #endif
  790 };
  791 
  792 /**
  793  * enum dl_dev_state - Device driver presence tracking information.
  794  * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
  795  * @DL_DEV_PROBING: A driver is probing.
  796  * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
  797  * @DL_DEV_UNBINDING: The driver is unbinding from the device.
  798  */
  799 enum dl_dev_state {
  800 	DL_DEV_NO_DRIVER = 0,
  801 	DL_DEV_PROBING,
  802 	DL_DEV_DRIVER_BOUND,
  803 	DL_DEV_UNBINDING,
  804 };
  805 
  806 /**
  807  * struct dev_links_info - Device data related to device links.
  808  * @suppliers: List of links to supplier devices.
  809  * @consumers: List of links to consumer devices.
  810  * @status: Driver status information.
  811  */
  812 struct dev_links_info {
  813 	struct list_head suppliers;
  814 	struct list_head consumers;
  815 	enum dl_dev_state status;
  816 };
  817 
  818 /**
  819  * struct device - The basic device structure
  820  * @parent:	The device's "parent" device, the device to which it is attached.
  821  * 		In most cases, a parent device is some sort of bus or host
  822  * 		controller. If parent is NULL, the device is a top-level device,
  823  * 		which is not usually what you want.
  824  * @p:		Holds the private data of the driver core portions of the device.
  825  * 		See the comment of the struct device_private for detail.
  826  * @kobj:	A top-level, abstract class from which other classes are derived.
  827  * @init_name:	Initial name of the device.
  828  * @type:	The type of device.
  829  * 		This identifies the device type and carries type-specific
  830  * 		information.
  831  * @mutex:	Mutex to synchronize calls to its driver.
  832  * @bus:	Type of bus device is on.
  833  * @driver:	Which driver has allocated this device.
  834  * @platform_data: Platform data specific to the device.
  835  * 		Example: For devices on custom boards, as typical of embedded
  836  * 		and SOC based hardware, Linux often uses platform_data to point
  837  * 		to board-specific structures describing devices and how they
  838  * 		are wired.  That can include what ports are available, chip
  839  * 		variants, which GPIO pins act in what additional roles, and so
  840  * 		on.  This shrinks the "Board Support Packages" (BSPs) and
  841  * 		minimizes board-specific #ifdefs in drivers.
  842  * @driver_data: Private pointer for driver specific info.
  843  * @links:	Links to suppliers and consumers of this device.
  844  * @power:	For device power management.
  845  * 		See Documentation/power/admin-guide/devices.rst for details.
  846  * @pm_domain:	Provide callbacks that are executed during system suspend,
  847  * 		hibernation, system resume and during runtime PM transitions
  848  * 		along with subsystem-level and driver-level callbacks.
  849  * @pins:	For device pin management.
  850  *		See Documentation/pinctrl.txt for details.
  851  * @msi_list:	Hosts MSI descriptors
  852  * @msi_domain: The generic MSI domain this device is using.
  853  * @numa_node:	NUMA node this device is close to.
  854  * @dma_mask:	Dma mask (if dma'ble device).
  855  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not
  856  * 		all hardware supports 64-bit addresses for consistent allocations
  857  * 		of such descriptors.
  858  * @dma_pfn_offset: offset of the DMA memory range relative to RAM
  859  * @dma_parms:	A low level driver may set these to teach IOMMU code about
  860  * 		segment limitations.
  861  * @dma_pools:	Dma pools (if dma'ble device).
  862  * @dma_mem:	Internal for coherent mem override.
  863  * @cma_area:	Contiguous memory area for dma allocations
  864  * @archdata:	For arch-specific additions.
  865  * @of_node:	Associated device tree node.
  866  * @fwnode:	Associated device node supplied by platform firmware.
  867  * @devt:	For creating the sysfs "dev".
  868  * @id:		device instance
  869  * @devres_lock: Spinlock to protect the resource of the device.
  870  * @devres_head: The resources list of the device.
  871  * @knode_class: The node used to add the device to the class list.
  872  * @class:	The class of the device.
  873  * @groups:	Optional attribute groups.
  874  * @release:	Callback to free the device after all references have
  875  * 		gone away. This should be set by the allocator of the
  876  * 		device (i.e. the bus driver that discovered the device).
  877  * @iommu_group: IOMMU group the device belongs to.
  878  * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
  879  *
  880  * @offline_disabled: If set, the device is permanently online.
  881  * @offline:	Set after successful invocation of bus type's .offline().
  882  *
  883  * At the lowest level, every device in a Linux system is represented by an
  884  * instance of struct device. The device structure contains the information
  885  * that the device model core needs to model the system. Most subsystems,
  886  * however, track additional information about the devices they host. As a
  887  * result, it is rare for devices to be represented by bare device structures;
  888  * instead, that structure, like kobject structures, is usually embedded within
  889  * a higher-level representation of the device.
  890  */
  891 struct device {
  892 	struct device		*parent;
  893 
  894 	struct device_private	*p;
  895 
  896 	struct kobject kobj;
  897 	const char		*init_name; /* initial name of the device */
  898 	const struct device_type *type;
  899 
  900 	struct mutex		mutex;	/* mutex to synchronize calls to
  901 					 * its driver.
  902 					 */
  903 
  904 	struct bus_type	*bus;		/* type of bus device is on */
  905 	struct device_driver *driver;	/* which driver has allocated this
  906 					   device */
  907 	void		*platform_data;	/* Platform specific data, device
  908 					   core doesn't touch it */
  909 	void		*driver_data;	/* Driver data, set and get with
  910 					   dev_set/get_drvdata */
  911 	struct dev_links_info	links;
  912 	struct dev_pm_info	power;
  913 	struct dev_pm_domain	*pm_domain;
  914 
  915 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  916 	struct irq_domain	*msi_domain;
  917 #endif
  918 #ifdef CONFIG_PINCTRL
  919 	struct dev_pin_info	*pins;
  920 #endif
  921 #ifdef CONFIG_GENERIC_MSI_IRQ
  922 	struct list_head	msi_list;
  923 #endif
  924 
  925 #ifdef CONFIG_NUMA
  926 	int		numa_node;	/* NUMA node this device is close to */
  927 #endif
  928 	const struct dma_map_ops *dma_ops;
  929 	u64		*dma_mask;	/* dma mask (if dma'able device) */
  930 	u64		coherent_dma_mask;/* Like dma_mask, but for
  931 					     alloc_coherent mappings as
  932 					     not all hardware supports
  933 					     64 bit addresses for consistent
  934 					     allocations of such descriptors. */
  935 	unsigned long	dma_pfn_offset;
  936 
  937 	struct device_dma_parameters *dma_parms;
  938 
  939 	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
  940 
  941 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
  942 					     override */
  943 #ifdef CONFIG_DMA_CMA
  944 	struct cma *cma_area;		/* contiguous memory area for dma
  945 					   allocations */
  946 #endif
  947 	/* arch specific additions */
  948 	struct dev_archdata	archdata;
  949 
  950 	struct device_node	*of_node; /* associated device tree node */
  951 	struct fwnode_handle	*fwnode; /* firmware device node */
  952 
  953 	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
  954 	u32			id;	/* device instance */
  955 
  956 	spinlock_t		devres_lock;
  957 	struct list_head	devres_head;
  958 
  959 	struct klist_node	knode_class;
  960 	struct class		*class;
  961 	const struct attribute_group **groups;	/* optional groups */
  962 
  963 	void	(*release)(struct device *dev);
  964 	struct iommu_group	*iommu_group;
  965 	struct iommu_fwspec	*iommu_fwspec;
  966 
  967 	bool			offline_disabled:1;
  968 	bool			offline:1;
  969 };
  970 
  971 static inline struct device *kobj_to_dev(struct kobject *kobj)
  972 {
  973 	return container_of(kobj, struct device, kobj);
  974 }
  975 
  976 /* Get the wakeup routines, which depend on struct device */
  977 #include <linux/pm_wakeup.h>
  978 
  979 static inline const char *dev_name(const struct device *dev)
  980 {
  981 	/* Use the init name until the kobject becomes available */
  982 	if (dev->init_name)
  983 		return dev->init_name;
  984 
  985 	return kobject_name(&dev->kobj);
  986 }
  987 
  988 extern __printf(2, 3)
  989 int dev_set_name(struct device *dev, const char *name, ...);
  990 
  991 #ifdef CONFIG_NUMA
  992 static inline int dev_to_node(struct device *dev)
  993 {
  994 	return dev->numa_node;
  995 }
  996 static inline void set_dev_node(struct device *dev, int node)
  997 {
  998 	dev->numa_node = node;
  999 }
 1000 #else
 1001 static inline int dev_to_node(struct device *dev)
 1002 {
 1003 	return -1;
 1004 }
 1005 static inline void set_dev_node(struct device *dev, int node)
 1006 {
 1007 }
 1008 #endif
 1009 
 1010 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
 1011 {
 1012 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
 1013 	return dev->msi_domain;
 1014 #else
 1015 	return NULL;
 1016 #endif
 1017 }
 1018 
 1019 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
 1020 {
 1021 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
 1022 	dev->msi_domain = d;
 1023 #endif
 1024 }
 1025 
 1026 static inline void *dev_get_drvdata(const struct device *dev)
 1027 {
 1028 	return dev->driver_data;
 1029 }
 1030 
 1031 static inline void dev_set_drvdata(struct device *dev, void *data)
 1032 {
 1033 	dev->driver_data = data;
 1034 }
 1035 
 1036 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
 1037 {
 1038 	return dev ? dev->power.subsys_data : NULL;
 1039 }
 1040 
 1041 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
 1042 {
 1043 	return dev->kobj.uevent_suppress;
 1044 }
 1045 
 1046 static inline void dev_set_uevent_suppress(struct device *dev, int val)
 1047 {
 1048 	dev->kobj.uevent_suppress = val;
 1049 }
 1050 
 1051 static inline int device_is_registered(struct device *dev)
 1052 {
 1053 	return dev->kobj.state_in_sysfs;
 1054 }
 1055 
 1056 static inline void device_enable_async_suspend(struct device *dev)
 1057 {
 1058 	if (!dev->power.is_prepared)
 1059 		dev->power.async_suspend = true;
 1060 }
 1061 
 1062 static inline void device_disable_async_suspend(struct device *dev)
 1063 {
 1064 	if (!dev->power.is_prepared)
 1065 		dev->power.async_suspend = false;
 1066 }
 1067 
 1068 static inline bool device_async_suspend_enabled(struct device *dev)
 1069 {
 1070 	return !!dev->power.async_suspend;
 1071 }
 1072 
 1073 static inline void dev_pm_syscore_device(struct device *dev, bool val)
 1074 {
 1075 #ifdef CONFIG_PM_SLEEP
 1076 	dev->power.syscore = val;
 1077 #endif
 1078 }
 1079 
 1080 static inline void device_lock(struct device *dev)
 1081 {
 1082 	mutex_lock(&dev->mutex);
 1083 }
 1084 
 1085 static inline int device_lock_interruptible(struct device *dev)
 1086 {
 1087 	return mutex_lock_interruptible(&dev->mutex);
 1088 }
 1089 
 1090 static inline int device_trylock(struct device *dev)
 1091 {
 1092 	return mutex_trylock(&dev->mutex);
 1093 }
 1094 
 1095 static inline void device_unlock(struct device *dev)
 1096 {
 1097 	mutex_unlock(&dev->mutex);
 1098 }
 1099 
 1100 static inline void device_lock_assert(struct device *dev)
 1101 {
 1102 	lockdep_assert_held(&dev->mutex);
 1103 }
 1104 
 1105 static inline struct device_node *dev_of_node(struct device *dev)
 1106 {
 1107 	if (!IS_ENABLED(CONFIG_OF))
 1108 		return NULL;
 1109 	return dev->of_node;
 1110 }
 1111 
 1112 void driver_init(void);
 1113 
 1114 /*
 1115  * High level routines for use by the bus drivers
 1116  */
 1117 extern int __must_check device_register(struct device *dev);
 1118 extern void device_unregister(struct device *dev);
 1119 extern void device_initialize(struct device *dev);
 1120 extern int __must_check device_add(struct device *dev);
 1121 extern void device_del(struct device *dev);
 1122 extern int device_for_each_child(struct device *dev, void *data,
 1123 		     int (*fn)(struct device *dev, void *data));
 1124 extern int device_for_each_child_reverse(struct device *dev, void *data,
 1125 		     int (*fn)(struct device *dev, void *data));
 1126 extern struct device *device_find_child(struct device *dev, void *data,
 1127 				int (*match)(struct device *dev, void *data));
 1128 extern int device_rename(struct device *dev, const char *new_name);
 1129 extern int device_move(struct device *dev, struct device *new_parent,
 1130 		       enum dpm_order dpm_order);
 1131 extern const char *device_get_devnode(struct device *dev,
 1132 				      umode_t *mode, kuid_t *uid, kgid_t *gid,
 1133 				      const char **tmp);
 1134 
 1135 static inline bool device_supports_offline(struct device *dev)
 1136 {
 1137 	return dev->bus && dev->bus->offline && dev->bus->online;
 1138 }
 1139 
 1140 extern void lock_device_hotplug(void);
 1141 extern void unlock_device_hotplug(void);
 1142 extern int lock_device_hotplug_sysfs(void);
 1143 void assert_held_device_hotplug(void);
 1144 extern int device_offline(struct device *dev);
 1145 extern int device_online(struct device *dev);
 1146 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1147 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1148 
 1149 static inline int dev_num_vf(struct device *dev)
 1150 {
 1151 	if (dev->bus && dev->bus->num_vf)
 1152 		return dev->bus->num_vf(dev);
 1153 	return 0;
 1154 }
 1155 
 1156 /*
 1157  * Root device objects for grouping under /sys/devices
 1158  */
 1159 extern struct device *__root_device_register(const char *name,
 1160 					     struct module *owner);
 1161 
 1162 /* This is a macro to avoid include problems with THIS_MODULE */
 1163 #define root_device_register(name) \
 1164 	__root_device_register(name, THIS_MODULE)
 1165 
 1166 extern void root_device_unregister(struct device *root);
 1167 
 1168 static inline void *dev_get_platdata(const struct device *dev)
 1169 {
 1170 	return dev->platform_data;
 1171 }
 1172 
 1173 /*
 1174  * Manual binding of a device to driver. See drivers/base/bus.c
 1175  * for information on use.
 1176  */
 1177 extern int __must_check device_bind_driver(struct device *dev);
 1178 extern void device_release_driver(struct device *dev);
 1179 extern int  __must_check device_attach(struct device *dev);
 1180 extern int __must_check driver_attach(struct device_driver *drv);
 1181 extern void device_initial_probe(struct device *dev);
 1182 extern int __must_check device_reprobe(struct device *dev);
 1183 
 1184 extern bool device_is_bound(struct device *dev);
 1185 
 1186 /*
 1187  * Easy functions for dynamically creating devices on the fly
 1188  */
 1189 extern __printf(5, 0)
 1190 struct device *device_create_vargs(struct class *cls, struct device *parent,
 1191 				   dev_t devt, void *drvdata,
 1192 				   const char *fmt, va_list vargs);
 1193 extern __printf(5, 6)
 1194 struct device *device_create(struct class *cls, struct device *parent,
 1195 			     dev_t devt, void *drvdata,
 1196 			     const char *fmt, ...);
 1197 extern __printf(6, 7)
 1198 struct device *device_create_with_groups(struct class *cls,
 1199 			     struct device *parent, dev_t devt, void *drvdata,
 1200 			     const struct attribute_group **groups,
 1201 			     const char *fmt, ...);
 1202 extern void device_destroy(struct class *cls, dev_t devt);
 1203 
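/*
 * Editor's sketch: the usual device_create()/device_destroy() pairing for a
 * char-device node.  my_class, my_devt and the "mydev%d" name are
 * hypothetical.
 */
static struct device *my_add_node(struct class *my_class, dev_t my_devt,
				  void *drvdata)
{
	return device_create(my_class, NULL, my_devt, drvdata,
			     "mydev%d", MINOR(my_devt));
}

/* teardown: device_destroy(my_class, my_devt); */
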
 1204 /*
 1205  * Platform "fixup" functions - allow the platform to have its say
 1206  * about devices and actions that the general device layer doesn't
 1207  * know about.
 1208  */
 1209 /* Notify platform of device discovery */
 1210 extern int (*platform_notify)(struct device *dev);
 1211 
 1212 extern int (*platform_notify_remove)(struct device *dev);
 1213 
 1214 
 1215 /*
 1216  * get_device - atomically increment the reference count for the device.
 1217  */
 1219 extern struct device *get_device(struct device *dev);
 1220 extern void put_device(struct device *dev);
 1221 
 1222 #ifdef CONFIG_DEVTMPFS
 1223 extern int devtmpfs_create_node(struct device *dev);
 1224 extern int devtmpfs_delete_node(struct device *dev);
 1225 extern int devtmpfs_mount(const char *mntdir);
 1226 #else
 1227 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
 1228 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
 1229 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
 1230 #endif
 1231 
 1232 /* drivers/base/power/shutdown.c */
 1233 extern void device_shutdown(void);
 1234 
 1235 /* debugging and troubleshooting/diagnostic helpers. */
 1236 extern const char *dev_driver_string(const struct device *dev);
 1237 
 1238 /* Device links interface. */
 1239 struct device_link *device_link_add(struct device *consumer,
 1240 				    struct device *supplier, u32 flags);
 1241 void device_link_del(struct device_link *link);
 1242 
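/*
 * Editor's sketch: creating a managed link with the flags defined earlier,
 * so the consumer's runtime PM depends on the supplier and the link is
 * dropped automatically when the consumer unbinds.  Both device pointers
 * are assumed to come from elsewhere.
 */
static int my_link_devices(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE | DL_FLAG_PM_RUNTIME);
	return link ? 0 : -ENODEV;
}
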
 1243 #ifdef CONFIG_PRINTK
 1244 
 1245 extern __printf(3, 0)
 1246 int dev_vprintk_emit(int level, const struct device *dev,
 1247 		     const char *fmt, va_list args);
 1248 extern __printf(3, 4)
 1249 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
 1250 
 1251 extern __printf(3, 4)
 1252 void dev_printk(const char *level, const struct device *dev,
 1253 		const char *fmt, ...);
 1254 extern __printf(2, 3)
 1255 void dev_emerg(const struct device *dev, const char *fmt, ...);
 1256 extern __printf(2, 3)
 1257 void dev_alert(const struct device *dev, const char *fmt, ...);
 1258 extern __printf(2, 3)
 1259 void dev_crit(const struct device *dev, const char *fmt, ...);
 1260 extern __printf(2, 3)
 1261 void dev_err(const struct device *dev, const char *fmt, ...);
 1262 extern __printf(2, 3)
 1263 void dev_warn(const struct device *dev, const char *fmt, ...);
 1264 extern __printf(2, 3)
 1265 void dev_notice(const struct device *dev, const char *fmt, ...);
 1266 extern __printf(2, 3)
 1267 void _dev_info(const struct device *dev, const char *fmt, ...);
 1268 
 1269 #else
 1270 
 1271 static inline __printf(3, 0)
 1272 int dev_vprintk_emit(int level, const struct device *dev,
 1273 		     const char *fmt, va_list args)
 1274 { return 0; }
 1275 static inline __printf(3, 4)
 1276 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
 1277 { return 0; }
 1278 
 1279 static inline void __dev_printk(const char *level, const struct device *dev,
 1280 				struct va_format *vaf)
 1281 {}
 1282 static inline __printf(3, 4)
 1283 void dev_printk(const char *level, const struct device *dev,
 1284 		const char *fmt, ...)
 1285 {}
 1286 
 1287 static inline __printf(2, 3)
 1288 void dev_emerg(const struct device *dev, const char *fmt, ...)
 1289 {}
 1290 static inline __printf(2, 3)
 1291 void dev_crit(const struct device *dev, const char *fmt, ...)
 1292 {}
 1293 static inline __printf(2, 3)
 1294 void dev_alert(const struct device *dev, const char *fmt, ...)
 1295 {}
 1296 static inline __printf(2, 3)
 1297 void dev_err(const struct device *dev, const char *fmt, ...)
 1298 {}
 1299 static inline __printf(2, 3)
 1300 void dev_warn(const struct device *dev, const char *fmt, ...)
 1301 {}
 1302 static inline __printf(2, 3)
 1303 void dev_notice(const struct device *dev, const char *fmt, ...)
 1304 {}
 1305 static inline __printf(2, 3)
 1306 void _dev_info(const struct device *dev, const char *fmt, ...)
 1307 {}
 1308 
 1309 #endif
 1310 
 1311 /*
 1312  * Stupid hackaround for existing non-printk uses of dev_info
 1313  *
 1314  * Note that the definition of dev_info below is actually _dev_info
 1315  * and a macro is used to avoid redefining dev_info
 1316  */
 1317 
 1318 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 1319 
 1320 #if defined(CONFIG_DYNAMIC_DEBUG)
 1321 #define dev_dbg(dev, format, ...)		     \
 1322 do {						     \
 1323 	dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
 1324 } while (0)
 1325 #elif defined(DEBUG)
 1326 #define dev_dbg(dev, format, arg...)		\
 1327 	dev_printk(KERN_DEBUG, dev, format, ##arg)
 1328 #else
 1329 #define dev_dbg(dev, format, arg...)				\
 1330 ({								\
 1331 	if (0)							\
 1332 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1333 })
 1334 #endif
 1335 
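/*
 * Editor's note: with the definitions above, dev_dbg() costs nothing unless
 * DEBUG or CONFIG_DYNAMIC_DEBUG is in effect, so calls such as the
 * (hypothetical) one below can stay in hot paths:
 *
 *	dev_dbg(&pdev->dev, "mapped regs at %pR\n", res);
 */
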
 1336 #ifdef CONFIG_PRINTK
 1337 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1338 do {									\
 1339 	static bool __print_once __read_mostly;				\
 1340 									\
 1341 	if (!__print_once) {						\
 1342 		__print_once = true;					\
 1343 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1344 	}								\
 1345 } while (0)
 1346 #else
 1347 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1348 do {									\
 1349 	if (0)								\
 1350 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1351 } while (0)
 1352 #endif
 1353 
 1354 #define dev_emerg_once(dev, fmt, ...)					\
 1355 	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1356 #define dev_alert_once(dev, fmt, ...)					\
 1357 	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
 1358 #define dev_crit_once(dev, fmt, ...)					\
 1359 	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
 1360 #define dev_err_once(dev, fmt, ...)					\
 1361 	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
 1362 #define dev_warn_once(dev, fmt, ...)					\
 1363 	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
 1364 #define dev_notice_once(dev, fmt, ...)					\
 1365 	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
 1366 #define dev_info_once(dev, fmt, ...)					\
 1367 	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
 1368 #define dev_dbg_once(dev, fmt, ...)					\
 1369 	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
 1370 
 1371 #define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
 1372 do {									\
 1373 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1374 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1375 				      DEFAULT_RATELIMIT_BURST);		\
 1376 	if (__ratelimit(&_rs))						\
 1377 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1378 } while (0)
 1379 
 1380 #define dev_emerg_ratelimited(dev, fmt, ...)				\
 1381 	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1382 #define dev_alert_ratelimited(dev, fmt, ...)				\
 1383 	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
 1384 #define dev_crit_ratelimited(dev, fmt, ...)				\
 1385 	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
 1386 #define dev_err_ratelimited(dev, fmt, ...)				\
 1387 	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
 1388 #define dev_warn_ratelimited(dev, fmt, ...)				\
 1389 	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
 1390 #define dev_notice_ratelimited(dev, fmt, ...)				\
 1391 	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
 1392 #define dev_info_ratelimited(dev, fmt, ...)				\
 1393 	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
 1394 #if defined(CONFIG_DYNAMIC_DEBUG)
 1395 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
 1396 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1397 do {									\
 1398 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1399 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1400 				      DEFAULT_RATELIMIT_BURST);		\
 1401 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
 1402 	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
 1403 	    __ratelimit(&_rs))						\
 1404 		__dynamic_dev_dbg(&descriptor, dev, fmt,		\
 1405 				  ##__VA_ARGS__);			\
 1406 } while (0)
 1407 #elif defined(DEBUG)
 1408 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1409 do {									\
 1410 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1411 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1412 				      DEFAULT_RATELIMIT_BURST);		\
 1413 	if (__ratelimit(&_rs))						\
 1414 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1415 } while (0)
 1416 #else
 1417 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1418 do {									\
 1419 	if (0)								\
 1420 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1421 } while (0)
 1422 #endif
 1423 
 1424 #ifdef VERBOSE_DEBUG
 1425 #define dev_vdbg	dev_dbg
 1426 #else
 1427 #define dev_vdbg(dev, format, arg...)				\
 1428 ({								\
 1429 	if (0)							\
 1430 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1431 })
 1432 #endif
 1433 
 1434 /*
 1435  * dev_WARN*() acts like dev_printk(), but with the key difference of
 1436  * using WARN/WARN_ONCE to include file/line information and a backtrace.
 1437  */
 1438 #define dev_WARN(dev, format, arg...) \
 1439 	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
 1440 
 1441 #define dev_WARN_ONCE(dev, condition, format, arg...) \
 1442 	WARN_ONCE(condition, "%s %s: " format, \
 1443 			dev_driver_string(dev), dev_name(dev), ## arg)
 1444 
 1445 /* Create alias, so I can be autoloaded. */
 1446 #define MODULE_ALIAS_CHARDEV(major,minor) \
 1447 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
 1448 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
 1449 	MODULE_ALIAS("char-major-" __stringify(major) "-*")
 1450 
 1451 #ifdef CONFIG_SYSFS_DEPRECATED
 1452 extern long sysfs_deprecated;
 1453 #else
 1454 #define sysfs_deprecated 0
 1455 #endif
 1456 
 1457 /**
 1458  * module_driver() - Helper macro for drivers that don't do anything
 1459  * special in module init/exit. This eliminates a lot of boilerplate.
 1460  * Each module may only use this macro once, and calling it replaces
 1461  * module_init() and module_exit().
 1462  *
 1463  * @__driver: driver name
 1464  * @__register: register function for this driver type
 1465  * @__unregister: unregister function for this driver type
 1466  * @...: Additional arguments to be passed to __register and __unregister.
 1467  *
 1468  * Use this macro to construct bus specific macros for registering
 1469  * drivers, and do not use it on its own.
 1470  */
 1471 #define module_driver(__driver, __register, __unregister, ...) \
 1472 static int __init __driver##_init(void) \
 1473 { \
 1474 	return __register(&(__driver) , ##__VA_ARGS__); \
 1475 } \
 1476 module_init(__driver##_init); \
 1477 static void __exit __driver##_exit(void) \
 1478 { \
 1479 	__unregister(&(__driver) , ##__VA_ARGS__); \
 1480 } \
 1481 module_exit(__driver##_exit);
 1482 
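/*
 * Editor's sketch: how a bus layer typically wraps module_driver() into a
 * one-line helper, in the same spirit as module_platform_driver() later in
 * this trace.  The "foo" bus and its register/unregister functions are
 * hypothetical.
 */
#define module_foo_driver(__foo_driver) \
	module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)
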
 1483 /**
 1484  * builtin_driver() - Helper macro for drivers that don't do anything
 1485  * special in init and have no exit. This eliminates some boilerplate.
 1486  * Each driver may only use this macro once, and calling it replaces
 1487  * device_initcall (or in some cases, the legacy __initcall).  This is
 1488  * meant to be a direct parallel of module_driver() above but without
 1489  * the __exit stuff that is not used for builtin cases.
 1490  *
 1491  * @__driver: driver name
 1492  * @__register: register function for this driver type
 1493  * @...: Additional arguments to be passed to __register
 1494  *
 1495  * Use this macro to construct bus specific macros for registering
 1496  * drivers, and do not use it on its own.
 1497  */
 1498 #define builtin_driver(__driver, __register, ...) \
 1499 static int __init __driver##_init(void) \
 1500 { \
 1501 	return __register(&(__driver) , ##__VA_ARGS__); \
 1502 } \
 1503 device_initcall(__driver##_init);
 1504 
 1505 #endif /* _DEVICE_H_ */

     1 /*
    2  * ioport.h	Definitions of routines for detecting, reserving and
    3  *		allocating system resources.
    4  *
    5  * Authors:	Linus Torvalds
    6  */
    7 
    8 #ifndef _LINUX_IOPORT_H
    9 #define _LINUX_IOPORT_H
   10 
   11 #ifndef __ASSEMBLY__
   12 #include <linux/compiler.h>
   13 #include <linux/types.h>
   14 /*
   15  * Resources are tree-like, allowing
   16  * nesting etc..
   17  */
   18 struct resource {
   19 	resource_size_t start;
   20 	resource_size_t end;
   21 	const char *name;
   22 	unsigned long flags;
   23 	unsigned long desc;
   24 	struct resource *parent, *sibling, *child;
   25 };
   26 
   27 /*
   28  * IO resources have these defined flags.
   29  *
   30  * PCI devices expose these flags to userspace in the "resource" sysfs file,
   31  * so don't move them.
   32  */
   33 #define IORESOURCE_BITS		0x000000ff	/* Bus-specific bits */
   34 
   35 #define IORESOURCE_TYPE_BITS	0x00001f00	/* Resource type */
   36 #define IORESOURCE_IO		0x00000100	/* PCI/ISA I/O ports */
   37 #define IORESOURCE_MEM		0x00000200
   38 #define IORESOURCE_REG		0x00000300	/* Register offsets */
   39 #define IORESOURCE_IRQ		0x00000400
   40 #define IORESOURCE_DMA		0x00000800
   41 #define IORESOURCE_BUS		0x00001000
   42 
   43 #define IORESOURCE_PREFETCH	0x00002000	/* No side effects */
   44 #define IORESOURCE_READONLY	0x00004000
   45 #define IORESOURCE_CACHEABLE	0x00008000
   46 #define IORESOURCE_RANGELENGTH	0x00010000
   47 #define IORESOURCE_SHADOWABLE	0x00020000
   48 
   49 #define IORESOURCE_SIZEALIGN	0x00040000	/* size indicates alignment */
   50 #define IORESOURCE_STARTALIGN	0x00080000	/* start field is alignment */
   51 
   52 #define IORESOURCE_MEM_64	0x00100000
   53 #define IORESOURCE_WINDOW	0x00200000	/* forwarded by bridge */
   54 #define IORESOURCE_MUXED	0x00400000	/* Resource is software muxed */
   55 
   56 #define IORESOURCE_EXT_TYPE_BITS 0x01000000	/* Resource extended types */
   57 #define IORESOURCE_SYSRAM	0x01000000	/* System RAM (modifier) */
   58 
   59 #define IORESOURCE_EXCLUSIVE	0x08000000	/* Userland may not map this resource */
   60 
   61 #define IORESOURCE_DISABLED	0x10000000
   62 #define IORESOURCE_UNSET	0x20000000	/* No address assigned yet */
   63 #define IORESOURCE_AUTO		0x40000000
   64 #define IORESOURCE_BUSY		0x80000000	/* Driver has marked this resource busy */
   65 
   66 /* I/O resource extended types */
   67 #define IORESOURCE_SYSTEM_RAM		(IORESOURCE_MEM|IORESOURCE_SYSRAM)
   68 
   69 /* PnP IRQ specific bits (IORESOURCE_BITS) */
   70 #define IORESOURCE_IRQ_HIGHEDGE		(1<<0)
   71 #define IORESOURCE_IRQ_LOWEDGE		(1<<1)
   72 #define IORESOURCE_IRQ_HIGHLEVEL	(1<<2)
   73 #define IORESOURCE_IRQ_LOWLEVEL		(1<<3)
   74 #define IORESOURCE_IRQ_SHAREABLE	(1<<4)
   75 #define IORESOURCE_IRQ_OPTIONAL 	(1<<5)
   76 
   77 /* PnP DMA specific bits (IORESOURCE_BITS) */
   78 #define IORESOURCE_DMA_TYPE_MASK	(3<<0)
   79 #define IORESOURCE_DMA_8BIT		(0<<0)
   80 #define IORESOURCE_DMA_8AND16BIT	(1<<0)
   81 #define IORESOURCE_DMA_16BIT		(2<<0)
   82 
   83 #define IORESOURCE_DMA_MASTER		(1<<2)
   84 #define IORESOURCE_DMA_BYTE		(1<<3)
   85 #define IORESOURCE_DMA_WORD		(1<<4)
   86 
   87 #define IORESOURCE_DMA_SPEED_MASK	(3<<6)
   88 #define IORESOURCE_DMA_COMPATIBLE	(0<<6)
   89 #define IORESOURCE_DMA_TYPEA		(1<<6)
   90 #define IORESOURCE_DMA_TYPEB		(2<<6)
   91 #define IORESOURCE_DMA_TYPEF		(3<<6)
   92 
   93 /* PnP memory I/O specific bits (IORESOURCE_BITS) */
   94 #define IORESOURCE_MEM_WRITEABLE	(1<<0)	/* dup: IORESOURCE_READONLY */
   95 #define IORESOURCE_MEM_CACHEABLE	(1<<1)	/* dup: IORESOURCE_CACHEABLE */
   96 #define IORESOURCE_MEM_RANGELENGTH	(1<<2)	/* dup: IORESOURCE_RANGELENGTH */
   97 #define IORESOURCE_MEM_TYPE_MASK	(3<<3)
   98 #define IORESOURCE_MEM_8BIT		(0<<3)
   99 #define IORESOURCE_MEM_16BIT		(1<<3)
  100 #define IORESOURCE_MEM_8AND16BIT	(2<<3)
  101 #define IORESOURCE_MEM_32BIT		(3<<3)
  102 #define IORESOURCE_MEM_SHADOWABLE	(1<<5)	/* dup: IORESOURCE_SHADOWABLE */
  103 #define IORESOURCE_MEM_EXPANSIONROM	(1<<6)
  104 
  105 /* PnP I/O specific bits (IORESOURCE_BITS) */
  106 #define IORESOURCE_IO_16BIT_ADDR	(1<<0)
  107 #define IORESOURCE_IO_FIXED		(1<<1)
  108 #define IORESOURCE_IO_SPARSE		(1<<2)
  109 
  110 /* PCI ROM control bits (IORESOURCE_BITS) */
  111 #define IORESOURCE_ROM_ENABLE		(1<<0)	/* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
  112 #define IORESOURCE_ROM_SHADOW		(1<<1)	/* Use RAM image, not ROM BAR */
  113 
  114 /* PCI control bits.  Shares IORESOURCE_BITS with above PCI ROM.  */
  115 #define IORESOURCE_PCI_FIXED		(1<<4)	/* Do not move resource */
  116 #define IORESOURCE_PCI_EA_BEI		(1<<5)	/* BAR Equivalent Indicator */
  117 
  118 /*
  119  * I/O Resource Descriptors
  120  *
  121  * Descriptors are used by walk_iomem_res_desc() and region_intersects()
  122  * for searching a specific resource range in the iomem table.  Assign
  123  * a new descriptor when a resource range supports the search interfaces.
  124  * Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
  125  */
  126 enum {
  127 	IORES_DESC_NONE				= 0,
  128 	IORES_DESC_CRASH_KERNEL			= 1,
  129 	IORES_DESC_ACPI_TABLES			= 2,
  130 	IORES_DESC_ACPI_NV_STORAGE		= 3,
  131 	IORES_DESC_PERSISTENT_MEMORY		= 4,
  132 	IORES_DESC_PERSISTENT_MEMORY_LEGACY	= 5,
  133 };
  134 
  135 /* helpers to define resources */
  136 #define DEFINE_RES_NAMED(_start, _size, _name, _flags)			\
  137 	{								\
  138 		.start = (_start),					\
  139 		.end = (_start) + (_size) - 1,				\
  140 		.name = (_name),					\
  141 		.flags = (_flags),					\
  142 		.desc = IORES_DESC_NONE,				\
  143 	}
  144 
  145 #define DEFINE_RES_IO_NAMED(_start, _size, _name)			\
  146 	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
  147 #define DEFINE_RES_IO(_start, _size)					\
  148 	DEFINE_RES_IO_NAMED((_start), (_size), NULL)
  149 
  150 #define DEFINE_RES_MEM_NAMED(_start, _size, _name)			\
  151 	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM)
  152 #define DEFINE_RES_MEM(_start, _size)					\
  153 	DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
  154 
  155 #define DEFINE_RES_IRQ_NAMED(_irq, _name)				\
  156 	DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
  157 #define DEFINE_RES_IRQ(_irq)						\
  158 	DEFINE_RES_IRQ_NAMED((_irq), NULL)
  159 
  160 #define DEFINE_RES_DMA_NAMED(_dma, _name)				\
  161 	DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA)
  162 #define DEFINE_RES_DMA(_dma)						\
  163 	DEFINE_RES_DMA_NAMED((_dma), NULL)
  164 
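/*
 * Editor's sketch: a static resource table built with the helpers above;
 * the base address, size and IRQ number are made up.
 */
static struct resource my_uart_resources[] = {
	DEFINE_RES_MEM(0x10000000, 0x100),
	DEFINE_RES_IRQ(42),
};
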
  165 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
  166 extern struct resource ioport_resource;
  167 extern struct resource iomem_resource;
  168 
  169 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
  170 extern int request_resource(struct resource *root, struct resource *new);
  171 extern int release_resource(struct resource *new);
  172 void release_child_resources(struct resource *new);
  173 extern void reserve_region_with_split(struct resource *root,
  174 			     resource_size_t start, resource_size_t end,
  175 			     const char *name);
  176 extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
  177 extern int insert_resource(struct resource *parent, struct resource *new);
  178 extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
  179 extern int remove_resource(struct resource *old);
  180 extern void arch_remove_reservations(struct resource *avail);
  181 extern int allocate_resource(struct resource *root, struct resource *new,
  182 			     resource_size_t size, resource_size_t min,
  183 			     resource_size_t max, resource_size_t align,
  184 			     resource_size_t (*alignf)(void *,
  185 						       const struct resource *,
  186 						       resource_size_t,
  187 						       resource_size_t),
  188 			     void *alignf_data);
  189 struct resource *lookup_resource(struct resource *root, resource_size_t start);
  190 int adjust_resource(struct resource *res, resource_size_t start,
  191 		    resource_size_t size);
  192 resource_size_t resource_alignment(struct resource *res);
  193 static inline resource_size_t resource_size(const struct resource *res)
  194 {
  195 	return res->end - res->start + 1;
  196 }
  197 static inline unsigned long resource_type(const struct resource *res)
  198 {
  199 	return res->flags & IORESOURCE_TYPE_BITS;
  200 }
  201 static inline unsigned long resource_ext_type(const struct resource *res)
  202 {
  203 	return res->flags & IORESOURCE_EXT_TYPE_BITS;
  204 }
  205 /* True iff r1 completely contains r2 */
  206 static inline bool resource_contains(struct resource *r1, struct resource *r2)
  207 {
  208 	if (resource_type(r1) != resource_type(r2))
  209 		return false;
  210 	if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET)
  211 		return false;
  212 	return r1->start <= r2->start && r1->end >= r2->end;
  213 }
  214 
  215 
  216 /* Convenience shorthand with allocation */
  217 #define request_region(start,n,name)		__request_region(&ioport_resource, (start), (n), (name), 0)
  218 #define request_muxed_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
  219 #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
  220 #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
  221 #define request_mem_region_exclusive(start,n,name) \
  222 	__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
  223 #define rename_region(region, newname) do { (region)->name = (newname); } while (0)
  224 
  225 extern struct resource * __request_region(struct resource *,
  226 					resource_size_t start,
  227 					resource_size_t n,
  228 					const char *name, int flags);
  229 
  230 /* Compatibility cruft */
  231 #define release_region(start,n)	__release_region(&ioport_resource, (start), (n))
  232 #define release_mem_region(start,n)	__release_region(&iomem_resource, (start), (n))
  233 
  234 extern void __release_region(struct resource *, resource_size_t,
  235 				resource_size_t);
  236 #ifdef CONFIG_MEMORY_HOTREMOVE
  237 extern int release_mem_region_adjustable(struct resource *, resource_size_t,
  238 				resource_size_t);
  239 #endif
  240 
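/*
 * Editor's sketch: the claim/use/release discipline for an MMIO window
 * using the shorthands above.  The address range and name are hypothetical.
 */
static int my_probe_regs(void)
{
	if (!request_mem_region(0x10000000, 0x100, "my-uart"))
		return -EBUSY;
	/* ... ioremap() and access the registers ... */
	release_mem_region(0x10000000, 0x100);
	return 0;
}
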
  241 /* Wrappers for managed devices */
  242 struct device;
  243 
  244 extern int devm_request_resource(struct device *dev, struct resource *root,
  245 				 struct resource *new);
  246 extern void devm_release_resource(struct device *dev, struct resource *new);
  247 
  248 #define devm_request_region(dev,start,n,name) \
  249 	__devm_request_region(dev, &ioport_resource, (start), (n), (name))
  250 #define devm_request_mem_region(dev,start,n,name) \
  251 	__devm_request_region(dev, &iomem_resource, (start), (n), (name))
  252 
  253 extern struct resource * __devm_request_region(struct device *dev,
  254 				struct resource *parent, resource_size_t start,
  255 				resource_size_t n, const char *name);
  256 
  257 #define devm_release_region(dev, start, n) \
  258 	__devm_release_region(dev, &ioport_resource, (start), (n))
  259 #define devm_release_mem_region(dev, start, n) \
  260 	__devm_release_region(dev, &iomem_resource, (start), (n))
  261 
  262 extern void __devm_release_region(struct device *dev, struct resource *parent,
  263 				  resource_size_t start, resource_size_t n);
  264 extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
  265 extern int iomem_is_exclusive(u64 addr);
  266 
  267 extern int
  268 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
  269 		void *arg, int (*func)(unsigned long, unsigned long, void *));
  270 extern int
  271 walk_system_ram_res(u64 start, u64 end, void *arg,
  272 		    int (*func)(u64, u64, void *));
  273 extern int
  274 walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
  275 		    void *arg, int (*func)(u64, u64, void *));
  276 
  277 /* True if any part of r1 overlaps r2 */
  278 static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
  279 {
   280 	return (r1->start <= r2->end && r1->end >= r2->start);
  281 }
  282 
  283 
  284 #endif /* __ASSEMBLY__ */
   285 #endif	/* _LINUX_IOPORT_H */

     1 /*
    2  * platform_device.h - generic, centralized driver model
    3  *
    4  * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
    5  *
    6  * This file is released under the GPLv2
    7  *
    8  * See Documentation/driver-model/ for more information.
    9  */
   10 
   11 #ifndef _PLATFORM_DEVICE_H_
   12 #define _PLATFORM_DEVICE_H_
   13 
   14 #include <linux/device.h>
   15 #include <linux/mod_devicetable.h>
   16 
   17 #define PLATFORM_DEVID_NONE	(-1)
   18 #define PLATFORM_DEVID_AUTO	(-2)
   19 
   20 struct mfd_cell;
   21 struct property_entry;
   22 
   23 struct platform_device {
   24 	const char	*name;
   25 	int		id;
   26 	bool		id_auto;
   27 	struct device	dev;
   28 	u32		num_resources;
   29 	struct resource	*resource;
   30 
   31 	const struct platform_device_id	*id_entry;
   32 	char *driver_override; /* Driver name to force a match */
   33 
   34 	/* MFD cell pointer */
   35 	struct mfd_cell *mfd_cell;
   36 
   37 	/* arch specific additions */
   38 	struct pdev_archdata	archdata;
   39 };
   40 
   41 #define platform_get_device_id(pdev)	((pdev)->id_entry)
   42 
   43 #define to_platform_device(x) container_of((x), struct platform_device, dev)
   44 
   45 extern int platform_device_register(struct platform_device *);
   46 extern void platform_device_unregister(struct platform_device *);
   47 
   48 extern struct bus_type platform_bus_type;
   49 extern struct device platform_bus;
   50 
   51 extern void arch_setup_pdev_archdata(struct platform_device *);
   52 extern struct resource *platform_get_resource(struct platform_device *,
   53 					      unsigned int, unsigned int);
   54 extern int platform_get_irq(struct platform_device *, unsigned int);
   55 extern int platform_irq_count(struct platform_device *);
   56 extern struct resource *platform_get_resource_byname(struct platform_device *,
   57 						     unsigned int,
   58 						     const char *);
   59 extern int platform_get_irq_byname(struct platform_device *, const char *);
   60 extern int platform_add_devices(struct platform_device **, int);
   61 
   62 struct platform_device_info {
   63 		struct device *parent;
   64 		struct fwnode_handle *fwnode;
   65 
   66 		const char *name;
   67 		int id;
   68 
   69 		const struct resource *res;
   70 		unsigned int num_res;
   71 
   72 		const void *data;
   73 		size_t size_data;
   74 		u64 dma_mask;
   75 
   76 		struct property_entry *properties;
   77 };
   78 extern struct platform_device *platform_device_register_full(
   79 		const struct platform_device_info *pdevinfo);
   80 
   81 /**
   82  * platform_device_register_resndata - add a platform-level device with
   83  * resources and platform-specific data
   84  *
   85  * @parent: parent device for the device we're adding
   86  * @name: base name of the device we're adding
   87  * @id: instance id
   88  * @res: set of resources that needs to be allocated for the device
   89  * @num: number of resources
   90  * @data: platform specific data for this platform device
   91  * @size: size of platform specific data
   92  *
   93  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
   94  */
   95 static inline struct platform_device *platform_device_register_resndata(
   96 		struct device *parent, const char *name, int id,
   97 		const struct resource *res, unsigned int num,
    98 		const void *data, size_t size)
    99 {
  100 	struct platform_device_info pdevinfo = {
  101 		.parent = parent,
  102 		.name = name,
  103 		.id = id,
  104 		.res = res,
  105 		.num_res = num,
  106 		.data = data,
  107 		.size_data = size,
  108 		.dma_mask = 0,
  109 	};
  110 
  111 	return platform_device_register_full(&pdevinfo);
  112 }
  113 
  114 /**
  115  * platform_device_register_simple - add a platform-level device and its resources
  116  * @name: base name of the device we're adding
  117  * @id: instance id
  118  * @res: set of resources that needs to be allocated for the device
  119  * @num: number of resources
  120  *
  121  * This function creates a simple platform device that requires minimal
   122  * resource and memory management. A canned release function that frees the
   123  * memory allocated for the device allows drivers using such devices to be
  124  * unloaded without waiting for the last reference to the device to be
  125  * dropped.
  126  *
  127  * This interface is primarily intended for use with legacy drivers which
  128  * probe hardware directly.  Because such drivers create sysfs device nodes
  129  * themselves, rather than letting system infrastructure handle such device
  130  * enumeration tasks, they don't fully conform to the Linux driver model.
  131  * In particular, when such drivers are built as modules, they can't be
  132  * "hotplugged".
  133  *
  134  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  135  */
  136 static inline struct platform_device *platform_device_register_simple(
  137 		const char *name, int id,
  138 		const struct resource *res, unsigned int num)
  139 {
  140 	return platform_device_register_resndata(NULL, name, id,
  141 			res, num, NULL, 0);
  142 }
  143 
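/*
 * Editor's sketch: registering a legacy, directly-probed device through
 * platform_device_register_simple(); the name and resource values are
 * hypothetical, and the caller must check the result with IS_ERR().
 */
static struct platform_device *my_register_uart(void)
{
	static struct resource res[] = {
		DEFINE_RES_MEM(0x10000000, 0x100),
	};

	return platform_device_register_simple("my-uart", PLATFORM_DEVID_NONE,
					       res, ARRAY_SIZE(res));
}
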
  144 /**
  145  * platform_device_register_data - add a platform-level device with platform-specific data
  146  * @parent: parent device for the device we're adding
  147  * @name: base name of the device we're adding
  148  * @id: instance id
  149  * @data: platform specific data for this platform device
  150  * @size: size of platform specific data
  151  *
  152  * This function creates a simple platform device that requires minimal
   153  * resource and memory management. A canned release function that frees the
   154  * memory allocated for the device allows drivers using such devices to be
  155  * unloaded without waiting for the last reference to the device to be
  156  * dropped.
  157  *
  158  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  159  */
  160 static inline struct platform_device *platform_device_register_data(
  161 		struct device *parent, const char *name, int id,
  162 		const void *data, size_t size)
  163 {
  164 	return platform_device_register_resndata(parent, name, id,
  165 			NULL, 0, data, size);
  166 }
  167 
  168 extern struct platform_device *platform_device_alloc(const char *name, int id);
  169 extern int platform_device_add_resources(struct platform_device *pdev,
  170 					 const struct resource *res,
  171 					 unsigned int num);
  172 extern int platform_device_add_data(struct platform_device *pdev,
  173 				    const void *data, size_t size);
  174 extern int platform_device_add_properties(struct platform_device *pdev,
  175 					  struct property_entry *properties);
  176 extern int platform_device_add(struct platform_device *pdev);
  177 extern void platform_device_del(struct platform_device *pdev);
  178 extern void platform_device_put(struct platform_device *pdev);
  179 
  180 struct platform_driver {
  181 	int (*probe)(struct platform_device *);
  182 	int (*remove)(struct platform_device *);
  183 	void (*shutdown)(struct platform_device *);
  184 	int (*suspend)(struct platform_device *, pm_message_t state);
  185 	int (*resume)(struct platform_device *);
  186 	struct device_driver driver;
  187 	const struct platform_device_id *id_table;
  188 	bool prevent_deferred_probe;
  189 };
  190 
  191 #define to_platform_driver(drv)	(container_of((drv), struct platform_driver, \
  192 				 driver))
  193 
  194 /*
  195  * use a macro to avoid include chaining to get THIS_MODULE
  196  */
  197 #define platform_driver_register(drv) \
  198 	__platform_driver_register(drv, THIS_MODULE)
  199 extern int __platform_driver_register(struct platform_driver *,
  200 					struct module *);
  201 extern void platform_driver_unregister(struct platform_driver *);
  202 
  203 /* non-hotpluggable platform devices may use this so that probe() and
  204  * its support may live in __init sections, conserving runtime memory.
  205  */
  206 #define platform_driver_probe(drv, probe) \
  207 	__platform_driver_probe(drv, probe, THIS_MODULE)
  208 extern int __platform_driver_probe(struct platform_driver *driver,
  209 		int (*probe)(struct platform_device *), struct module *module);
  210 
  211 static inline void *platform_get_drvdata(const struct platform_device *pdev)
  212 {
  213 	return dev_get_drvdata(&pdev->dev);
  214 }
  215 
  216 static inline void platform_set_drvdata(struct platform_device *pdev,
  217 					void *data)
  218 {
  219 	dev_set_drvdata(&pdev->dev, data);
  220 }
  221 
  222 /* module_platform_driver() - Helper macro for drivers that don't do
  223  * anything special in module init/exit.  This eliminates a lot of
  224  * boilerplate.  Each module may only use this macro once, and
  225  * calling it replaces module_init() and module_exit()
  226  */
  227 #define module_platform_driver(__platform_driver) \
  228 	module_driver(__platform_driver, platform_driver_register, \
  229 			platform_driver_unregister)
  230 
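/*
 * Editor's sketch: the minimal shape of a platform driver registered with
 * module_platform_driver() above; all "my_" names are hypothetical.
 */
static int my_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver my_driver = {
	.probe	= my_probe,
	.remove	= my_remove,
	.driver	= {
		.name = "my-uart",
	},
};
module_platform_driver(my_driver);
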
  231 /* builtin_platform_driver() - Helper macro for builtin drivers that
  232  * don't do anything special in driver init.  This eliminates some
  233  * boilerplate.  Each driver may only use this macro once, and
  234  * calling it replaces device_initcall().  Note this is meant to be
  235  * a parallel of module_platform_driver() above, but w/o _exit stuff.
  236  */
  237 #define builtin_platform_driver(__platform_driver) \
  238 	builtin_driver(__platform_driver, platform_driver_register)
  239 
  240 /* module_platform_driver_probe() - Helper macro for drivers that don't do
  241  * anything special in module init/exit.  This eliminates a lot of
  242  * boilerplate.  Each module may only use this macro once, and
  243  * calling it replaces module_init() and module_exit()
  244  */
  245 #define module_platform_driver_probe(__platform_driver, __platform_probe) \
  246 static int __init __platform_driver##_init(void) \
  247 { \
  248 	return platform_driver_probe(&(__platform_driver), \
  249 				     __platform_probe);    \
  250 } \
  251 module_init(__platform_driver##_init); \
  252 static void __exit __platform_driver##_exit(void) \
  253 { \
  254 	platform_driver_unregister(&(__platform_driver)); \
  255 } \
  256 module_exit(__platform_driver##_exit);
  257 
  258 /* builtin_platform_driver_probe() - Helper macro for drivers that don't do
  259  * anything special in device init.  This eliminates some boilerplate.  Each
  260  * driver may only use this macro once, and using it replaces device_initcall.
  261  * This is meant to be a parallel of module_platform_driver_probe above, but
  262  * without the __exit parts.
  263  */
  264 #define builtin_platform_driver_probe(__platform_driver, __platform_probe) \
  265 static int __init __platform_driver##_init(void) \
  266 { \
  267 	return platform_driver_probe(&(__platform_driver), \
  268 				     __platform_probe);    \
  269 } \
  270 device_initcall(__platform_driver##_init); \
  271 
  272 #define platform_create_bundle(driver, probe, res, n_res, data, size) \
  273 	__platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
  274 extern struct platform_device *__platform_create_bundle(
  275 	struct platform_driver *driver, int (*probe)(struct platform_device *),
  276 	struct resource *res, unsigned int n_res,
  277 	const void *data, size_t size, struct module *module);
  278 
  279 int __platform_register_drivers(struct platform_driver * const *drivers,
  280 				unsigned int count, struct module *owner);
  281 void platform_unregister_drivers(struct platform_driver * const *drivers,
  282 				 unsigned int count);
  283 
  284 #define platform_register_drivers(drivers, count) \
  285 	__platform_register_drivers(drivers, count, THIS_MODULE)
  286 
  287 /* early platform driver interface */
  288 struct early_platform_driver {
  289 	const char *class_str;
  290 	struct platform_driver *pdrv;
  291 	struct list_head list;
  292 	int requested_id;
  293 	char *buffer;
  294 	int bufsize;
  295 };
  296 
  297 #define EARLY_PLATFORM_ID_UNSET -2
  298 #define EARLY_PLATFORM_ID_ERROR -3
  299 
  300 extern int early_platform_driver_register(struct early_platform_driver *epdrv,
  301 					  char *buf);
  302 extern void early_platform_add_devices(struct platform_device **devs, int num);
  303 
  304 static inline int is_early_platform_device(struct platform_device *pdev)
  305 {
  306 	return !pdev->dev.driver;
  307 }
  308 
  309 extern void early_platform_driver_register_all(char *class_str);
  310 extern int early_platform_driver_probe(char *class_str,
  311 				       int nr_probe, int user_only);
  312 extern void early_platform_cleanup(void);
  313 
  314 #define early_platform_init(class_string, platdrv)		\
  315 	early_platform_init_buffer(class_string, platdrv, NULL, 0)
  316 
  317 #ifndef MODULE
  318 #define early_platform_init_buffer(class_string, platdrv, buf, bufsiz)	\
  319 static __initdata struct early_platform_driver early_driver = {		\
  320 	.class_str = class_string,					\
  321 	.buffer = buf,							\
  322 	.bufsize = bufsiz,						\
  323 	.pdrv = platdrv,						\
  324 	.requested_id = EARLY_PLATFORM_ID_UNSET,			\
  325 };									\
  326 static int __init early_platform_driver_setup_func(char *buffer)	\
  327 {									\
  328 	return early_platform_driver_register(&early_driver, buffer);	\
  329 }									\
  330 early_param(class_string, early_platform_driver_setup_func)
  331 #else /* MODULE */
  332 #define early_platform_init_buffer(class_string, platdrv, buf, bufsiz)	\
  333 static inline char *early_platform_driver_setup_func(void)		\
  334 {									\
  335 	return bufsiz ? buf : NULL;					\
  336 }
  337 #endif /* MODULE */
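
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * registering an early platform driver, loosely modeled on SH timer drivers.
 * The "earlytimer" class string and demo_pdrv are assumptions.
 */
static struct platform_driver demo_pdrv = {
	.driver = {
		.name = "demo-early-timer",
	},
};
early_platform_init("earlytimer", &demo_pdrv);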
  338 
  339 #ifdef CONFIG_SUSPEND
  340 extern int platform_pm_suspend(struct device *dev);
  341 extern int platform_pm_resume(struct device *dev);
  342 #else
  343 #define platform_pm_suspend		NULL
  344 #define platform_pm_resume		NULL
  345 #endif
  346 
  347 #ifdef CONFIG_HIBERNATE_CALLBACKS
  348 extern int platform_pm_freeze(struct device *dev);
  349 extern int platform_pm_thaw(struct device *dev);
  350 extern int platform_pm_poweroff(struct device *dev);
  351 extern int platform_pm_restore(struct device *dev);
  352 #else
  353 #define platform_pm_freeze		NULL
  354 #define platform_pm_thaw		NULL
  355 #define platform_pm_poweroff		NULL
  356 #define platform_pm_restore		NULL
  357 #endif
  358 
  359 #ifdef CONFIG_PM_SLEEP
  360 #define USE_PLATFORM_PM_SLEEP_OPS \
  361 	.suspend = platform_pm_suspend, \
  362 	.resume = platform_pm_resume, \
  363 	.freeze = platform_pm_freeze, \
  364 	.thaw = platform_pm_thaw, \
  365 	.poweroff = platform_pm_poweroff, \
  366 	.restore = platform_pm_restore,
  367 #else
  368 #define USE_PLATFORM_PM_SLEEP_OPS
  369 #endif
  370 
  371 #endif /* _PLATFORM_DEVICE_H_ */

    1 /*
    2  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
    3  *
    4  * (C) SGI 2006, Christoph Lameter
    5  * 	Cleaned up and restructured to ease the addition of alternative
    6  * 	implementations of SLAB allocators.
    7  * (C) Linux Foundation 2008-2013
    8  *      Unified interface for all slab allocators
    9  */
   10 
   11 #ifndef _LINUX_SLAB_H
   12 #define	_LINUX_SLAB_H
   13 
   14 #include <linux/gfp.h>
   15 #include <linux/types.h>
   16 #include <linux/workqueue.h>
   17 
   18 
   19 /*
   20  * Flags to pass to kmem_cache_create().
   21  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
   22  */
   23 #define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
   24 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
   25 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
   26 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
   27 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
   28 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
   29 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
   30 /*
   31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
   32  *
    33  * This delays freeing the SLAB page by a grace period; it does _NOT_
   34  * delay object freeing. This means that if you do kmem_cache_free()
   35  * that memory location is free to be reused at any time. Thus it may
   36  * be possible to see another object there in the same RCU grace period.
   37  *
   38  * This feature only ensures the memory location backing the object
   39  * stays valid, the trick to using this is relying on an independent
   40  * object validation pass. Something like:
   41  *
   42  *  rcu_read_lock()
   43  * again:
   44  *  obj = lockless_lookup(key);
   45  *  if (obj) {
   46  *    if (!try_get_ref(obj)) // might fail for free objects
   47  *      goto again;
   48  *
   49  *    if (obj->key != key) { // not the object we expected
   50  *      put_ref(obj);
   51  *      goto again;
   52  *    }
   53  *  }
   54  *  rcu_read_unlock();
   55  *
   56  * This is useful if we need to approach a kernel structure obliquely,
   57  * from its address obtained without the usual locking. We can lock
   58  * the structure to stabilize it and check it's still at the given address,
   59  * only if we can be sure that the memory has not been meanwhile reused
   60  * for some other kind of object (which our subsystem's lock might corrupt).
   61  *
   62  * rcu_read_lock before reading the address, then rcu_read_unlock after
   63  * taking the spinlock within the structure expected at that address.
   64  */
   65 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
   66 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
   67 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
   68 
   69 /* Flag to prevent checks on free */
   70 #ifdef CONFIG_DEBUG_OBJECTS
   71 # define SLAB_DEBUG_OBJECTS	0x00400000UL
   72 #else
   73 # define SLAB_DEBUG_OBJECTS	0x00000000UL
   74 #endif
   75 
   76 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
   77 
   78 /* Don't track use of uninitialized memory */
   79 #ifdef CONFIG_KMEMCHECK
   80 # define SLAB_NOTRACK		0x01000000UL
   81 #else
   82 # define SLAB_NOTRACK		0x00000000UL
   83 #endif
   84 #ifdef CONFIG_FAILSLAB
   85 # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
   86 #else
   87 # define SLAB_FAILSLAB		0x00000000UL
   88 #endif
   89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
   90 # define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
   91 #else
   92 # define SLAB_ACCOUNT		0x00000000UL
   93 #endif
   94 
   95 #ifdef CONFIG_KASAN
   96 #define SLAB_KASAN		0x08000000UL
   97 #else
   98 #define SLAB_KASAN		0x00000000UL
   99 #endif
  100 
  101 /* The following flags affect the page allocator grouping pages by mobility */
  102 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
  103 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
  104 /*
  105  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  106  *
  107  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
  108  *
  109  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
  110  * Both make kfree a no-op.
  111  */
  112 #define ZERO_SIZE_PTR ((void *)16)
  113 
  114 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  115 				(unsigned long)ZERO_SIZE_PTR)
  116 
  117 #include <linux/kmemleak.h>
  118 #include <linux/kasan.h>
  119 
  120 struct mem_cgroup;
  121 /*
  122  * struct kmem_cache related prototypes
  123  */
  124 void __init kmem_cache_init(void);
  125 bool slab_is_available(void);
  126 
  127 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  128 			unsigned long,
  129 			void (*)(void *));
  130 void kmem_cache_destroy(struct kmem_cache *);
  131 int kmem_cache_shrink(struct kmem_cache *);
  132 
  133 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
  134 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
  135 void memcg_destroy_kmem_caches(struct mem_cgroup *);
  136 
  137 /*
  138  * Please use this macro to create slab caches. Simply specify the
  139  * name of the structure and maybe some flags that are listed above.
  140  *
  141  * The alignment of the struct determines object alignment. If you
  142  * add, e.g., ____cacheline_aligned_in_smp to the struct declaration
  143  * then the objects will be properly aligned in SMP configurations.
  144  */
  145 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  146 		sizeof(struct __struct), __alignof__(struct __struct),\
  147 		(__flags), NULL)
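
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * KMEM_CACHE() names the cache after the struct and inherits its alignment.
 * "demo_node" and the init function are assumptions for illustration.
 */
struct demo_node {
	struct list_head link;
	u64 key;
} ____cacheline_aligned_in_smp;	/* alignment is propagated to the cache */

static struct kmem_cache *demo_node_cache;

static int __init demo_node_cache_init(void)
{
	demo_node_cache = KMEM_CACHE(demo_node, SLAB_HWCACHE_ALIGN);
	return demo_node_cache ? 0 : -ENOMEM;
}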
  148 
  149 /*
  150  * Common kmalloc functions provided by all allocators
  151  */
  152 void * __must_check __krealloc(const void *, size_t, gfp_t);
  153 void * __must_check krealloc(const void *, size_t, gfp_t);
  154 void kfree(const void *);
  155 void kzfree(const void *);
  156 size_t ksize(const void *);
  157 
  158 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
  159 const char *__check_heap_object(const void *ptr, unsigned long n,
  160 				struct page *page);
  161 #else
  162 static inline const char *__check_heap_object(const void *ptr,
  163 					      unsigned long n,
  164 					      struct page *page)
  165 {
  166 	return NULL;
  167 }
  168 #endif
  169 
  170 /*
  171  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  172  * alignment larger than the alignment of a 64-bit integer.
  173  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  174  */
  175 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  176 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  177 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  178 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  179 #else
  180 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  181 #endif
  182 
  183 /*
  184  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  185  * Intended for arches that get misalignment faults even for 64 bit integer
  186  * aligned buffers.
  187  */
  188 #ifndef ARCH_SLAB_MINALIGN
  189 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  190 #endif
  191 
  192 /*
  193  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
  194  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
  195  * aligned pointers.
  196  */
  197 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
  198 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
  199 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
  200 
  201 /*
  202  * Kmalloc array related definitions
  203  */
  204 
  205 #ifdef CONFIG_SLAB
  206 /*
  207  * The largest kmalloc size supported by the SLAB allocators is
  208  * 32 megabytes (2^25) or the maximum allocatable page order if that is
  209  * less than 32 MB.
  210  *
  211  * WARNING: It's not easy to increase this value since the allocators have
  212  * to do various tricks to work around compiler limitations in order to
  213  * ensure proper constant folding.
  214  */
  215 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  216 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  217 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  218 #ifndef KMALLOC_SHIFT_LOW
  219 #define KMALLOC_SHIFT_LOW	5
  220 #endif
  221 #endif
  222 
  223 #ifdef CONFIG_SLUB
  224 /*
  225  * SLUB directly allocates requests fitting into an order-1 page
  226  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  227  */
  228 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  229 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
  230 #ifndef KMALLOC_SHIFT_LOW
  231 #define KMALLOC_SHIFT_LOW	3
  232 #endif
  233 #endif
  234 
  235 #ifdef CONFIG_SLOB
  236 /*
  237  * SLOB passes all requests larger than one page to the page allocator.
  238  * No kmalloc array is necessary since objects of different sizes can
  239  * be allocated from the same page.
  240  */
  241 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  242 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
  243 #ifndef KMALLOC_SHIFT_LOW
  244 #define KMALLOC_SHIFT_LOW	3
  245 #endif
  246 #endif
  247 
  248 /* Maximum allocatable size */
  249 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  250 /* Maximum size for which we actually use a slab cache */
  251 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
  253 /* Maximum order allocatable via the slab allocator */
  253 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
  254 
  255 /*
  256  * Kmalloc subsystem.
  257  */
  258 #ifndef KMALLOC_MIN_SIZE
  259 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  260 #endif
  261 
  262 /*
  263  * This restriction comes from byte sized index implementation.
  264  * Page size is normally 2^12 bytes and, in this case, if we want to use
  265  * byte sized index which can represent 2^8 entries, the size of the object
  266  * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
  267  * If the minimum kmalloc size is less than 16, we use it as the minimum
  268  * object size and give up on using a byte sized index.
  269  */
  270 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
  271                                (KMALLOC_MIN_SIZE) : 16)
  272 
  273 #ifndef CONFIG_SLOB
  274 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  275 #ifdef CONFIG_ZONE_DMA
  276 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  277 #endif
  278 
  279 /*
  280  * Figure out which kmalloc slab an allocation of a certain size
  281  * belongs to.
  282  * 0 = zero alloc
  283  * 1 =  65 .. 96 bytes
  284  * 2 = 129 .. 192 bytes
  285  * n = 2^(n-1)+1 .. 2^n
  286  */
  287 static __always_inline int kmalloc_index(size_t size)
  288 {
  289 	if (!size)
  290 		return 0;
  291 
  292 	if (size <= KMALLOC_MIN_SIZE)
  293 		return KMALLOC_SHIFT_LOW;
  294 
  295 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
  296 		return 1;
  297 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
  298 		return 2;
  299 	if (size <=          8) return 3;
  300 	if (size <=         16) return 4;
  301 	if (size <=         32) return 5;
  302 	if (size <=         64) return 6;
  303 	if (size <=        128) return 7;
  304 	if (size <=        256) return 8;
  305 	if (size <=        512) return 9;
  306 	if (size <=       1024) return 10;
  307 	if (size <=   2 * 1024) return 11;
  308 	if (size <=   4 * 1024) return 12;
  309 	if (size <=   8 * 1024) return 13;
  310 	if (size <=  16 * 1024) return 14;
  311 	if (size <=  32 * 1024) return 15;
  312 	if (size <=  64 * 1024) return 16;
  313 	if (size <= 128 * 1024) return 17;
  314 	if (size <= 256 * 1024) return 18;
  315 	if (size <= 512 * 1024) return 19;
  316 	if (size <= 1024 * 1024) return 20;
  317 	if (size <=  2 * 1024 * 1024) return 21;
  318 	if (size <=  4 * 1024 * 1024) return 22;
  319 	if (size <=  8 * 1024 * 1024) return 23;
  320 	if (size <=  16 * 1024 * 1024) return 24;
  321 	if (size <=  32 * 1024 * 1024) return 25;
  322 	if (size <=  64 * 1024 * 1024) return 26;
  323 	BUG();
  324 
  325 	/* Will never be reached. Needed because the compiler may complain */
  326 	return -1;
  327 }
  328 #endif /* !CONFIG_SLOB */
  329 
  330 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
  331 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
  332 void kmem_cache_free(struct kmem_cache *, void *);
  333 
  334 /*
  335  * Bulk allocation and freeing operations. These are accelerated in an
  336  * allocator specific way to avoid taking locks repeatedly or building
  337  * metadata structures unnecessarily.
  338  *
  339  * Note that interrupts must be enabled when calling these functions.
  340  */
  341 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  342 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
  343 
  344 /*
  345  * Caller must not use kfree_bulk() on memory not originally allocated
  346  * by kmalloc(), because the SLOB allocator cannot handle this.
  347  */
  348 static __always_inline void kfree_bulk(size_t size, void **p)
  349 {
  350 	kmem_cache_free_bulk(NULL, size, p);
  351 }
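
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a typical bulk alloc/free round trip. "demo_cache" is assumed to be a
 * cache created elsewhere; interrupts must be enabled here (see above).
 */
static int demo_bulk_cycle(struct kmem_cache *demo_cache)
{
	void *objs[16];

	/* On failure nothing is kept, so a simple count check suffices */
	if (kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, ARRAY_SIZE(objs),
				  objs) != ARRAY_SIZE(objs))
		return -ENOMEM;
	/* ... use the objects ... */
	kmem_cache_free_bulk(demo_cache, ARRAY_SIZE(objs), objs);
	return 0;
}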
  352 
  353 #ifdef CONFIG_NUMA
  354 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
  355 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
  356 #else
  357 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  358 {
  359 	return __kmalloc(size, flags);
  360 }
  361 
  362 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
  363 {
  364 	return kmem_cache_alloc(s, flags);
  365 }
  366 #endif
  367 
  368 #ifdef CONFIG_TRACING
  369 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
  370 
  371 #ifdef CONFIG_NUMA
  372 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  373 					   gfp_t gfpflags,
  374 					   int node, size_t size) __assume_slab_alignment __malloc;
  375 #else
  376 static __always_inline void *
  377 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  378 			      gfp_t gfpflags,
  379 			      int node, size_t size)
  380 {
  381 	return kmem_cache_alloc_trace(s, gfpflags, size);
  382 }
  383 #endif /* CONFIG_NUMA */
  384 
  385 #else /* CONFIG_TRACING */
  386 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  387 		gfp_t flags, size_t size)
  388 {
  389 	void *ret = kmem_cache_alloc(s, flags);
  390 
  391 	kasan_kmalloc(s, ret, size, flags);
  392 	return ret;
  393 }
  394 
  395 static __always_inline void *
  396 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  397 			      gfp_t gfpflags,
  398 			      int node, size_t size)
  399 {
  400 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
  401 
  402 	kasan_kmalloc(s, ret, size, gfpflags);
  403 	return ret;
  404 }
  405 #endif /* CONFIG_TRACING */
  406 
  407 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  408 
  409 #ifdef CONFIG_TRACING
  410 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  411 #else
  412 static __always_inline void *
  413 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  414 {
  415 	return kmalloc_order(size, flags, order);
  416 }
  417 #endif
  418 
  419 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  420 {
  421 	unsigned int order = get_order(size);
  422 	return kmalloc_order_trace(size, flags, order);
  423 }
  424 
  425 /**
  426  * kmalloc - allocate memory
  427  * @size: how many bytes of memory are required.
  428  * @flags: the type of memory to allocate.
  429  *
  430  * kmalloc is the normal method of allocating memory
  431  * for objects smaller than page size in the kernel.
  432  *
  433  * The @flags argument may be one of:
  434  *
  435  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
  436  *
  437  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
  438  *
  439  * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
  440  *   For example, use this inside interrupt handlers.
  441  *
  442  * %GFP_HIGHUSER - Allocate pages from high memory.
  443  *
  444  * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
  445  *
  446  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  447  *
  448  * %GFP_NOWAIT - Allocation will not sleep.
  449  *
  450  * %__GFP_THISNODE - Allocate node-local memory only.
  451  *
  452  * %GFP_DMA - Allocation suitable for DMA.
  453  *   Should only be used for kmalloc() caches. Otherwise, use a
  454  *   slab created with SLAB_DMA.
  455  *
  456  * Also it is possible to set different flags by OR'ing
  457  * in one or more of the following additional @flags:
  458  *
  459  * %__GFP_COLD - Request cache-cold pages instead of
  460  *   trying to return cache-warm pages.
  461  *
  462  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  463  *
  464  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  465  *   (think twice before using).
  466  *
  467  * %__GFP_NORETRY - If memory is not immediately available,
  468  *   then give up at once.
  469  *
  470  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  471  *
  472  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  473  *
  474  * There are other flags available as well, but these are not intended
  475  * for general use, and so are not documented here. For a full list of
  476  * potential flags, always refer to linux/gfp.h.
  477  */
  478 static __always_inline void *kmalloc(size_t size, gfp_t flags)
  479 {
  480 	if (__builtin_constant_p(size)) {
  481 		if (size > KMALLOC_MAX_CACHE_SIZE)
  482 			return kmalloc_large(size, flags);
  483 #ifndef CONFIG_SLOB
  484 		if (!(flags & GFP_DMA)) {
  485 			int index = kmalloc_index(size);
  486 
  487 			if (!index)
  488 				return ZERO_SIZE_PTR;
  489 
  490 			return kmem_cache_alloc_trace(kmalloc_caches[index],
  491 					flags, size);
  492 		}
  493 #endif
  494 	}
  495 	return __kmalloc(size, flags);
  496 }
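
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the common kmalloc()/kfree() pattern. With a compile-time-constant size
 * like this, the branch above resolves to a direct allocation from the
 * matching kmalloc cache.
 */
static int demo_kmalloc_user(void)
{
	char *buf = kmalloc(128, GFP_KERNEL);	/* may sleep */

	if (!buf)
		return -ENOMEM;
	/* ... fill and use buf ... */
	kfree(buf);
	return 0;
}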
  497 
  498 /*
  499  * Determine the size used for the nth kmalloc cache.
  500  * Return the size, or 0 if a kmalloc cache for that
  501  * size does not exist.
  502  */
  503 static __always_inline int kmalloc_size(int n)
  504 {
  505 #ifndef CONFIG_SLOB
  506 	if (n > 2)
  507 		return 1 << n;
  508 
  509 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
  510 		return 96;
  511 
  512 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
  513 		return 192;
  514 #endif
  515 	return 0;
  516 }
  517 
  518 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  519 {
  520 #ifndef CONFIG_SLOB
  521 	if (__builtin_constant_p(size) &&
  522 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
  523 		int i = kmalloc_index(size);
  524 
  525 		if (!i)
  526 			return ZERO_SIZE_PTR;
  527 
  528 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
  529 						flags, node, size);
  530 	}
  531 #endif
  532 	return __kmalloc_node(size, flags, node);
  533 }
  534 
  535 struct memcg_cache_array {
  536 	struct rcu_head rcu;
  537 	struct kmem_cache *entries[0];
  538 };
  539 
  540 /*
  541  * This is the main placeholder for memcg-related information in kmem caches.
  542  * Both the root cache and the child caches will have it. For the root cache,
  543  * this will hold a dynamically allocated array large enough to hold
  544  * information about the currently limited memcgs in the system. To allow the
  545  * array to be accessed without taking any locks, on relocation we free the old
  546  * version only after a grace period.
  547  *
  548  * Root and child caches hold different metadata.
  549  *
  550  * @root_cache:	Common to root and child caches.  NULL for root, pointer to
  551  *		the root cache for children.
  552  *
  553  * The following fields are specific to root caches.
  554  *
  555  * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
  556  *		used to index child caches during allocation and cleared
  557  *		early during shutdown.
  558  *
  559  * @root_caches_node: List node for slab_root_caches list.
  560  *
  561  * @children:	List of all child caches.  While the child caches are also
  562  *		reachable through @memcg_caches, a child cache remains on
  563  *		this list until it is actually destroyed.
  564  *
  565  * The following fields are specific to child caches.
  566  *
  567  * @memcg:	Pointer to the memcg this cache belongs to.
  568  *
  569  * @children_node: List node for @root_cache->children list.
  570  *
  571  * @kmem_caches_node: List node for @memcg->kmem_caches list.
  572  */
  573 struct memcg_cache_params {
  574 	struct kmem_cache *root_cache;
  575 	union {
  576 		struct {
  577 			struct memcg_cache_array __rcu *memcg_caches;
  578 			struct list_head __root_caches_node;
  579 			struct list_head children;
  580 		};
  581 		struct {
  582 			struct mem_cgroup *memcg;
  583 			struct list_head children_node;
  584 			struct list_head kmem_caches_node;
  585 
  586 			void (*deact_fn)(struct kmem_cache *);
  587 			union {
  588 				struct rcu_head deact_rcu_head;
  589 				struct work_struct deact_work;
  590 			};
  591 		};
  592 	};
  593 };
  594 
  595 int memcg_update_all_caches(int num_memcgs);
  596 
  597 /**
  598  * kmalloc_array - allocate memory for an array.
  599  * @n: number of elements.
  600  * @size: element size.
  601  * @flags: the type of memory to allocate (see kmalloc).
  602  */
  603 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  604 {
  605 	if (size != 0 && n > SIZE_MAX / size)
  606 		return NULL;
  607 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
  608 		return kmalloc(n * size, flags);
  609 	return __kmalloc(n * size, flags);
  610 }
  611 
  612 /**
  613  * kcalloc - allocate memory for an array. The memory is set to zero.
  614  * @n: number of elements.
  615  * @size: element size.
  616  * @flags: the type of memory to allocate (see kmalloc).
  617  */
  618 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  619 {
  620 	return kmalloc_array(n, size, flags | __GFP_ZERO);
  621 }
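
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * why kcalloc()/kmalloc_array() beat an open-coded multiplication when "n"
 * may be attacker-influenced. The demo function is an assumption.
 */
static u32 *demo_table_alloc(size_t n)
{
	/* kmalloc(n * sizeof(u32), ...) could overflow and under-allocate;
	 * kcalloc() returns NULL instead, and zeroes the memory. */
	return kcalloc(n, sizeof(u32), GFP_KERNEL);
}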
  622 
  623 /*
  624  * kmalloc_track_caller is a special version of kmalloc that records the
  625  * calling function of the routine calling it for slab leak tracking instead
  626  * of just the calling function (confusing, eh?).
  627  * It's useful when the call to kmalloc comes from a widely-used standard
  628  * allocator where we care about the real place the memory allocation
  629  * request comes from.
  630  */
  631 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  632 #define kmalloc_track_caller(size, flags) \
  633 	__kmalloc_track_caller(size, flags, _RET_IP_)
  634 
  635 #ifdef CONFIG_NUMA
  636 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  637 #define kmalloc_node_track_caller(size, flags, node) \
  638 	__kmalloc_node_track_caller(size, flags, node, \
  639 			_RET_IP_)
  640 
  641 #else /* CONFIG_NUMA */
  642 
  643 #define kmalloc_node_track_caller(size, flags, node) \
  644 	kmalloc_track_caller(size, flags)
  645 
  646 #endif /* CONFIG_NUMA */
  647 
  648 /*
  649  * Shortcuts
  650  */
  651 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  652 {
  653 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  654 }
  655 
  656 /**
  657  * kzalloc - allocate memory. The memory is set to zero.
  658  * @size: how many bytes of memory are required.
  659  * @flags: the type of memory to allocate (see kmalloc).
  660  */
  661 static inline void *kzalloc(size_t size, gfp_t flags)
  662 {
  663 	return kmalloc(size, flags | __GFP_ZERO);
  664 }
  665 
  666 /**
  667  * kzalloc_node - allocate zeroed memory from a particular memory node.
  668  * @size: how many bytes of memory are required.
  669  * @flags: the type of memory to allocate (see kmalloc).
  670  * @node: memory node from which to allocate
  671  */
  672 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  673 {
  674 	return kmalloc_node(size, flags | __GFP_ZERO, node);
  675 }
  676 
  677 unsigned int kmem_cache_size(struct kmem_cache *s);
  678 void __init kmem_cache_init_late(void);
  679 
  680 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
  681 int slab_prepare_cpu(unsigned int cpu);
  682 int slab_dead_cpu(unsigned int cpu);
  683 #else
  684 #define slab_prepare_cpu	NULL
  685 #define slab_dead_cpu		NULL
  686 #endif
  687 
  688 #endif	/* _LINUX_SLAB_H */

    1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
  121  * Despite its name it doesn't necessarily have to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with LOADs and STOREs inside this section.
  124  * spin_lock() is the one-way barrier, this LOAD can not escape out
  125  * of the region. So the default implementation simply ensures that
  126  * a STORE can not move into the critical section, smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /**
  134  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  135  * @lock: the spinlock in question.
  136  */
  137 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  138 
  139 #ifdef CONFIG_DEBUG_SPINLOCK
  140  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  142  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  143  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  144 #else
  145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  146 {
  147 	__acquire(lock);
  148 	arch_spin_lock(&lock->raw_lock);
  149 }
  150 
  151 static inline void
  152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  153 {
  154 	__acquire(lock);
  155 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  156 }
  157 
  158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  159 {
  160 	return arch_spin_trylock(&(lock)->raw_lock);
  161 }
  162 
  163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  164 {
  165 	arch_spin_unlock(&lock->raw_lock);
  166 	__release(lock);
  167 }
  168 #endif
  169 
  170 /*
  171  * Define the various spin_lock methods.  Note we define these
  172  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  173  * various methods are defined as nops in the case they are not
  174  * required.
  175  */
  176 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  177 
  178 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  179 
  180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  181 # define raw_spin_lock_nested(lock, subclass) \
  182 	_raw_spin_lock_nested(lock, subclass)
  183 
  184 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  185 	 do {								\
  186 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  187 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  188 	 } while (0)
  189 #else
  190 /*
  191  * Always evaluate the 'subclass' argument to avoid that the compiler
  192  * warns about set-but-not-used variables when building with
  193  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  194  */
  195 # define raw_spin_lock_nested(lock, subclass)		\
  196 	_raw_spin_lock(((void)(subclass), (lock)))
  197 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  198 #endif
  199 
  200 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  201 
  202 #define raw_spin_lock_irqsave(lock, flags)			\
  203 	do {						\
  204 		typecheck(unsigned long, flags);	\
  205 		flags = _raw_spin_lock_irqsave(lock);	\
  206 	} while (0)
  207 
  208 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  209 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  210 	do {								\
  211 		typecheck(unsigned long, flags);			\
  212 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  213 	} while (0)
  214 #else
  215 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  216 	do {								\
  217 		typecheck(unsigned long, flags);			\
  218 		flags = _raw_spin_lock_irqsave(lock);			\
  219 	} while (0)
  220 #endif
  221 
  222 #else
  223 
  224 #define raw_spin_lock_irqsave(lock, flags)		\
  225 	do {						\
  226 		typecheck(unsigned long, flags);	\
  227 		_raw_spin_lock_irqsave(lock, flags);	\
  228 	} while (0)
  229 
  230 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  231 	raw_spin_lock_irqsave(lock, flags)
  232 
  233 #endif
  234 
  235 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  236 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  237 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  238 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  239 
  240 #define raw_spin_unlock_irqrestore(lock, flags)		\
  241 	do {							\
  242 		typecheck(unsigned long, flags);		\
  243 		_raw_spin_unlock_irqrestore(lock, flags);	\
  244 	} while (0)
  245 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  246 
  247 #define raw_spin_trylock_bh(lock) \
  248 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  249 
  250 #define raw_spin_trylock_irq(lock) \
  251 ({ \
  252 	local_irq_disable(); \
  253 	raw_spin_trylock(lock) ? \
  254 	1 : ({ local_irq_enable(); 0;  }); \
  255 })
  256 
  257 #define raw_spin_trylock_irqsave(lock, flags) \
  258 ({ \
  259 	local_irq_save(flags); \
  260 	raw_spin_trylock(lock) ? \
  261 	1 : ({ local_irq_restore(flags); 0; }); \
  262 })
  263 
  264 /**
  265  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  266  * @lock: the spinlock in question.
  267  */
  268 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  269 
  270 /* Include rwlock functions */
  271 #include <linux/rwlock.h>
  272 
  273 /*
  274  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  275  */
  276 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  277 # include <linux/spinlock_api_smp.h>
  278 #else
  279 # include <linux/spinlock_api_up.h>
  280 #endif
  281 
  282 /*
  283  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  284  */
  285 
  286 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  287 {
  288 	return &lock->rlock;
  289 }
  290 
  291 #define spin_lock_init(_lock)				\
  292 do {							\
  293 	spinlock_check(_lock);				\
  294 	raw_spin_lock_init(&(_lock)->rlock);		\
  295 } while (0)
  296 
  297 static __always_inline void spin_lock(spinlock_t *lock)
  298 {
  299 	raw_spin_lock(&lock->rlock);
  300 }
  301 
  302 static __always_inline void spin_lock_bh(spinlock_t *lock)
  303 {
  304 	raw_spin_lock_bh(&lock->rlock);
  305 }
  306 
  307 static __always_inline int spin_trylock(spinlock_t *lock)
  308 {
  309 	return raw_spin_trylock(&lock->rlock);
  310 }
  311 
  312 #define spin_lock_nested(lock, subclass)			\
  313 do {								\
  314 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  315 } while (0)
  316 
  317 #define spin_lock_nest_lock(lock, nest_lock)				\
  318 do {									\
  319 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  320 } while (0)
  321 
  322 static __always_inline void spin_lock_irq(spinlock_t *lock)
  323 {
  324 	raw_spin_lock_irq(&lock->rlock);
  325 }
  326 
  327 #define spin_lock_irqsave(lock, flags)				\
  328 do {								\
  329 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  330 } while (0)
  331 
  332 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  333 do {									\
  334 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  335 } while (0)
  336 
  337 static __always_inline void spin_unlock(spinlock_t *lock)
  338 {
  339 	raw_spin_unlock(&lock->rlock);
  340 }
  341 
  342 static __always_inline void spin_unlock_bh(spinlock_t *lock)
  343 {
  344 	raw_spin_unlock_bh(&lock->rlock);
  345 }
  346 
  347 static __always_inline void spin_unlock_irq(spinlock_t *lock)
  348 {
  349 	raw_spin_unlock_irq(&lock->rlock);
  350 }
  351 
  352 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  353 {
  354 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  355 }
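
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the canonical irqsave critical section; demo_lock and demo_count are
 * assumptions for illustration.
 */
static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

static void demo_count_inc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* also masks local IRQs */
	demo_count++;		/* safe against IRQ-context users of demo_lock */
	spin_unlock_irqrestore(&demo_lock, flags);
}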
  356 
  357 static __always_inline int spin_trylock_bh(spinlock_t *lock)
  358 {
  359 	return raw_spin_trylock_bh(&lock->rlock);
  360 }
  361 
  362 static __always_inline int spin_trylock_irq(spinlock_t *lock)
  363 {
  364 	return raw_spin_trylock_irq(&lock->rlock);
  365 }
  366 
  367 #define spin_trylock_irqsave(lock, flags)			\
  368 ({								\
  369 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  370 })
  371 
  372 static __always_inline void spin_unlock_wait(spinlock_t *lock)
  373 {
  374 	raw_spin_unlock_wait(&lock->rlock);
  375 }
  376 
  377 static __always_inline int spin_is_locked(spinlock_t *lock)
  378 {
  379 	return raw_spin_is_locked(&lock->rlock);
  380 }
  381 
  382 static __always_inline int spin_is_contended(spinlock_t *lock)
  383 {
  384 	return raw_spin_is_contended(&lock->rlock);
  385 }
  386 
  387 static __always_inline int spin_can_lock(spinlock_t *lock)
  388 {
  389 	return raw_spin_can_lock(&lock->rlock);
  390 }
  391 
  392 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  393 
  394 /*
  395  * Pull the atomic_t declaration:
  396  * (asm-mips/atomic.h needs above definitions)
  397  */
  398 #include <linux/atomic.h>
  399 /**
  400  * atomic_dec_and_lock - lock on reaching reference count zero
  401  * @atomic: the atomic counter
  402  * @lock: the spinlock in question
  403  *
  404  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  405  * @lock.  Returns false for all other cases.
  406  */
  407 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  408 #define atomic_dec_and_lock(atomic, lock) \
  409 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
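
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the refcount-drop pattern atomic_dec_and_lock() exists for. demo_obj and
 * demo_list_lock are assumptions; list_del()/kfree() come from other headers.
 */
struct demo_obj {
	atomic_t refcnt;
	struct list_head node;
};

static DEFINE_SPINLOCK(demo_list_lock);

static void demo_obj_put(struct demo_obj *obj)
{
	/* The lock is taken only when the count actually hits zero */
	if (atomic_dec_and_lock(&obj->refcnt, &demo_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&demo_list_lock);
		kfree(obj);
	}
}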
  410 
  411 #endif /* __LINUX_SPINLOCK_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether there is an error in your driver.
The Error trace column shows a path along which the given rule is violated. You can expand/collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand/collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over them shows full file names, and clicking one opens the content of the corresponding file.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description | 
| linux-4.11-rc1.tar.xz | drivers/usb/gadget/udc/mv_u3d_core.ko | 320_7a | CPAchecker | Bug | Fixed | 2017-04-01 00:10:49 | L0269 | 
Comment
Reported: 1 Apr 2017