Bug
        
Error # 94
Error trace
         {    20     typedef unsigned char __u8;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    30     typedef unsigned long long __u64;    15     typedef signed char s8;    16     typedef unsigned char u8;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;    32     typedef __u16 __le16;    34     typedef __u32 __le32;   229     struct kernel_symbol {   unsigned long value;   const char *name; } ;    33     struct module ;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;   102     typedef __s32 int32_t;   108     typedef __u32 uint32_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   152     typedef u64 dma_addr_t;   157     typedef unsigned int gfp_t;   158     typedef unsigned int fmode_t;   159     typedef unsigned int oom_flags_t;   177     struct __anonstruct_atomic_t_6 {   int counter; } ;   177     typedef struct __anonstruct_atomic_t_6 atomic_t;   182     struct __anonstruct_atomic64_t_7 {   long counter; } ;   182     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   183     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   188     struct hlist_node ;   188     struct hlist_head {   struct hlist_node *first; } ;   192     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   203     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;    67     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    66     struct __anonstruct____missing_field_name_9 {   unsigned int a;   unsigned int b; } ;    66     struct __anonstruct____missing_field_name_10 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    66     union __anonunion____missing_field_name_8 {   struct 
__anonstruct____missing_field_name_9 __annonCompField4;   struct __anonstruct____missing_field_name_10 __annonCompField5; } ;    66     struct desc_struct {   union __anonunion____missing_field_name_8 __annonCompField6; } ;    12     typedef unsigned long pteval_t;    13     typedef unsigned long pmdval_t;    15     typedef unsigned long pgdval_t;    16     typedef unsigned long pgprotval_t;    18     struct __anonstruct_pte_t_11 {   pteval_t pte; } ;    18     typedef struct __anonstruct_pte_t_11 pte_t;    20     struct pgprot {   pgprotval_t pgprot; } ;   218     typedef struct pgprot pgprot_t;   220     struct __anonstruct_pgd_t_12 {   pgdval_t pgd; } ;   220     typedef struct __anonstruct_pgd_t_12 pgd_t;   259     struct __anonstruct_pmd_t_14 {   pmdval_t pmd; } ;   259     typedef struct __anonstruct_pmd_t_14 pmd_t;   361     struct page ;   361     typedef struct page *pgtable_t;   372     struct file ;   385     struct seq_file ;   423     struct thread_struct ;   425     struct mm_struct ;   426     struct task_struct ;   427     struct cpumask ;    20     struct qspinlock {   atomic_t val; } ;    33     typedef struct qspinlock arch_spinlock_t;    34     struct qrwlock {   atomic_t cnts;   arch_spinlock_t lock; } ;    14     typedef struct qrwlock arch_rwlock_t;   131     typedef void (*ctor_fn_t)();   234     struct _ddebug {   const char *modname;   const char *function;   const char *filename;   const char *format;   unsigned int lineno;   unsigned char flags; } ;    48     struct device ;   420     struct file_operations ;   432     struct completion ;   692     struct lockdep_map ;    19     struct math_emu_info {   long ___orig_eip;   struct pt_regs *regs; } ;   328     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;   102     struct cpumask {   unsigned long bits[128U]; } ;    15     typedef struct cpumask cpumask_t;   652     typedef struct cpumask *cpumask_var_t;   260     struct fregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;    26     struct __anonstruct____missing_field_name_24 {   u64 rip;   u64 rdp; } ;    26     struct __anonstruct____missing_field_name_25 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;    26     union __anonunion____missing_field_name_23 {   struct __anonstruct____missing_field_name_24 __annonCompField10;   struct __anonstruct____missing_field_name_25 __annonCompField11; } ;    26     union __anonunion____missing_field_name_26 {   u32 padding1[12U];   u32 sw_reserved[12U]; } ;    26     struct fxregs_state {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion____missing_field_name_23 __annonCompField12;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion____missing_field_name_26 __annonCompField13; } ;    66     struct swregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   155     struct xstate_header {   u64 xfeatures;   u64 xcomp_bv;   u64 reserved[6U]; } ;   161     struct xregs_state {   struct fxregs_state i387;   struct xstate_header header;   u8 __reserved[464U]; } ;   179     union fpregs_state {   struct fregs_state fsave;   struct fxregs_state fxsave;   struct swregs_state soft;   struct xregs_state xsave;   u8 __padding[4096U]; } ;   
194     struct fpu {   unsigned int last_cpu;   unsigned char fpstate_active;   unsigned char fpregs_active;   unsigned char counter;   union fpregs_state state; } ;   170     struct seq_operations ;   369     struct perf_event ;   370     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   unsigned long fs;   unsigned long gs;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   struct fpu fpu; } ;    23     typedef atomic64_t atomic_long_t;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    28     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct list_head hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned int version;   unsigned long ops;   const char *name;   int name_version;   unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int cpu;   unsigned long ip; } ;   205     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references;   unsigned int pin_count; } ;   546     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct raw_spinlock raw_spinlock_t;    33     struct __anonstruct____missing_field_name_34 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion____missing_field_name_33 {   struct raw_spinlock rlock;   struct __anonstruct____missing_field_name_34 __annonCompField15; } ;    33     struct spinlock {   union __anonunion____missing_field_name_33 __annonCompField16; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_35 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_35 rwlock_t;    13     struct optimistic_spin_queue {   atomic_t tail; } ;    39     struct mutex {   atomic_t count;   spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   void *magic;   struct lockdep_map dep_map; } ;    67     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   void *magic; } ;    64     struct usb_device ;   135     struct timespec ;   136     struct compat_timespec ;   137     struct __anonstruct_futex_37 {   u32 *uaddr;   u32 val;   u32 flags;   u32 bitset;   u64 time;   u32 *uaddr2; } ;   137     struct __anonstruct_nanosleep_38 {   
clockid_t clockid;   struct timespec *rmtp;   struct compat_timespec *compat_rmtp;   u64 expires; } ;   137     struct pollfd ;   137     struct __anonstruct_poll_39 {   struct pollfd *ufds;   int nfds;   int has_timeout;   unsigned long tv_sec;   unsigned long tv_nsec; } ;   137     union __anonunion____missing_field_name_36 {   struct __anonstruct_futex_37 futex;   struct __anonstruct_nanosleep_38 nanosleep;   struct __anonstruct_poll_39 poll; } ;   137     struct restart_block {   long int (*fn)(struct restart_block *);   union __anonunion____missing_field_name_36 __annonCompField17; } ;   416     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    52     typedef struct seqcount seqcount_t;   404     struct __anonstruct_seqlock_t_52 {   struct seqcount seqcount;   spinlock_t lock; } ;   404     typedef struct __anonstruct_seqlock_t_52 seqlock_t;   598     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;    83     struct user_namespace ;    22     struct __anonstruct_kuid_t_53 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_53 kuid_t;    27     struct __anonstruct_kgid_t_54 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_54 kgid_t;   139     struct kstat {   u64 ino;   dev_t dev;   umode_t mode;   unsigned int nlink;   kuid_t uid;   kgid_t gid;   dev_t rdev;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   unsigned long blksize;   unsigned long long blocks; } ;    36     struct vm_area_struct ;    38     struct __wait_queue_head {   spinlock_t lock;   struct list_head task_list; } ;    43     typedef struct __wait_queue_head wait_queue_head_t;    95     struct __anonstruct_nodemask_t_55 {   unsigned long bits[16U]; } ;    95     typedef struct __anonstruct_nodemask_t_55 nodemask_t;   810     struct rw_semaphore ;   811     struct rw_semaphore {   long count;   struct list_head wait_list;   raw_spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct task_struct *owner;   struct lockdep_map dep_map; } ;   172     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   446     union ktime {   s64 tv64; } ;    41     typedef union ktime ktime_t;  1133     struct timer_list {   struct hlist_node entry;   unsigned long expires;   void (*function)(unsigned long);   unsigned long data;   u32 flags;   int slack;   int start_pid;   void *start_site;   char start_comm[16U];   struct lockdep_map lockdep_map; } ;   238     struct hrtimer ;   239     enum hrtimer_restart ;   240     struct rb_node {   unsigned long __rb_parent_color;   struct rb_node *rb_right;   struct rb_node *rb_left; } ;    41     struct rb_root {   struct rb_node *rb_node; } ;   838     struct nsproxy ;   259     struct workqueue_struct ;   260     struct work_struct ;    54     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   107     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq;   int cpu; } ;    58     struct pm_message {   int event; } ;    64     typedef struct pm_message pm_message_t;    65     struct dev_pm_ops {   int (*prepare)(struct device *);   void (*complete)(struct device *);   int (*suspend)(struct device *);   int (*resume)(struct device *);   int (*freeze)(struct device *);   int (*thaw)(struct device *);   int (*poweroff)(struct device *);   int (*restore)(struct device *);   int (*suspend_late)(struct device *); 
  int (*resume_early)(struct device *);   int (*freeze_late)(struct device *);   int (*thaw_early)(struct device *);   int (*poweroff_late)(struct device *);   int (*restore_early)(struct device *);   int (*suspend_noirq)(struct device *);   int (*resume_noirq)(struct device *);   int (*freeze_noirq)(struct device *);   int (*thaw_noirq)(struct device *);   int (*poweroff_noirq)(struct device *);   int (*restore_noirq)(struct device *);   int (*runtime_suspend)(struct device *);   int (*runtime_resume)(struct device *);   int (*runtime_idle)(struct device *); } ;   320     enum rpm_status {   RPM_ACTIVE = 0,   RPM_RESUMING = 1,   RPM_SUSPENDED = 2,   RPM_SUSPENDING = 3 } ;   327     enum rpm_request {   RPM_REQ_NONE = 0,   RPM_REQ_IDLE = 1,   RPM_REQ_SUSPEND = 2,   RPM_REQ_AUTOSUSPEND = 3,   RPM_REQ_RESUME = 4 } ;   335     struct wakeup_source ;   336     struct wake_irq ;   337     struct pm_domain_data ;   338     struct pm_subsys_data {   spinlock_t lock;   unsigned int refcount;   struct list_head clock_list;   struct pm_domain_data *domain_data; } ;   556     struct dev_pm_qos ;   556     struct dev_pm_info {   pm_message_t power_state;   unsigned char can_wakeup;   unsigned char async_suspend;   bool is_prepared;   bool is_suspended;   bool is_noirq_suspended;   bool is_late_suspended;   bool ignore_children;   bool early_init;   bool direct_complete;   spinlock_t lock;   struct list_head entry;   struct completion completion;   struct wakeup_source *wakeup;   bool wakeup_path;   bool syscore;   struct timer_list suspend_timer;   unsigned long timer_expires;   struct work_struct work;   wait_queue_head_t wait_queue;   struct wake_irq *wakeirq;   atomic_t usage_count;   atomic_t child_count;   unsigned char disable_depth;   unsigned char idle_notification;   unsigned char request_pending;   unsigned char deferred_resume;   unsigned char run_wake;   unsigned char runtime_auto;   unsigned char no_callbacks;   unsigned char irq_safe;   unsigned char use_autosuspend;   unsigned char timer_autosuspends;   unsigned char memalloc_noio;   enum rpm_request request;   enum rpm_status runtime_status;   int runtime_error;   int autosuspend_delay;   unsigned long last_busy;   unsigned long active_jiffies;   unsigned long suspended_jiffies;   unsigned long accounting_timestamp;   struct pm_subsys_data *subsys_data;   void (*set_latency_tolerance)(struct device *, s32 );   struct dev_pm_qos *qos; } ;   615     struct dev_pm_domain {   struct dev_pm_ops ops;   void (*detach)(struct device *, bool );   int (*activate)(struct device *);   void (*sync)(struct device *);   void (*dismiss)(struct device *); } ;    25     struct ldt_struct ;    25     struct __anonstruct_mm_context_t_124 {   struct ldt_struct *ldt;   unsigned short ia32_compat;   struct mutex lock;   void *vdso;   atomic_t perf_rdpmc_allowed; } ;    25     typedef struct __anonstruct_mm_context_t_124 mm_context_t;  1296     struct llist_node ;    64     struct llist_node {   struct llist_node *next; } ;    37     struct cred ;    19     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct____missing_field_name_160 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct __anonstruct____missing_field_name_161 {   struct callback_head dup_xol_work;   unsigned long 
dup_xol_addr; } ;    73     union __anonunion____missing_field_name_159 {   struct __anonstruct____missing_field_name_160 __annonCompField32;   struct __anonstruct____missing_field_name_161 __annonCompField33; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion____missing_field_name_159 __annonCompField34;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    94     struct return_instance {   struct uprobe *uprobe;   unsigned long func;   unsigned long stack;   unsigned long orig_ret_vaddr;   bool chained;   struct return_instance *next; } ;   110     struct xol_area ;   111     struct uprobes_state {   struct xol_area *xol_area; } ;   150     struct address_space ;   151     struct mem_cgroup ;    31     typedef void compound_page_dtor(struct page *);    32     union __anonunion____missing_field_name_162 {   struct address_space *mapping;   void *s_mem; } ;    32     union __anonunion____missing_field_name_164 {   unsigned long index;   void *freelist; } ;    32     struct __anonstruct____missing_field_name_168 {   unsigned short inuse;   unsigned short objects;   unsigned char frozen; } ;    32     union __anonunion____missing_field_name_167 {   atomic_t _mapcount;   struct __anonstruct____missing_field_name_168 __annonCompField37;   int units; } ;    32     struct __anonstruct____missing_field_name_166 {   union __anonunion____missing_field_name_167 __annonCompField38;   atomic_t _count; } ;    32     union __anonunion____missing_field_name_165 {   unsigned long counters;   struct __anonstruct____missing_field_name_166 __annonCompField39;   unsigned int active; } ;    32     struct __anonstruct____missing_field_name_163 {   union __anonunion____missing_field_name_164 __annonCompField36;   union __anonunion____missing_field_name_165 __annonCompField40; } ;    32     struct __anonstruct____missing_field_name_170 {   struct page *next;   int pages;   int pobjects; } ;    32     struct slab ;    32     struct __anonstruct____missing_field_name_171 {   compound_page_dtor *compound_dtor;   unsigned long compound_order; } ;    32     union __anonunion____missing_field_name_169 {   struct list_head lru;   struct __anonstruct____missing_field_name_170 __annonCompField42;   struct slab *slab_page;   struct callback_head callback_head;   struct __anonstruct____missing_field_name_171 __annonCompField43;   pgtable_t pmd_huge_pte; } ;    32     struct kmem_cache ;    32     union __anonunion____missing_field_name_172 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache;   struct page *first_page; } ;    32     struct page {   unsigned long flags;   union __anonunion____missing_field_name_162 __annonCompField35;   struct __anonstruct____missing_field_name_163 __annonCompField41;   union __anonunion____missing_field_name_169 __annonCompField44;   union __anonunion____missing_field_name_172 __annonCompField45;   struct mem_cgroup *mem_cgroup; } ;   172     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;   257     struct userfaultfd_ctx ;   257     struct vm_userfaultfd_ctx {   struct userfaultfd_ctx *ctx; } ;   264     struct __anonstruct_shared_173 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   264     struct anon_vma ;   264     struct vm_operations_struct ;   264     struct mempolicy ;   264     struct vm_area_struct {   unsigned long vm_start;   unsigned long 
vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   struct __anonstruct_shared_173 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy;   struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;   337     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   342     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   355     struct task_rss_stat {   int events;   int count[3U]; } ;   363     struct mm_rss_stat {   atomic_long_t count[3U]; } ;   368     struct kioctx_table ;   369     struct linux_binfmt ;   369     struct mmu_notifier_mm ;   369     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end;   pgd_t *pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   atomic_long_t nr_pmds;   int map_count;   spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long shared_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct file *exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state;   void *bd_addr; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;    53     union __anonunion____missing_field_name_178 {   unsigned long bitmap[4U];   struct callback_head callback_head; } ;    53     struct idr_layer {   int prefix;   int layer;   struct idr_layer *ary[256U];   int count;   union __anonunion____missing_field_name_178 __annonCompField46; } ;    41     struct idr {   struct idr_layer *hint;   struct idr_layer *top;   int layers;   int cur;   spinlock_t lock;   int id_free_cnt;   struct idr_layer *id_free; } ;   124     struct ida_bitmap {   long nr_busy;   unsigned long bitmap[15U]; } ;   153     struct ida {   
struct idr idr;   struct ida_bitmap *free_bitmap; } ;   185     struct dentry ;   186     struct iattr ;   187     struct super_block ;   188     struct file_system_type ;   189     struct kernfs_open_node ;   190     struct kernfs_iattrs ;   213     struct kernfs_root ;   213     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    85     struct kernfs_node ;    85     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    89     struct kernfs_ops ;    89     struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size;   struct kernfs_node *notify_next; } ;    96     union __anonunion____missing_field_name_183 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    96     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion____missing_field_name_183 __annonCompField47;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   138     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int (*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;   155     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   171     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   void *priv;   struct mutex mutex;   int event;   struct list_head list;   char *prealloc_buf;   size_t atomic_write_len;   bool mmapped;   const struct vm_operations_struct *vm_ops; } ;   188     struct kernfs_ops {   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   bool prealloc;   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   481     struct sock ;   482     struct kobject ;   483     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   489     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    82     struct bin_attribute {   struct attribute attr;   size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t 
 (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   155     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;   509     struct kref {   atomic_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   115     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   123     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   131     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   148     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   223     struct kernel_param ;   228     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    62     struct kparam_string ;    62     struct kparam_array ;    62     union __anonunion____missing_field_name_184 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    62     struct kernel_param {   const char *name;   struct module *mod;   const struct kernel_param_ops *ops;   const u16 perm;   s8 level;   u8 flags;   union __anonunion____missing_field_name_184 __annonCompField48; } ;    83     struct kparam_string {   unsigned int maxlen;   char *string; } ;    89     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   469     struct latch_tree_node {   struct rb_node node[2U]; } ;   211     struct mod_arch_specific { } ;    38     struct module_param_attrs ;    38     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    48     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;    74     struct exception_table_entry ;   290     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   297     struct mod_tree_node {   struct module *mod;   struct latch_tree_node node; } ;   304     struct module_sect_attrs ;   304     
struct module_notes_attrs ;   304     struct tracepoint ;   304     struct trace_event_call ;   304     struct trace_enum_map ;   304     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const unsigned long *crcs;   unsigned int num_syms;   struct mutex param_lock;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const unsigned long *gpl_crcs;   const struct kernel_symbol *unused_syms;   const unsigned long *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const unsigned long *unused_gpl_crcs;   bool sig_ok;   bool async_probe_requested;   const struct kernel_symbol *gpl_future_syms;   const unsigned long *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   void *module_init;   void *module_core;   unsigned int init_size;   unsigned int core_size;   unsigned int init_text_size;   unsigned int core_text_size;   struct mod_tree_node mtn_core;   struct mod_tree_node mtn_init;   unsigned int init_ro_size;   unsigned int core_ro_size;   struct mod_arch_specific arch;   unsigned int taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   Elf64_Sym *symtab;   Elf64_Sym *core_symtab;   unsigned int num_symtab;   unsigned int core_num_syms;   char *strtab;   char *core_strtab;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct trace_event_call **trace_events;   unsigned int num_trace_events;   struct trace_enum_map **trace_enums;   unsigned int num_trace_enums;   bool klp_alive;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   atomic_t refcnt;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;    84     struct plist_node {   int prio;   struct list_head prio_list;   struct list_head node_list; } ;     4     typedef unsigned long cputime_t;    25     struct sem_undo_list ;    25     struct sysv_sem {   struct sem_undo_list *undo_list; } ;    78     struct user_struct ;    26     struct sysv_shm {   struct list_head shm_clist; } ;    24     struct __anonstruct_sigset_t_192 {   unsigned long sig[1U]; } ;    24     typedef struct __anonstruct_sigset_t_192 sigset_t;    25     struct siginfo ;    17     typedef void __signalfn_t(int);    18     typedef __signalfn_t *__sighandler_t;    20     typedef void __restorefn_t();    21     typedef __restorefn_t *__sigrestore_t;    34     union sigval {   int sival_int;   void *sival_ptr; } ;    10     typedef union sigval sigval_t;    11     struct __anonstruct__kill_194 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;    11     struct __anonstruct__timer_195 {   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;    11     struct __anonstruct__rt_196 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   
sigval_t _sigval; } ;    11     struct __anonstruct__sigchld_197 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;    11     struct __anonstruct__addr_bnd_199 {   void *_lower;   void *_upper; } ;    11     struct __anonstruct__sigfault_198 {   void *_addr;   short _addr_lsb;   struct __anonstruct__addr_bnd_199 _addr_bnd; } ;    11     struct __anonstruct__sigpoll_200 {   long _band;   int _fd; } ;    11     struct __anonstruct__sigsys_201 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;    11     union __anonunion__sifields_193 {   int _pad[28U];   struct __anonstruct__kill_194 _kill;   struct __anonstruct__timer_195 _timer;   struct __anonstruct__rt_196 _rt;   struct __anonstruct__sigchld_197 _sigchld;   struct __anonstruct__sigfault_198 _sigfault;   struct __anonstruct__sigpoll_200 _sigpoll;   struct __anonstruct__sigsys_201 _sigsys; } ;    11     struct siginfo {   int si_signo;   int si_errno;   int si_code;   union __anonunion__sifields_193 _sifields; } ;   113     typedef struct siginfo siginfo_t;    22     struct sigpending {   struct list_head list;   sigset_t signal; } ;   243     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;   257     struct k_sigaction {   struct sigaction sa; } ;   443     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   450     struct pid_namespace ;   450     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;    53     struct seccomp_filter ;    54     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;    40     struct rt_mutex_waiter ;    41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;    11     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   unsigned long state;   int start_pid;   void *start_site;   char start_comm[16U]; } ;   123     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t  (*get_time)();   ktime_t offset; } ;   156     struct hrtimer_cpu_base {   raw_spinlock_t lock;   seqcount_t seq;   struct hrtimer *running;   unsigned int cpu;   unsigned int active_bases;   unsigned int clock_was_set_seq;   bool migration_enabled;   bool nohz_active;   unsigned char in_hrtirq;   unsigned char hres_active;   unsigned char hang_detected;   ktime_t expires_next;   struct hrtimer *next_timer;   unsigned int nr_events;   unsigned int nr_retries;   unsigned int nr_hangs;   unsigned int max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;   466     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;  
  45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    39     struct assoc_array_ptr ;    39     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ;    31     typedef int32_t key_serial_t;    34     typedef uint32_t key_perm_t;    35     struct key ;    36     struct signal_struct ;    37     struct key_type ;    41     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;   123     union __anonunion____missing_field_name_220 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;   123     struct key_user ;   123     union __anonunion____missing_field_name_221 {   time_t expiry;   time_t revoked_at; } ;   123     struct __anonstruct____missing_field_name_223 {   struct key_type *type;   char *description; } ;   123     union __anonunion____missing_field_name_222 {   struct keyring_index_key index_key;   struct __anonstruct____missing_field_name_223 __annonCompField51; } ;   123     union __anonunion_type_data_224 {   struct list_head link;   unsigned long x[2U];   void *p[2U];   int reject_error; } ;   123     union __anonunion_payload_226 {   unsigned long value;   void *rcudata;   void *data;   void *data2[2U]; } ;   123     union __anonunion____missing_field_name_225 {   union __anonunion_payload_226 payload;   struct assoc_array keys; } ;   123     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion____missing_field_name_220 __annonCompField49;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion____missing_field_name_221 __annonCompField50;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion____missing_field_name_222 __annonCompField52;   union __anonunion_type_data_224 type_data;   union __anonunion____missing_field_name_225 __annonCompField53; } ;   358     struct audit_context ;    27     struct group_info {   atomic_t usage;   int ngroups;   int nblocks;   kgid_t small_block[32U];   kgid_t *blocks[0U]; } ;    90     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t cap_bset;   kernel_cap_t cap_ambient;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   377     struct percpu_ref ;    55     typedef void percpu_ref_func_t(struct percpu_ref *);    68     struct percpu_ref {   atomic_long_t count;   unsigned long percpu_count_ptr;   percpu_ref_func_t *release;   percpu_ref_func_t *confirm_switch;   bool force_atomic;   struct callback_head rcu; } ;   327     struct percpu_rw_semaphore {   unsigned int *fast_read_ctr;   atomic_t write_ctr;   struct rw_semaphore rw_sem;   atomic_t slow_read_ctr;   wait_queue_head_t write_waitq; } ;    53     struct cgroup ;    54     struct cgroup_root ;    55     struct cgroup_subsys ;    56     struct cgroup_taskset ;   103     struct cgroup_subsys_state {   
struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   struct list_head sibling;   struct list_head children;   int id;   unsigned int flags;   u64 serial_nr;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   129     struct css_set {   atomic_t refcount;   struct hlist_node hlist;   struct list_head tasks;   struct list_head mg_tasks;   struct list_head cgrp_links;   struct cgroup *dfl_cgrp;   struct cgroup_subsys_state *subsys[13U];   struct list_head mg_preload_node;   struct list_head mg_node;   struct cgroup *mg_src_cgrp;   struct css_set *mg_dst_cset;   struct list_head e_cset_node[13U];   struct callback_head callback_head; } ;   202     struct cgroup {   struct cgroup_subsys_state self;   unsigned long flags;   int id;   int populated_cnt;   struct kernfs_node *kn;   struct kernfs_node *procs_kn;   struct kernfs_node *populated_kn;   unsigned int subtree_control;   unsigned int child_subsys_mask;   struct cgroup_subsys_state *subsys[13U];   struct cgroup_root *root;   struct list_head cset_links;   struct list_head e_csets[13U];   struct list_head pidlists;   struct mutex pidlist_mutex;   wait_queue_head_t offline_waitq;   struct work_struct release_agent_work; } ;   275     struct cgroup_root {   struct kernfs_root *kf_root;   unsigned int subsys_mask;   int hierarchy_id;   struct cgroup cgrp;   atomic_t nr_cgrps;   struct list_head root_list;   unsigned int flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   311     struct cftype {   char name[64U];   unsigned long private;   umode_t mode;   size_t max_write_len;   unsigned int flags;   struct cgroup_subsys *ss;   struct list_head node;   struct kernfs_ops *kf_ops;   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   struct lock_class_key lockdep_key; } ;   393     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_released)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   void (*css_reset)(struct cgroup_subsys_state *);   void (*css_e_css_changed)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *);   int (*can_fork)(struct task_struct *, void **);   void (*cancel_fork)(struct task_struct *, void *);   void (*fork)(struct task_struct *, void *);   void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   int disabled;   int early_init;   bool broken_hierarchy;   bool warned_broken_hierarchy;   int id;   const char *name;   const char *legacy_name;   struct cgroup_root *root;   struct idr css_idr;   struct list_head cfts;   
struct cftype *dfl_cftypes;   struct cftype *legacy_cftypes;   unsigned int depends_on; } ;   128     struct futex_pi_state ;   129     struct robust_list_head ;   130     struct bio_list ;   131     struct fs_struct ;   132     struct perf_event_context ;   133     struct blk_plug ;   135     struct nameidata ;   188     struct cfs_rq ;   189     struct task_group ;   477     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   516     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t ac_stime;   unsigned long ac_minflt;   unsigned long ac_majflt; } ;   524     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   531     struct prev_cputime {   cputime_t utime;   cputime_t stime;   raw_spinlock_t lock; } ;   556     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   572     struct task_cputime_atomic {   atomic64_t utime;   atomic64_t stime;   atomic64_t sum_exec_runtime; } ;   594     struct thread_group_cputimer {   struct task_cputime_atomic cputime_atomic;   int running; } ;   630     struct autogroup ;   631     struct tty_struct ;   631     struct taskstats ;   631     struct tty_audit_buf ;   631     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   struct autogroup *autogroup;   seqlock_t stats_lock;   cputime_t utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct pacct_struct pacct;   struct taskstats *stats;   unsigned int audit_tty;   unsigned int audit_tty_log_passwd;   struct tty_audit_buf *tty_audit_buf;   oom_flags_t oom_flags;   short oom_score_adj;   short oom_score_adj_min;   struct mutex cred_guard_mutex; } ;   798     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   841     struct backing_dev_info ;   842     struct reclaim_state ;   843     struct 
sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   857     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   u64 blkio_start;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   u64 freepages_start;   u64 freepages_delay;   u32 freepages_count; } ;   905     struct wake_q_node {   struct wake_q_node *next; } ;  1134     struct io_context ;  1168     struct pipe_inode_info ;  1170     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;  1177     struct sched_avg {   u64 last_update_time;   u64 load_sum;   u32 util_sum;   u32 period_contrib;   unsigned long load_avg;   unsigned long util_avg; } ;  1197     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1232     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1264     struct rt_rq ;  1264     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1280     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_new;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;  1346     struct tlbflush_unmap_batch {   struct cpumask cpumask;   bool flush_required;   bool writable; } ;  1365     struct memcg_oom_info {   struct mem_cgroup *memcg;   gfp_t gfp_mask;   int order;   unsigned char may_oom; } ;  1791     struct sched_class ;  1791     struct files_struct ;  1791     struct compat_robust_list_head ;  1791     struct numa_group ;  1791     struct task_struct {   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   unsigned int wakee_flips;   unsigned long wakee_flip_decay_ts;   struct task_struct *last_wakee;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   unsigned long rcu_tasks_nvcsw;   bool rcu_tasks_holdout;   struct list_head rcu_tasks_holdout_list;   int rcu_tasks_idle_cpu;   struct 
sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   u32 vmacache_seqnum;   struct vm_area_struct *vmacache[4U];   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned long jobctl;   unsigned int personality;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   unsigned char sched_migrated;   unsigned char memcg_kmem_skip_account;   unsigned char brk_randomized;   unsigned long atomic_flags;   struct restart_block restart_block;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   u64 start_time;   u64 real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   struct nameidata *nameidata;   struct sysv_sem sysvsem;   struct sysv_shm sysvshm;   unsigned long last_switch_count;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   int (*notifier)(void *);   void *notifier_data;   sigset_t *notifier_mask;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct wake_q_node wake_q;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct 
robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults;   unsigned long total_numa_faults;   unsigned long numa_faults_locality[3U];   unsigned long numa_pages_migrated;   struct tlbflush_unmap_batch tlb_ubc;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   unsigned long timer_slack_ns;   unsigned long default_timer_slack_ns;   unsigned int kasan_depth;   unsigned long trace;   unsigned long trace_recursion;   struct memcg_oom_info memcg_oom;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg;   unsigned long task_state_change;   int pagefault_disabled;   struct thread_struct thread; } ;    62     struct exception_table_entry {   int insn;   int fixup; } ;    13     typedef unsigned long kernel_ulong_t;    39     struct usb_device_id {   __u16 match_flags;   __u16 idVendor;   __u16 idProduct;   __u16 bcdDevice_lo;   __u16 bcdDevice_hi;   __u8 bDeviceClass;   __u8 bDeviceSubClass;   __u8 bDeviceProtocol;   __u8 bInterfaceClass;   __u8 bInterfaceSubClass;   __u8 bInterfaceProtocol;   __u8 bInterfaceNumber;   kernel_ulong_t driver_info; } ;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   221     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   253     struct usb_device_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __le16 bcdUSB;   __u8 bDeviceClass;   __u8 bDeviceSubClass;   __u8 bDeviceProtocol;   __u8 bMaxPacketSize0;   __le16 idVendor;   __le16 idProduct;   __le16 bcdDevice;   __u8 iManufacturer;   __u8 iProduct;   __u8 iSerialNumber;   __u8 bNumConfigurations; } ;   275     struct usb_config_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __le16 wTotalLength;   __u8 bNumInterfaces;   __u8 bConfigurationValue;   __u8 iConfiguration;   __u8 bmAttributes;   __u8 bMaxPower; } ;   343     struct usb_interface_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bInterfaceNumber;   __u8 bAlternateSetting;   __u8 bNumEndpoints;   __u8 bInterfaceClass;   __u8 bInterfaceSubClass;   __u8 bInterfaceProtocol;   __u8 iInterface; } ;   363     struct usb_endpoint_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bEndpointAddress;   __u8 bmAttributes;   __le16 wMaxPacketSize;   __u8 bInterval;   __u8 bRefresh;   __u8 bSynchAddress; } ;   613     struct usb_ss_ep_comp_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bMaxBurst;   __u8 bmAttributes;   __le16 wBytesPerInterval; } ;   704     struct usb_interface_assoc_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bFirstInterface;   __u8 
bInterfaceCount;   __u8 bFunctionClass;   __u8 bFunctionSubClass;   __u8 bFunctionProtocol;   __u8 iFunction; } ;   763     struct usb_bos_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __le16 wTotalLength;   __u8 bNumDeviceCaps; } ;   813     struct usb_ext_cap_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bDevCapabilityType;   __le32 bmAttributes; } ;   823     struct usb_ss_cap_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bDevCapabilityType;   __u8 bmAttributes;   __le16 wSpeedSupported;   __u8 bFunctionalitySupport;   __u8 bU1devExitLat;   __le16 bU2DevExitLat; } ;   852     struct usb_ss_container_id_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bDevCapabilityType;   __u8 bReserved;   __u8 ContainerID[16U]; } ;   917     enum usb_device_speed {   USB_SPEED_UNKNOWN = 0,   USB_SPEED_LOW = 1,   USB_SPEED_FULL = 2,   USB_SPEED_HIGH = 3,   USB_SPEED_WIRELESS = 4,   USB_SPEED_SUPER = 5 } ;   926     enum usb_device_state {   USB_STATE_NOTATTACHED = 0,   USB_STATE_ATTACHED = 1,   USB_STATE_POWERED = 2,   USB_STATE_RECONNECTING = 3,   USB_STATE_UNAUTHENTICATED = 4,   USB_STATE_DEFAULT = 5,   USB_STATE_ADDRESS = 6,   USB_STATE_CONFIGURED = 7,   USB_STATE_SUSPENDED = 8 } ;    63     struct irq_domain ;   672     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    68     struct path ;    69     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   struct user_namespace *user_ns;   void *private; } ;    35     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   227     struct pinctrl ;   228     struct pinctrl_state ;   194     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    48     struct dma_map_ops ;    48     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    14     struct device_private ;    15     struct device_driver ;    16     struct driver_private ;    17     struct class ;    18     struct subsys_private ;    19     struct bus_type ;    20     struct device_node ;    21     struct fwnode_handle ;    22     struct iommu_ops ;    23     struct iommu_group ;    61     struct device_attribute ;    61     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   const struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   139     struct device_type ;   197     enum probe_type {   PROBE_DEFAULT_STRATEGY = 0,   PROBE_PREFER_ASYNCHRONOUS = 1,   PROBE_FORCE_SYNCHRONOUS = 2 
} ;   203     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   enum probe_type probe_type;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   353     struct class_attribute ;   353     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   446     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   514     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   542     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   675     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   684     struct dma_coherent_mem ;   684     struct cma ;   684     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct irq_domain *msi_domain;   struct dev_pin_info *pins;   struct list_head msi_list;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct fwnode_handle *fwnode;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   bool offline_disabled;   bool offline; } ;   838     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct wake_irq *wakeirq;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count; 
  unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;    93     struct hlist_bl_node ;    93     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_257 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_256 {   struct __anonstruct____missing_field_name_257 __annonCompField64; } ;   114     struct lockref {   union __anonunion____missing_field_name_256 __annonCompField65; } ;    50     struct vfsmount ;    51     struct __anonstruct____missing_field_name_259 {   u32 hash;   u32 len; } ;    51     union __anonunion____missing_field_name_258 {   struct __anonstruct____missing_field_name_259 __annonCompField66;   u64 hash_len; } ;    51     struct qstr {   union __anonunion____missing_field_name_258 __annonCompField67;   const unsigned char *name; } ;    90     struct dentry_operations ;    90     union __anonunion_d_u_260 {   struct hlist_node d_alias;   struct callback_head d_rcu; } ;    90     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   struct list_head d_lru;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_260 d_u; } ;   142     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool );   struct inode * (*d_select_inode)(struct dentry *, unsigned int); } ;   586     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    19     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   int nid;   struct mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    80     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;    58     struct __anonstruct____missing_field_name_264 {   struct radix_tree_node *parent;   void *private_data; } ;    58     union __anonunion____missing_field_name_263 {   struct __anonstruct____missing_field_name_264 __annonCompField68;   struct callback_head callback_head; } ;    58     struct radix_tree_node {   unsigned int path;   unsigned int count; 
  union __anonunion____missing_field_name_263 __annonCompField69;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   105     struct radix_tree_root {   unsigned int height;   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    47     struct block_device ;    60     struct bdi_writeback ;    61     struct export_operations ;    64     struct kiocb ;    65     struct poll_table_struct ;    66     struct kstatfs ;    67     struct swap_info_struct ;    68     struct iov_iter ;    75     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   212     struct dquot ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_270 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_270 kprojid_t;   166     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_271 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_271 __annonCompField71;   enum quota_type type; } ;   184     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time_t dqb_btime;   time_t dqb_itime; } ;   206     struct quota_format_type ;   207     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   272     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   299     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *); } ;   310     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *); } ;   325     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   int d_ino_warns;   int d_spc_warns;   u64 
d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   348     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   394     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   405     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   418     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   432     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   496     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   526     struct writeback_control ;   527     struct kiocb {   struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int ki_flags; } ;   366     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *, loff_t );   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   423     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   atomic_t i_mmap_writable;   struct rb_root i_mmap;   struct rw_semaphore i_mmap_rwsem;   
unsigned long nrpages;   unsigned long nrshadows;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   spinlock_t private_lock;   struct list_head private_list;   void *private_data; } ;   443     struct request_queue ;   444     struct hd_struct ;   444     struct gendisk ;   444     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   struct list_head bd_inodes;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   560     struct posix_acl ;   561     struct inode_operations ;   561     union __anonunion____missing_field_name_274 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   561     union __anonunion____missing_field_name_275 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   561     struct file_lock_context ;   561     struct cdev ;   561     union __anonunion____missing_field_name_276 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev;   char *i_link; } ;   561     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_274 __annonCompField72;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct mutex i_mutex;   unsigned long dirtied_when;   unsigned long dirtied_time_when;   struct hlist_node i_hash;   struct list_head i_io_list;   struct bdi_writeback *i_wb;   int i_wb_frn_winner;   u16 i_wb_frn_avg_time;   u16 i_wb_frn_history;   struct list_head i_lru;   struct list_head i_sb_list;   union __anonunion____missing_field_name_275 __annonCompField73;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock_context *i_flctx;   struct address_space i_data;   struct list_head i_devices;   union __anonunion____missing_field_name_276 __annonCompField74;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   void *i_private; } ;   807     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   815     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   838     union __anonunion_f_u_277 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   838     struct file {   union __anonunion_f_u_277 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t 
f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   923     typedef void *fl_owner_t;   924     struct file_lock ;   925     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   931     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   fl_owner_t  (*lm_get_owner)(fl_owner_t );   void (*lm_put_owner)(fl_owner_t );   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, int);   bool  (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock *, int, struct list_head *);   void (*lm_setup)(struct file_lock *, void **); } ;   958     struct nlm_lockowner ;   959     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_279 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_278 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_279 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct list_head fl_list;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_278 fl_u; } ;  1011     struct file_lock_context {   spinlock_t flc_lock;   struct list_head flc_flock;   struct list_head flc_posix;   struct list_head flc_lease; } ;  1227     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1262     struct sb_writers {   int frozen;   wait_queue_head_t wait_unfrozen;   struct percpu_rw_semaphore rw_sem[3U]; } ;  1288     struct super_operations ;  1288     struct xattr_handler ;  1288     struct mtd_info ;  1288     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_iflags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   unsigned int s_quota_types;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 
s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct hlist_head s_pins;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu;   struct work_struct destroy_work;   struct mutex s_sync_lock;   int s_stack_depth;   spinlock_t s_inode_list_lock;   struct list_head s_inodes; } ;  1537     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1551     struct dir_context ;  1576     struct dir_context {   int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1583     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **, void **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   void (*show_fdinfo)(struct seq_file *, struct file *); } ;  1643     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   const char * (*follow_link)(struct dentry *, void **);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   void (*put_link)(struct inode *, void *);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);   int (*rename2)(struct 
inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   int (*setxattr)(struct dentry *, const char *, const void *, size_t , int);   ssize_t  (*getxattr)(struct dentry *, const char *, void *, size_t );   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*removexattr)(struct dentry *, const char *);   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1697     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_super)(struct super_block *);   int (*freeze_fs)(struct super_block *);   int (*thaw_super)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   struct dquot ** (*get_dquots)(struct inode *);   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);   long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;  1936     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;   276     struct usb_driver ;   277     struct wusb_dev ;   278     struct ep_device ;   279     struct usb_host_endpoint {   struct usb_endpoint_descriptor desc;   struct usb_ss_ep_comp_descriptor ss_ep_comp;   struct list_head urb_list;   void *hcpriv;   struct ep_device *ep_dev;   unsigned char *extra;   int extralen;   int enabled;   int streams; } ;    77     struct usb_host_interface {   struct usb_interface_descriptor desc;   int extralen;   unsigned char *extra;   struct usb_host_endpoint *endpoint;   char *string; } ;    92     enum usb_interface_condition {   USB_INTERFACE_UNBOUND = 0,   USB_INTERFACE_BINDING = 1,   USB_INTERFACE_BOUND = 2,   USB_INTERFACE_UNBINDING = 3 } ;    99     struct usb_interface {   struct 
usb_host_interface *altsetting;   struct usb_host_interface *cur_altsetting;   unsigned int num_altsetting;   struct usb_interface_assoc_descriptor *intf_assoc;   int minor;   enum usb_interface_condition condition;   unsigned char sysfs_files_created;   unsigned char ep_devs_created;   unsigned char unregistering;   unsigned char needs_remote_wakeup;   unsigned char needs_altsetting0;   unsigned char needs_binding;   unsigned char resetting_device;   struct device dev;   struct device *usb_dev;   atomic_t pm_usage_cnt;   struct work_struct reset_ws; } ;   201     struct usb_interface_cache {   unsigned int num_altsetting;   struct kref ref;   struct usb_host_interface altsetting[0U]; } ;   256     struct usb_host_config {   struct usb_config_descriptor desc;   char *string;   struct usb_interface_assoc_descriptor *intf_assoc[16U];   struct usb_interface *interface[32U];   struct usb_interface_cache *intf_cache[32U];   unsigned char *extra;   int extralen; } ;   320     struct usb_host_bos {   struct usb_bos_descriptor *desc;   struct usb_ext_cap_descriptor *ext_cap;   struct usb_ss_cap_descriptor *ss_cap;   struct usb_ss_container_id_descriptor *ss_id; } ;   332     struct usb_devmap {   unsigned long devicemap[2U]; } ;   344     struct mon_bus ;   344     struct usb_bus {   struct device *controller;   int busnum;   const char *bus_name;   u8 uses_dma;   u8 uses_pio_for_control;   u8 otg_port;   unsigned char is_b_host;   unsigned char b_hnp_enable;   unsigned char no_stop_on_short;   unsigned char no_sg_constraint;   unsigned int sg_tablesize;   int devnum_next;   struct usb_devmap devmap;   struct usb_device *root_hub;   struct usb_bus *hs_companion;   struct list_head bus_list;   struct mutex usb_address0_mutex;   int bandwidth_allocated;   int bandwidth_int_reqs;   int bandwidth_isoc_reqs;   unsigned int resuming_ports;   struct mon_bus *mon_bus;   int monitored; } ;   395     struct usb_tt ;   396     enum usb_device_removable {   USB_DEVICE_REMOVABLE_UNKNOWN = 0,   USB_DEVICE_REMOVABLE = 1,   USB_DEVICE_FIXED = 2 } ;   409     struct usb2_lpm_parameters {   unsigned int besl;   int timeout; } ;   430     struct usb3_lpm_parameters {   unsigned int mel;   unsigned int pel;   unsigned int sel;   int timeout; } ;   469     struct usb_device {   int devnum;   char devpath[16U];   u32 route;   enum usb_device_state state;   enum usb_device_speed speed;   struct usb_tt *tt;   int ttport;   unsigned int toggle[2U];   struct usb_device *parent;   struct usb_bus *bus;   struct usb_host_endpoint ep0;   struct device dev;   struct usb_device_descriptor descriptor;   struct usb_host_bos *bos;   struct usb_host_config *config;   struct usb_host_config *actconfig;   struct usb_host_endpoint *ep_in[16U];   struct usb_host_endpoint *ep_out[16U];   char **rawdescriptors;   unsigned short bus_mA;   u8 portnum;   u8 level;   unsigned char can_submit;   unsigned char persist_enabled;   unsigned char have_langid;   unsigned char authorized;   unsigned char authenticated;   unsigned char wusb;   unsigned char lpm_capable;   unsigned char usb2_hw_lpm_capable;   unsigned char usb2_hw_lpm_besl_capable;   unsigned char usb2_hw_lpm_enabled;   unsigned char usb2_hw_lpm_allowed;   unsigned char usb3_lpm_enabled;   int string_langid;   char *product;   char *manufacturer;   char *serial;   struct list_head filelist;   int maxchild;   u32 quirks;   atomic_t urbnum;   unsigned long active_duration;   unsigned long connect_time;   unsigned char do_remote_wakeup;   unsigned char reset_resume;   unsigned char 
port_is_suspended;   struct wusb_dev *wusb_dev;   int slot_id;   enum usb_device_removable removable;   struct usb2_lpm_parameters l1_params;   struct usb3_lpm_parameters u1_params;   struct usb3_lpm_parameters u2_params;   unsigned int lpm_disable_count; } ;   819     struct usb_dynids {   spinlock_t lock;   struct list_head list; } ;  1007     struct usbdrv_wrap {   struct device_driver driver;   int for_devices; } ;  1017     struct usb_driver {   const char *name;   int (*probe)(struct usb_interface *, const struct usb_device_id *);   void (*disconnect)(struct usb_interface *);   int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *);   int (*suspend)(struct usb_interface *, pm_message_t );   int (*resume)(struct usb_interface *);   int (*reset_resume)(struct usb_interface *);   int (*pre_reset)(struct usb_interface *);   int (*post_reset)(struct usb_interface *);   const struct usb_device_id *id_table;   struct usb_dynids dynids;   struct usbdrv_wrap drvwrap;   unsigned char no_dynamic_id;   unsigned char supports_autosuspend;   unsigned char disable_hub_initiated_lpm;   unsigned char soft_unbind; } ;  1145     struct usb_class_driver {   char *name;   char * (*devnode)(struct device *, umode_t *);   const struct file_operations *fops;   int minor_base; } ;  1200     struct usb_iso_packet_descriptor {   unsigned int offset;   unsigned int length;   unsigned int actual_length;   int status; } ;  1242     struct urb ;  1243     struct usb_anchor {   struct list_head urb_list;   wait_queue_head_t wait;   spinlock_t lock;   atomic_t suspend_wakeups;   unsigned char poisoned; } ;  1262     struct scatterlist ;  1262     struct urb {   struct kref kref;   void *hcpriv;   atomic_t use_count;   atomic_t reject;   int unlinked;   struct list_head urb_list;   struct list_head anchor_list;   struct usb_anchor *anchor;   struct usb_device *dev;   struct usb_host_endpoint *ep;   unsigned int pipe;   unsigned int stream_id;   int status;   unsigned int transfer_flags;   void *transfer_buffer;   dma_addr_t transfer_dma;   struct scatterlist *sg;   int num_mapped_sgs;   int num_sgs;   u32 transfer_buffer_length;   u32 actual_length;   unsigned char *setup_packet;   dma_addr_t setup_dma;   int start_frame;   int number_of_packets;   int interval;   int error_count;   void *context;   void (*complete)(struct urb *);   struct usb_iso_packet_descriptor iso_frame_desc[0U]; } ;  1894     struct pollfd {   int fd;   short events;   short revents; } ;    32     struct poll_table_struct {   void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);   unsigned long _key; } ;   210     struct vm_fault {   unsigned int flags;   unsigned long pgoff;   void *virtual_address;   struct page *cow_page;   struct page *page;   unsigned long max_pgoff;   pte_t *pte; } ;   242     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*mremap)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int);   void (*map_pages)(struct vm_area_struct *, struct vm_fault *);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * 
(*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;  2316     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;   381     struct __kfifo {   unsigned int in;   unsigned int out;   unsigned int mask;   unsigned int esize;   void *data; } ;    65     union __anonunion____missing_field_name_281 {   struct __kfifo kfifo;   unsigned char *type;   const unsigned char *const_type;   char (*rectype)[0U];   void *ptr;   const void *ptr_const; } ;    65     struct kfifo {   union __anonunion____missing_field_name_281 __annonCompField75;   unsigned char buf[0U]; } ;   832     struct lirc_buffer {   wait_queue_head_t wait_poll;   spinlock_t fifo_lock;   unsigned int chunk_size;   unsigned int size;   struct kfifo fifo; } ;   120     struct rc_dev ;   120     struct lirc_driver {   char name[40U];   int minor;   __u32 code_length;   unsigned int buffer_size;   int sample_rate;   __u32 features;   unsigned int chunk_size;   void *data;   int min_timeout;   int max_timeout;   int (*add_to_buf)(void *, struct lirc_buffer *);   struct lirc_buffer *rbuf;   int (*set_use_inc)(void *);   void (*set_use_dec)(void *);   struct rc_dev *rdev;   const struct file_operations *fops;   struct device *dev;   struct module *owner; } ;    74     struct rx_data {   int count;   int prev_bit;   int initial_space; } ;   103     struct tx_t {   unsigned char data_buf[35U];   struct completion finished;   atomic_t busy;   int status; } ;   110     struct imon_context {   struct usb_device *usbdev;   int display;   int display_isopen;   int ir_isopen;   int dev_present;   struct mutex ctx_lock;   wait_queue_head_t remove_ok;   int vfd_proto_6p;   struct lirc_driver *driver;   struct usb_endpoint_descriptor *rx_endpoint;   struct usb_endpoint_descriptor *tx_endpoint;   struct urb *rx_urb;   struct urb *tx_urb;   unsigned char usb_rx_buf[8U];   unsigned char usb_tx_buf[8U];   struct rx_data rx;   struct tx_t tx; } ;   135     typedef int ldv_func_ret_type;     1     long int __builtin_expect(long exp, long c);    33     extern struct module __this_module;   142     int printk(const char *, ...);    53     void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);     3     bool  ldv_is_err(const void *ptr);     6     long int ldv_ptr_err(const void *ptr);    12     void * memdup_user(const void *, size_t );    30     void * __memcpy(void *, const void *, size_t );    65     char * strcpy(char *, const char *);    66     void warn_slowpath_fmt(const char *, const int, const char *, ...);    32     long int PTR_ERR(const void *ptr);    41     bool  IS_ERR(const void *ptr);    25     int atomic_read(const atomic_t *v);    37     void atomic_set(atomic_t *v, int i);   119     void __mutex_init(struct mutex *, const char *, struct lock_class_key *);   173     int mutex_trylock(struct mutex *);   176     int ldv_mutex_trylock_8(struct mutex *ldv_func_arg1);   178     void mutex_unlock(struct mutex *);   181     void ldv_mutex_unlock_6(struct mutex *ldv_func_arg1);   185     void ldv_mutex_unlock_9(struct mutex *ldv_func_arg1);   189     void ldv_mutex_unlock_10(struct mutex *ldv_func_arg1);   193     void ldv_mutex_unlock_14(struct mutex *ldv_func_arg1);   197     void ldv_mutex_unlock_15(struct mutex *ldv_func_arg1);   201     void ldv_mutex_unlock_17(struct mutex *ldv_func_arg1);   205     void 
ldv_mutex_unlock_18(struct mutex *ldv_func_arg1);   209     void ldv_mutex_unlock_19(struct mutex *ldv_func_arg1);   213     void ldv_mutex_unlock_22(struct mutex *ldv_func_arg1);   217     void ldv_mutex_unlock_24(struct mutex *ldv_func_arg1);   221     void ldv_mutex_unlock_26(struct mutex *ldv_func_arg1);   225     void ldv_mutex_unlock_27(struct mutex *ldv_func_arg1);   229     void ldv_mutex_unlock_30(struct mutex *ldv_func_arg1);   233     void ldv_mutex_unlock_33(struct mutex *ldv_func_arg1);   237     void ldv_mutex_unlock_34(struct mutex *ldv_func_arg1);   241     void ldv_mutex_unlock_35(struct mutex *ldv_func_arg1);    18     void mutex_lock(struct mutex *);    21     void ldv_mutex_lock_5(struct mutex *ldv_func_arg1);    25     void ldv_mutex_lock_7(struct mutex *ldv_func_arg1);    29     void ldv_mutex_lock_11(struct mutex *ldv_func_arg1);    33     void ldv_mutex_lock_12(struct mutex *ldv_func_arg1);    37     void ldv_mutex_lock_13(struct mutex *ldv_func_arg1);    41     void ldv_mutex_lock_16(struct mutex *ldv_func_arg1);    45     void ldv_mutex_lock_20(struct mutex *ldv_func_arg1);    49     void ldv_mutex_lock_21(struct mutex *ldv_func_arg1);    53     void ldv_mutex_lock_23(struct mutex *ldv_func_arg1);    57     void ldv_mutex_lock_25(struct mutex *ldv_func_arg1);    61     void ldv_mutex_lock_28(struct mutex *ldv_func_arg1);    65     void ldv_mutex_lock_29(struct mutex *ldv_func_arg1);    69     void ldv_mutex_lock_31(struct mutex *ldv_func_arg1);    73     void ldv_mutex_lock_32(struct mutex *ldv_func_arg1);    78     void ldv_mutex_lock_ctx_lock_of_imon_context(struct mutex *lock);    82     void ldv_mutex_unlock_ctx_lock_of_imon_context(struct mutex *lock);    91     void ldv_mutex_lock_driver_lock(struct mutex *lock);    95     void ldv_mutex_unlock_driver_lock(struct mutex *lock);   104     void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock);   108     void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock);   117     void ldv_mutex_lock_lock(struct mutex *lock);   121     void ldv_mutex_unlock_lock(struct mutex *lock);   130     void ldv_mutex_lock_mutex_of_device(struct mutex *lock);   131     int ldv_mutex_trylock_mutex_of_device(struct mutex *lock);   134     void ldv_mutex_unlock_mutex_of_device(struct mutex *lock);    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    34     unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);    45     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   289     raw_spinlock_t * spinlock_check(spinlock_t *lock);   360     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);    72     void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);   149     void __wake_up(wait_queue_head_t *, unsigned int, int, void *);    73     void init_completion(struct completion *x);    93     int wait_for_completion_interruptible(struct completion *);   106     void complete(struct completion *);   107     void complete_all(struct completion *);   143     void kfree(const void *);   289     void * __kmalloc(size_t , gfp_t );   428     void * kmalloc(size_t size, gfp_t flags);   591     void * kzalloc(size_t size, gfp_t flags);   437     int usb_endpoint_type(const struct usb_endpoint_descriptor *epd);   888     void * dev_get_drvdata(const struct device *dev);   893     void dev_set_drvdata(struct device *dev, void *data);  1109     void dev_err(const struct device *, const char *, ...);  1111     void dev_warn(const 
struct device *, const char *, ...);  1115     void _dev_info(const struct device *, const char *, ...);   796     unsigned int iminor(const struct inode *inode);  2669     loff_t  noop_llseek(struct file *, loff_t , int);   189     void * usb_get_intfdata(struct usb_interface *intf);   194     void usb_set_intfdata(struct usb_interface *intf, void *data);   616     struct usb_device * interface_to_usbdev(struct usb_interface *intf);   621     struct usb_device * usb_get_dev(struct usb_device *);   764     const struct usb_device_id * usb_match_id(struct usb_interface *, const struct usb_device_id *);   770     struct usb_interface * usb_find_interface(struct usb_driver *, int);  1194     int usb_register_dev(struct usb_interface *, struct usb_class_driver *);  1196     void usb_deregister_dev(struct usb_interface *, struct usb_class_driver *);  1573     void usb_fill_int_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *transfer_buffer, int buffer_length, void (*complete_fn)(struct urb *), void *context, int interval);  1602     struct urb * usb_alloc_urb(int, gfp_t );  1603     void usb_free_urb(struct urb *);  1606     int usb_submit_urb(struct urb *, gfp_t );  1608     void usb_kill_urb(struct urb *);  1805     unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint);   182     int __kfifo_int_must_check_helper(int val);   771     int __kfifo_alloc(struct __kfifo *, unsigned int, size_t , gfp_t );   774     void __kfifo_free(struct __kfifo *);   779     unsigned int __kfifo_in(struct __kfifo *, const void *, unsigned int);   800     unsigned int __kfifo_in_r(struct __kfifo *, const void *, unsigned int, size_t );    47     int lirc_buffer_init(struct lirc_buffer *buf, unsigned int chunk_size, unsigned int size);    62     void lirc_buffer_free(struct lirc_buffer *buf);   110     unsigned int lirc_buffer_write(struct lirc_buffer *buf, unsigned char *orig);   199     int lirc_register_driver(struct lirc_driver *);   203     int lirc_unregister_driver(int);    53     int imon_probe(struct usb_interface *interface, const struct usb_device_id *id);    55     void imon_disconnect(struct usb_interface *interface);    56     void usb_rx_callback(struct urb *urb);    57     void usb_tx_callback(struct urb *urb);    60     int imon_resume(struct usb_interface *intf);    61     int imon_suspend(struct usb_interface *intf, pm_message_t message);    64     int display_open(struct inode *inode, struct file *file);    65     int display_close(struct inode *inode, struct file *file);    68     ssize_t  vfd_write(struct file *file, const char *buf, size_t n_bytes, loff_t *pos);    72     int ir_open(void *data);    73     void ir_close(void *data);   112     const struct file_operations display_fops = { &__this_module, &noop_llseek, 0, &vfd_write, 0, 0, 0, 0, 0, 0, 0, &display_open, 0, &display_close, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };   130     struct usb_device_id imon_usb_id_table[5U] = { { 3U, 2728U, 32769U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 1256U, 65328U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 2728U, 65498U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 5570U, 65498U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };   147     struct usb_device_id vfd_proto_6p_list[2U] = { { 3U, 5570U, 65498U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };   153     struct usb_device_id ir_only_list[3U] 
= { { 3U, 2728U, 32769U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL }, { 3U, 1256U, 65328U, (unsigned short)0, (unsigned short)0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0UL } };   160     struct usb_driver imon_driver = { "lirc_imon", &imon_probe, &imon_disconnect, 0, &imon_suspend, &imon_resume, 0, 0, 0, (const struct usb_device_id *)(&imon_usb_id_table), { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } }, { { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0 }, 0U, 0U, 0U, 0U };   169     struct usb_class_driver imon_class = { (char *)"lcd%d", 0, &display_fops, 144 };   176     struct mutex driver_lock = { { 1 }, { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "driver_lock.wait_lock", 0, 0UL } } } }, { &(driver_lock.wait_list), &(driver_lock.wait_list) }, 0, (void *)(&driver_lock), { 0, { 0, 0 }, "driver_lock", 0, 0UL } };   178     int debug = 0;   186     const struct usb_device_id __mod_usb__imon_usb_id_table_device_table[5U] = {  };   190     void free_imon_context(struct imon_context *context);   204     void deregister_from_lirc(struct imon_context *context);   322     int send_packet(struct imon_context *context);   553     void submit_data(struct imon_context *context);   576     void imon_incoming_packet(struct imon_context *context, struct urb *urb, int intf);  1002     void ldv_check_final_state();  1005     void ldv_check_return_value(int);  1008     void ldv_check_return_value_probe(int);  1011     void ldv_initialize();  1014     void ldv_handler_precall();  1017     int nondet_int();  1020     int LDV_IN_INTERRUPT = 0;  1023     void ldv_main0_sequence_infinite_withcheck_stateful();    10     void ldv_error();    20     void ldv_stop();    25     int ldv_undef_int();    30     int ldv_undef_int_negative();    14     void * ldv_err_ptr(long error);    28     bool  ldv_is_err_or_null(const void *ptr);     8     int ldv_mutex_ctx_lock_of_imon_context = 1;    11     int ldv_mutex_lock_interruptible_ctx_lock_of_imon_context(struct mutex *lock);    37     int ldv_mutex_lock_killable_ctx_lock_of_imon_context(struct mutex *lock);    72     int ldv_mutex_trylock_ctx_lock_of_imon_context(struct mutex *lock);    98     int ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context(atomic_t *cnt, struct mutex *lock);   123     int ldv_mutex_is_locked_ctx_lock_of_imon_context(struct mutex *lock);   163     void ldv_usb_lock_device_ctx_lock_of_imon_context();   170     int ldv_usb_trylock_device_ctx_lock_of_imon_context();   176     int ldv_usb_lock_device_for_reset_ctx_lock_of_imon_context();   189     void ldv_usb_unlock_device_ctx_lock_of_imon_context();   194     int ldv_mutex_driver_lock = 1;   197     int ldv_mutex_lock_interruptible_driver_lock(struct mutex *lock);   223     int ldv_mutex_lock_killable_driver_lock(struct mutex *lock);   258     int ldv_mutex_trylock_driver_lock(struct mutex *lock);   284     int ldv_atomic_dec_and_mutex_lock_driver_lock(atomic_t *cnt, struct mutex *lock);   309     int ldv_mutex_is_locked_driver_lock(struct mutex *lock);   349     void ldv_usb_lock_device_driver_lock();   356     int ldv_usb_trylock_device_driver_lock();   362     int ldv_usb_lock_device_for_reset_driver_lock();   375     void ldv_usb_unlock_device_driver_lock();   380     int ldv_mutex_i_mutex_of_inode = 1;   383     int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock);   409     int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock);   444     int 
ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock);
  470     int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock);
  495     int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock);
  535     void ldv_usb_lock_device_i_mutex_of_inode();
  542     int ldv_usb_trylock_device_i_mutex_of_inode();
  548     int ldv_usb_lock_device_for_reset_i_mutex_of_inode();
  561     void ldv_usb_unlock_device_i_mutex_of_inode();
  566     int ldv_mutex_lock = 1;
  569     int ldv_mutex_lock_interruptible_lock(struct mutex *lock);
  595     int ldv_mutex_lock_killable_lock(struct mutex *lock);
  630     int ldv_mutex_trylock_lock(struct mutex *lock);
  656     int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock);
  681     int ldv_mutex_is_locked_lock(struct mutex *lock);
  721     void ldv_usb_lock_device_lock();
  728     int ldv_usb_trylock_device_lock();
  734     int ldv_usb_lock_device_for_reset_lock();
  747     void ldv_usb_unlock_device_lock();
  752     int ldv_mutex_mutex_of_device = 1;
  755     int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock);
  781     int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock);
  842     int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock);
  867     int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock);
  907     void ldv_usb_lock_device_mutex_of_device();
  914     int ldv_usb_trylock_device_mutex_of_device();
  920     int ldv_usb_lock_device_for_reset_mutex_of_device();
  933     void ldv_usb_unlock_device_mutex_of_device();
          return ;
        }
        {
 1025     struct usb_interface *var_group1;
 1026     const struct usb_device_id *var_imon_probe_12_p1;
 1027     int res_imon_probe_12;
 1028     struct pm_message var_imon_suspend_14_p1;
 1029     int ldv_s_imon_driver_usb_driver;
 1030     int tmp;
 1031     int tmp___0;
 1111     ldv_s_imon_driver_usb_driver = 0;
 1101     LDV_IN_INTERRUPT = 1;
 1110     ldv_initialize() { /* Function call is skipped due to function is undefined */}
 1114     goto ldv_32240;
 1114     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
 1117     goto ldv_32239;
 1115     ldv_32239:;
 1118     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
 1118     switch (tmp)
          {
  684       struct usb_device *usbdev;
  685       struct usb_host_interface *iface_desc;
  686       struct usb_endpoint_descriptor *rx_endpoint;
  687       struct usb_endpoint_descriptor *tx_endpoint;
  688       struct urb *rx_urb;
  689       struct urb *tx_urb;
  690       struct lirc_driver *driver;
  691       struct lirc_buffer *rbuf;
  692       struct device *dev;
  693       int ifnum;
  694       int lirc_minor;
  695       int num_endpts;
  696       int retval;
  697       int display_ep_found;
  698       int ir_ep_found;
  699       int vfd_proto_6p;
  700       struct imon_context *context;
  701       int i;
  702       unsigned short vendor;
  703       unsigned short product;
  704       void *tmp;
  705       const struct usb_device_id *tmp___0;
  706       struct usb_device *tmp___1;
  707       struct _ddebug descriptor;
  708       long tmp___2;
  709       struct usb_endpoint_descriptor *ep;
  710       int ep_dir;
  711       int ep_type;
  712       struct _ddebug descriptor___0;
  713       long tmp___3;
  714       struct _ddebug descriptor___1;
  715       long tmp___4;
  716       struct _ddebug descriptor___2;
  717       long tmp___5;
  718       const struct usb_device_id *tmp___6;
  719       struct _ddebug descriptor___3;
  720       long tmp___7;
  721       void *tmp___8;
  722       void *tmp___9;
  723       int tmp___10;
  724       struct lock_class_key __key;
  725       unsigned int tmp___11;
  726       struct _ddebug descriptor___4;
  727       long tmp___12;
  728       int tmp___13;
  685       usbdev = (struct usb_device *)0;
  686       iface_desc = (struct usb_host_interface *)0;
  687       rx_endpoint = (struct usb_endpoint_descriptor *)0;
  688       tx_endpoint = (struct usb_endpoint_descriptor *)0;
  689       rx_urb = (struct urb *)0;
  690       tx_urb = (struct urb *)0;
  691       driver = (struct lirc_driver *)0;
  692       rbuf = (struct lirc_buffer *)0;
  693       dev = &(interface->dev);
  695       lirc_minor = 0;
  697       retval = -12;
  698       display_ep_found = 0;
  699       ir_ep_found = 0;
  700       vfd_proto_6p = 0;
  701       context = (struct imon_context *)0;
            {
  593         void *tmp;
              {
              }
  430           void *tmp___2;
  445           tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
              }
  708       context = (struct imon_context *)tmp;
  716       tmp___0 = usb_match_id(interface, (const struct usb_device_id *)(&ir_only_list)) { /* Function call is skipped due to function is undefined */}
  719       context->display = 1;
  721       usbdev = usb_get_dev(tmp___1) { /* Function call is skipped due to function is undefined */}
  722       iface_desc = interface->cur_altsetting;
  723       num_endpts = (int)(iface_desc->desc.bNumEndpoints);
  724       ifnum = (int)(iface_desc->desc.bInterfaceNumber);
  725       vendor = usbdev->descriptor.idVendor;
  726       product = usbdev->descriptor.idProduct;
  728       descriptor.modname = "lirc_imon";
  728       descriptor.function = "imon_probe";
  728       descriptor.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.c";
  728       descriptor.format = "%s: found iMON device (%04x:%04x, intf%d)\n";
  728       descriptor.lineno = 729U;
  728       descriptor.flags = 0U;
  736       i = 0;
  736       goto ldv_32167;
  738       goto ldv_32166;
  737       ldv_32166:;
  741       ep = &(((iface_desc->endpoint) + ((unsigned long)i))->desc);
  742       int __CPAchecker_TMP_0 = (int)(ep->bEndpointAddress);
  742       ep_dir = __CPAchecker_TMP_0 & 128;
  749       rx_endpoint = ep;
  750       ir_ep_found = 1;
  751       descriptor___0.modname = "lirc_imon";
  751       descriptor___0.function = "imon_probe";
  751       descriptor___0.filename = "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.c";
  751       descriptor___0.format = "%s: found IR endpoint\n";
  751       descriptor___0.lineno = 751U;
  751       descriptor___0.flags = 0U;
  736       i = i + 1;
  737       ldv_32167:;
            {
  593         void *tmp;
              {
              }
  430           void *tmp___2;
  445           tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
              }
  788       driver = (struct lirc_driver *)tmp___8;
            {
  430      
   void *tmp___2;   445         tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}             }  792       rbuf = (struct lirc_buffer *)tmp___9;             {    50         int ret;    51         struct lock_class_key __key;    52         struct lock_class_key __key___0;    53         struct kfifo *__tmp;    54         struct __kfifo *__kfifo;    55         int tmp;    53         __init_waitqueue_head(&(buf->wait_poll), "&buf->wait_poll", &__key) { /* Function call is skipped due to function is undefined */}    54         __raw_spin_lock_init(&(buf->fifo_lock.__annonCompField16.rlock), "&(&buf->fifo_lock)->rlock", &__key___0) { /* Function call is skipped due to function is undefined */}    55         buf->chunk_size = chunk_size;    56         buf->size = size;    57         __tmp = &(buf->fifo);    57         __kfifo = &(__tmp->__annonCompField75.kfifo);    57         tmp = __kfifo_alloc(__kfifo, size * chunk_size, 1UL, 208U) { /* Function call is skipped due to function is undefined */}             }  800       rx_urb = usb_alloc_urb(0, 208U) { /* Function call is skipped due to function is undefined */}   805       tx_urb = usb_alloc_urb(0, 208U) { /* Function call is skipped due to function is undefined */}   812       __mutex_init(&(context->ctx_lock), "&context->ctx_lock", &__key) { /* Function call is skipped due to function is undefined */}   813       context->vfd_proto_6p = vfd_proto_6p;   815       strcpy((char *)(&(driver->name)), "lirc_imon") { /* Function call is skipped due to function is undefined */}   816       driver->minor = -1;   817       driver->code_length = 32U;   818       driver->sample_rate = 0;   819       driver->features = 262144U;   820       driver->data = (void *)context;   821       driver->rbuf = rbuf;   822       driver->set_use_inc = &ir_open;   823       driver->set_use_dec = &ir_close;   824       driver->dev = &(interface->dev);   825       driver->owner = &__this_module;             {   309         mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}             }  829       context->driver = driver;   832       lirc_minor = lirc_register_driver(driver) { /* Function call is skipped due to function is undefined */}   838       _dev_info((const struct device *)dev, "Registered iMON driver (lirc minor: %d)\n", lirc_minor) { /* Function call is skipped due to function is undefined */}   842       driver->minor = lirc_minor;   844       context->usbdev = usbdev;   845       context->dev_present = 1;   846       context->rx_endpoint = rx_endpoint;   847       context->rx_urb = rx_urb;   853       context->tx_endpoint = tx_endpoint;   854       context->tx_urb = tx_urb;   859       unsigned int __CPAchecker_TMP_1 = (unsigned int)(context->rx_endpoint->bEndpointAddress);   859       int __CPAchecker_TMP_2 = (int)(context->rx_endpoint->bInterval);   859       -usb_fill_int_urb(context->rx_urb, context->usbdev, tmp___11 | 1073741952U, (void *)(&(context->usb_rx_buf)), 8, &usb_rx_callback, (void *)context, __CPAchecker_TMP_2)             {  1578         int _min1;  1579         int _max1;  1580         int _max2;  1581         int _min2;  1582         urb->dev = dev;  1583         urb->pipe = pipe;  1584         urb->transfer_buffer = transfer_buffer;  1585         urb->transfer_buffer_length = (u32 )buffer_length;  1586         urb->complete = complete_fn;  1587         urb->context = context;  1589         unsigned int __CPAchecker_TMP_0 = (unsigned int)(dev->speed);  1589   
      unsigned int __CPAchecker_TMP_1 = (unsigned int)(dev->speed);  1595         urb->interval = interval;  1598         urb->start_frame = -1;             }  866       retval = usb_submit_urb(context->rx_urb, 208U) { /* Function call is skipped due to function is undefined */}             { }   885       _dev_info((const struct device *)dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n", (int)vendor, (int)product, ifnum, usbdev->bus->busnum, usbdev->devnum) { /* Function call is skipped due to function is undefined */}   889       goto driver_unlock;           } 1141     ldv_check_return_value(res_imon_probe_12) { /* Function call is skipped due to function is undefined */}  1142     ldv_check_return_value_probe(res_imon_probe_12) { /* Function call is skipped due to function is undefined */}  1145     ldv_s_imon_driver_usb_driver = ldv_s_imon_driver_usb_driver + 1;  1151     goto ldv_32234;  1241     ldv_32234:;  1242     ldv_32240:;  1114     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  1117     goto ldv_32239;  1115     ldv_32239:;  1118     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}  1118     switch (tmp) 1172     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {   963       struct imon_context *context;   964       void *tmp;   963       context = (struct imon_context *)tmp;   965       usb_kill_urb(context->rx_urb) { /* Function call is skipped due to function is undefined */}           } 1174     ldv_s_imon_driver_usb_driver = ldv_s_imon_driver_usb_driver + 1;  1180     goto ldv_32234;  1241     ldv_32234:;  1242     ldv_32240:;  1114     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  1117     goto ldv_32239;  1115     ldv_32239:;  1118     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}  1118     switch (tmp) 1201     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {   972       struct imon_context *context;   973       void *tmp;   974       unsigned int tmp___0;   975       int tmp___1;   972       context = (struct imon_context *)tmp;   974       unsigned int __CPAchecker_TMP_0 = (unsigned int)(context->rx_endpoint->bEndpointAddress);   974       int __CPAchecker_TMP_1 = (int)(context->rx_endpoint->bInterval);   974       -usb_fill_int_urb(context->rx_urb, context->usbdev, tmp___0 | 1073741952U, (void *)(&(context->usb_rx_buf)), 8, &usb_rx_callback, (void *)context, __CPAchecker_TMP_1)             {  1578         int _min1;  1579         int _max1;  1580         int _max2;  1581         int _min2;  1582         urb->dev = dev;  1583         urb->pipe = pipe;  1584         urb->transfer_buffer = transfer_buffer;  1585         urb->transfer_buffer_length = (u32 )buffer_length;  1586         urb->complete = complete_fn;  1587         urb->context = context;  1589         unsigned int __CPAchecker_TMP_0 = (unsigned int)(dev->speed);  1589         unsigned int __CPAchecker_TMP_1 = (unsigned int)(dev->speed);  1595         urb->interval = interval;  1598         urb->start_frame = -1;             }  981       tmp___1 = usb_submit_urb(context->rx_urb, 32U) { /* Function call is skipped due to function is undefined */}           } 1203     ldv_s_imon_driver_usb_driver = ldv_s_imon_driver_usb_driver + 1;  1209     goto ldv_32234;  1241     ldv_32234:;  1242     ldv_32240:;  1114     tmp___0 = nondet_int() { /* Function call 
is skipped due to function is undefined */}  1117     goto ldv_32239;  1115     ldv_32239:;  1118     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}  1118     switch (tmp) 1230     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {         }  923       struct imon_context *context;   924       int ifnum;   925       void *tmp;   926       int tmp___0;   929       context = (struct imon_context *)tmp;   930       ifnum = (int)(interface->cur_altsetting->desc.bInterfaceNumber);             {           }}  |              Source code         
     1 
    2 /*
    3  *   lirc_imon.c:  LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD
    4  *		   including the iMON PAD model
    5  *
    6  *   Copyright(C) 2004  Venky Raju(dev@venky.ws)
    7  *   Copyright(C) 2009  Jarod Wilson <jarod@wilsonet.com>
    8  *
    9  *   lirc_imon is free software; you can redistribute it and/or modify
   10  *   it under the terms of the GNU General Public License as published by
   11  *   the Free Software Foundation; either version 2 of the License, or
   12  *   (at your option) any later version.
   13  *
   14  *   This program is distributed in the hope that it will be useful,
   15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
   16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   17  *   GNU General Public License for more details.
   18  *
   19  *   You should have received a copy of the GNU General Public License
   20  *   along with this program; if not, write to the Free Software
   21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   22  */
   23 
   24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   25 
   26 #include <linux/errno.h>
   27 #include <linux/kernel.h>
   28 #include <linux/module.h>
   29 #include <linux/slab.h>
   30 #include <linux/uaccess.h>
   31 #include <linux/usb.h>
   32 
   33 #include <media/lirc.h>
   34 #include <media/lirc_dev.h>
   35 
   36 
   37 #define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
   38 #define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
   39 #define MOD_NAME	"lirc_imon"
   40 #define MOD_VERSION	"0.8"
   41 
   42 #define DISPLAY_MINOR_BASE	144
   43 #define DEVICE_NAME	"lcd%d"
   44 
   45 #define BUF_CHUNK_SIZE	4
   46 #define BUF_SIZE	128
   47 
   48 #define BIT_DURATION	250	/* each bit received is 250us */
   49 
   50 /*** P R O T O T Y P E S ***/
   51 
   52 /* USB Callback prototypes */
   53 static int imon_probe(struct usb_interface *interface,
   54 		      const struct usb_device_id *id);
   55 static void imon_disconnect(struct usb_interface *interface);
   56 static void usb_rx_callback(struct urb *urb);
   57 static void usb_tx_callback(struct urb *urb);
   58 
   59 /* suspend/resume support */
   60 static int imon_resume(struct usb_interface *intf);
   61 static int imon_suspend(struct usb_interface *intf, pm_message_t message);
   62 
   63 /* Display file_operations function prototypes */
   64 static int display_open(struct inode *inode, struct file *file);
   65 static int display_close(struct inode *inode, struct file *file);
   66 
   67 /* VFD write operation */
   68 static ssize_t vfd_write(struct file *file, const char __user *buf,
   69 			 size_t n_bytes, loff_t *pos);
   70 
   71 /* LIRC driver function prototypes */
   72 static int ir_open(void *data);
   73 static void ir_close(void *data);
   74 
   75 /*** G L O B A L S ***/
   76 #define IMON_DATA_BUF_SZ	35
   77 
   78 struct imon_context {
   79 	struct usb_device *usbdev;
   80 	/* Newer devices have two interfaces */
   81 	int display;			/* not all controllers do */
   82 	int display_isopen;		/* display port has been opened */
   83 	int ir_isopen;			/* IR port open	*/
   84 	int dev_present;		/* USB device presence */
   85 	struct mutex ctx_lock;		/* to lock this object */
   86 	wait_queue_head_t remove_ok;	/* For unexpected USB disconnects */
   87 
   88 	int vfd_proto_6p;		/* some VFD require a 6th packet */
   89 
   90 	struct lirc_driver *driver;
   91 	struct usb_endpoint_descriptor *rx_endpoint;
   92 	struct usb_endpoint_descriptor *tx_endpoint;
   93 	struct urb *rx_urb;
   94 	struct urb *tx_urb;
   95 	unsigned char usb_rx_buf[8];
   96 	unsigned char usb_tx_buf[8];
   97 
   98 	struct rx_data {
   99 		int count;		/* length of 0 or 1 sequence */
  100 		int prev_bit;		/* logic level of sequence */
  101 		int initial_space;	/* initial space flag */
  102 	} rx;
  103 
  104 	struct tx_t {
  105 		unsigned char data_buf[IMON_DATA_BUF_SZ]; /* user data buffer */
  106 		struct completion finished;	/* wait for write to finish */
  107 		atomic_t busy;			/* write in progress */
  108 		int status;			/* status of tx completion */
  109 	} tx;
  110 };
  111 
  112 static const struct file_operations display_fops = {
  113 	.owner		= THIS_MODULE,
  114 	.open		= &display_open,
  115 	.write		= &vfd_write,
  116 	.release	= &display_close,
  117 	.llseek		= noop_llseek,
  118 };
  119 
  120 /*
  121  * USB Device ID for iMON USB Control Boards
  122  *
  123  * The Windows drivers contain 6 different inf files, more or less one for
  124  * each new device until the 0x0034-0x0046 devices, which all use the same
  125  * driver. Some of the devices in the 34-46 range haven't been definitively
  126  * identified yet. Early devices have either a TriGem Computer, Inc. or a
  127  * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
  128  * devices use the SoundGraph vendor ID (0x15c2).
  129  */
  130 static struct usb_device_id imon_usb_id_table[] = {
  131 	/* TriGem iMON (IR only) -- TG_iMON.inf */
  132 	{ USB_DEVICE(0x0aa8, 0x8001) },
  133 
  134 	/* SoundGraph iMON (IR only) -- sg_imon.inf */
  135 	{ USB_DEVICE(0x04e8, 0xff30) },
  136 
  137 	/* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */
  138 	{ USB_DEVICE(0x0aa8, 0xffda) },
  139 
  140 	/* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */
  141 	{ USB_DEVICE(0x15c2, 0xffda) },
  142 
  143 	{}
  144 };
  145 
   146 /* Some iMON VFD models require a 6th packet for VFD writes */
  147 static struct usb_device_id vfd_proto_6p_list[] = {
  148 	{ USB_DEVICE(0x15c2, 0xffda) },
  149 	{}
  150 };
  151 
  152 /* Some iMON devices have no lcd/vfd, don't set one up */
  153 static struct usb_device_id ir_only_list[] = {
  154 	{ USB_DEVICE(0x0aa8, 0x8001) },
  155 	{ USB_DEVICE(0x04e8, 0xff30) },
  156 	{}
  157 };
  158 
  159 /* USB Device data */
  160 static struct usb_driver imon_driver = {
  161 	.name		= MOD_NAME,
  162 	.probe		= imon_probe,
  163 	.disconnect	= imon_disconnect,
  164 	.suspend	= imon_suspend,
  165 	.resume		= imon_resume,
  166 	.id_table	= imon_usb_id_table,
  167 };
  168 
  169 static struct usb_class_driver imon_class = {
  170 	.name		= DEVICE_NAME,
  171 	.fops		= &display_fops,
  172 	.minor_base	= DISPLAY_MINOR_BASE,
  173 };
  174 
  175 /* to prevent races between open() and disconnect(), probing, etc */
  176 static DEFINE_MUTEX(driver_lock);
  177 
  178 static int debug;
  179 
  180 /***  M O D U L E   C O D E ***/
  181 
  182 MODULE_AUTHOR(MOD_AUTHOR);
  183 MODULE_DESCRIPTION(MOD_DESC);
  184 MODULE_VERSION(MOD_VERSION);
  185 MODULE_LICENSE("GPL");
  186 MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
  187 module_param(debug, int, S_IRUGO | S_IWUSR);
  188 MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)");
  189 
  190 static void free_imon_context(struct imon_context *context)
  191 {
  192 	struct device *dev = context->driver->dev;
  193 
  194 	usb_free_urb(context->tx_urb);
  195 	usb_free_urb(context->rx_urb);
  196 	lirc_buffer_free(context->driver->rbuf);
  197 	kfree(context->driver->rbuf);
  198 	kfree(context->driver);
  199 	kfree(context);
  200 
  201 	dev_dbg(dev, "%s: iMON context freed\n", __func__);
  202 }
  203 
  204 static void deregister_from_lirc(struct imon_context *context)
  205 {
  206 	int retval;
  207 	int minor = context->driver->minor;
  208 
  209 	retval = lirc_unregister_driver(minor);
  210 	if (retval)
  211 		dev_err(&context->usbdev->dev,
  212 			"unable to deregister from lirc(%d)", retval);
  213 	else
  214 		dev_info(&context->usbdev->dev,
  215 			 "Deregistered iMON driver (minor:%d)\n", minor);
  216 
  217 }
  218 
  219 /**
  220  * Called when the Display device (e.g. /dev/lcd0)
  221  * is opened by the application.
  222  */
  223 static int display_open(struct inode *inode, struct file *file)
  224 {
  225 	struct usb_interface *interface;
  226 	struct imon_context *context = NULL;
  227 	int subminor;
  228 	int retval = 0;
  229 
  230 	/* prevent races with disconnect */
  231 	mutex_lock(&driver_lock);
  232 
  233 	subminor = iminor(inode);
  234 	interface = usb_find_interface(&imon_driver, subminor);
  235 	if (!interface) {
  236 		pr_err("%s: could not find interface for minor %d\n",
  237 		       __func__, subminor);
  238 		retval = -ENODEV;
  239 		goto exit;
  240 	}
  241 	context = usb_get_intfdata(interface);
  242 
  243 	if (!context) {
  244 		dev_err(&interface->dev, "no context found for minor %d\n",
  245 			subminor);
  246 		retval = -ENODEV;
  247 		goto exit;
  248 	}
  249 
  250 	mutex_lock(&context->ctx_lock);
  251 
  252 	if (!context->display) {
  253 		dev_err(&interface->dev,
  254 			"%s: display not supported by device\n", __func__);
  255 		retval = -ENODEV;
  256 	} else if (context->display_isopen) {
  257 		dev_err(&interface->dev,
  258 			"%s: display port is already open\n", __func__);
  259 		retval = -EBUSY;
  260 	} else {
  261 		context->display_isopen = 1;
  262 		file->private_data = context;
  263 		dev_info(context->driver->dev, "display port opened\n");
  264 	}
  265 
  266 	mutex_unlock(&context->ctx_lock);
  267 
  268 exit:
  269 	mutex_unlock(&driver_lock);
  270 	return retval;
  271 }
  272 
  273 /**
  274  * Called when the display device (e.g. /dev/lcd0)
  275  * is closed by the application.
  276  */
  277 static int display_close(struct inode *inode, struct file *file)
  278 {
  279 	struct imon_context *context = NULL;
  280 	int retval = 0;
  281 
  282 	context = file->private_data;
  283 
  284 	if (!context) {
  285 		pr_err("%s: no context for device\n", __func__);
  286 		return -ENODEV;
  287 	}
  288 
  289 	mutex_lock(&context->ctx_lock);
  290 
  291 	if (!context->display) {
  292 		dev_err(&context->usbdev->dev,
  293 			"%s: display not supported by device\n", __func__);
  294 		retval = -ENODEV;
  295 	} else if (!context->display_isopen) {
  296 		dev_err(&context->usbdev->dev,
  297 			"%s: display is not open\n", __func__);
  298 		retval = -EIO;
  299 	} else {
  300 		context->display_isopen = 0;
  301 		dev_info(context->driver->dev, "display port closed\n");
  302 		if (!context->dev_present && !context->ir_isopen) {
  303 			/*
  304 			 * Device disconnected before close and IR port is not
  305 			 * open. If IR port is open, context will be deleted by
  306 			 * ir_close.
  307 			 */
  308 			mutex_unlock(&context->ctx_lock);
  309 			free_imon_context(context);
  310 			return retval;
  311 		}
  312 	}
  313 
  314 	mutex_unlock(&context->ctx_lock);
  315 	return retval;
  316 }
  317 
  318 /**
  319  * Sends a packet to the device -- this function must be called
  320  * with context->ctx_lock held.
  321  */
  322 static int send_packet(struct imon_context *context)
  323 {
  324 	unsigned int pipe;
  325 	int interval = 0;
  326 	int retval = 0;
  327 
  328 	/* Check if we need to use control or interrupt urb */
  329 	pipe = usb_sndintpipe(context->usbdev,
  330 			      context->tx_endpoint->bEndpointAddress);
  331 	interval = context->tx_endpoint->bInterval;
  332 
  333 	usb_fill_int_urb(context->tx_urb, context->usbdev, pipe,
  334 			 context->usb_tx_buf,
  335 			 sizeof(context->usb_tx_buf),
  336 			 usb_tx_callback, context, interval);
  337 
  338 	context->tx_urb->actual_length = 0;
  339 
  340 	init_completion(&context->tx.finished);
  341 	atomic_set(&context->tx.busy, 1);
  342 
  343 	retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
  344 	if (retval) {
  345 		atomic_set(&context->tx.busy, 0);
  346 		dev_err(&context->usbdev->dev, "error submitting urb(%d)\n",
  347 			retval);
  348 	} else {
  349 		/* Wait for transmission to complete (or abort) */
  350 		mutex_unlock(&context->ctx_lock);
  351 		retval = wait_for_completion_interruptible(
  352 				&context->tx.finished);
  353 		if (retval)
  354 			dev_err(&context->usbdev->dev,
  355 				"%s: task interrupted\n", __func__);
  356 		mutex_lock(&context->ctx_lock);
  357 
  358 		retval = context->tx.status;
  359 		if (retval)
  360 			dev_err(&context->usbdev->dev,
  361 				"packet tx failed (%d)\n", retval);
  362 	}
  363 
  364 	return retval;
  365 }
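
The locking contract stated in the comment above send_packet() is worth spelling out: the function expects ctx_lock to be held on entry, drops it while it waits for the URB completion, and re-acquires it before returning, so the caller still owns the lock afterwards. The sketch below (illustrative only; send_one_packet_locked() is a hypothetical helper, not driver code) shows the expected call pattern; vfd_write() further down follows exactly this shape.

/*
 * Illustrative sketch of the calling convention documented above:
 * take ctx_lock, call send_packet() with the lock held, and release
 * it afterwards. The lock is still owned on return even though
 * send_packet() temporarily drops it around the completion wait.
 */
static int send_one_packet_locked(struct imon_context *context)
{
	int rc;

	mutex_lock(&context->ctx_lock);
	rc = send_packet(context);	/* requires ctx_lock held on entry */
	mutex_unlock(&context->ctx_lock);

	return rc;
}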
  366 
  367 /**
  368  * Writes data to the VFD.  The iMON VFD is 2x16 characters
  369  * and requires data in 5 consecutive USB interrupt packets,
  370  * each packet but the last carrying 7 bytes.
  371  *
  372  * I don't know if the VFD board supports features such as
   373  * scrolling, clearing rows, blanking, etc. so
  374  * the caller must provide a full screen of data.  If fewer
  375  * than 32 bytes are provided spaces will be appended to
  376  * generate a full screen.
  377  */
  378 static ssize_t vfd_write(struct file *file, const char __user *buf,
  379 			 size_t n_bytes, loff_t *pos)
  380 {
  381 	int i;
  382 	int offset;
  383 	int seq;
  384 	int retval = 0;
  385 	struct imon_context *context;
  386 	const unsigned char vfd_packet6[] = {
  387 		0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
  388 	int *data_buf = NULL;
  389 
  390 	context = file->private_data;
  391 	if (!context) {
  392 		pr_err("%s: no context for device\n", __func__);
  393 		return -ENODEV;
  394 	}
  395 
  396 	mutex_lock(&context->ctx_lock);
  397 
  398 	if (!context->dev_present) {
  399 		dev_err(&context->usbdev->dev,
  400 			"%s: no iMON device present\n", __func__);
  401 		retval = -ENODEV;
  402 		goto exit;
  403 	}
  404 
  405 	if (n_bytes <= 0 || n_bytes > IMON_DATA_BUF_SZ - 3) {
  406 		dev_err(&context->usbdev->dev,
  407 			"%s: invalid payload size\n", __func__);
  408 		retval = -EINVAL;
  409 		goto exit;
  410 	}
  411 
  412 	data_buf = memdup_user(buf, n_bytes);
  413 	if (IS_ERR(data_buf)) {
  414 		retval = PTR_ERR(data_buf);
  415 		data_buf = NULL;
  416 		goto exit;
  417 	}
  418 
  419 	memcpy(context->tx.data_buf, data_buf, n_bytes);
  420 
  421 	/* Pad with spaces */
  422 	for (i = n_bytes; i < IMON_DATA_BUF_SZ - 3; ++i)
  423 		context->tx.data_buf[i] = ' ';
  424 
  425 	for (i = IMON_DATA_BUF_SZ - 3; i < IMON_DATA_BUF_SZ; ++i)
  426 		context->tx.data_buf[i] = 0xFF;
  427 
  428 	offset = 0;
  429 	seq = 0;
  430 
  431 	do {
  432 		memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7);
  433 		context->usb_tx_buf[7] = (unsigned char) seq;
  434 
  435 		retval = send_packet(context);
  436 		if (retval) {
  437 			dev_err(&context->usbdev->dev,
  438 				"send packet failed for packet #%d\n",
  439 				seq / 2);
  440 			goto exit;
  441 		} else {
  442 			seq += 2;
  443 			offset += 7;
  444 		}
  445 
  446 	} while (offset < IMON_DATA_BUF_SZ);
  447 
  448 	if (context->vfd_proto_6p) {
  449 		/* Send packet #6 */
  450 		memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
  451 		context->usb_tx_buf[7] = (unsigned char) seq;
  452 		retval = send_packet(context);
  453 		if (retval)
  454 			dev_err(&context->usbdev->dev,
  455 				"send packet failed for packet #%d\n",
  456 				seq / 2);
  457 	}
  458 
  459 exit:
  460 	mutex_unlock(&context->ctx_lock);
  461 	kfree(data_buf);
  462 
  463 	return (!retval) ? n_bytes : retval;
  464 }
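
To make the loop above concrete: with IMON_DATA_BUF_SZ = 35, the space-padded buffer is sent as five 8-byte interrupt transfers, each carrying 7 payload bytes plus one sequence byte, and vfd_proto_6p devices get a sixth packet with sequence byte 10. The standalone snippet below (not driver code) just replays the offset/seq arithmetic of the loop.

/* Standalone illustration of how the loop above slices the 35-byte
 * buffer into 8-byte interrupt packets (7 data bytes + 1 sequence byte). */
#include <stdio.h>

#define IMON_DATA_BUF_SZ 35

int main(void)
{
	int offset = 0, seq = 0;

	while (offset < IMON_DATA_BUF_SZ) {
		printf("packet %d: data_buf[%2d..%2d], seq byte %d\n",
		       seq / 2, offset, offset + 6, seq);
		seq += 2;
		offset += 7;
	}
	/* prints packets 0..4; a vfd_proto_6p device gets one more with seq 10 */
	return 0;
}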
  465 
  466 /**
  467  * Callback function for USB core API: transmit data
  468  */
  469 static void usb_tx_callback(struct urb *urb)
  470 {
  471 	struct imon_context *context;
  472 
  473 	if (!urb)
  474 		return;
  475 	context = (struct imon_context *)urb->context;
  476 	if (!context)
  477 		return;
  478 
  479 	context->tx.status = urb->status;
  480 
  481 	/* notify waiters that write has finished */
  482 	atomic_set(&context->tx.busy, 0);
  483 	complete(&context->tx.finished);
  484 }
  485 
  486 /**
  487  * Called by lirc_dev when the application opens /dev/lirc
  488  */
  489 static int ir_open(void *data)
  490 {
  491 	struct imon_context *context;
  492 
  493 	/* prevent races with disconnect */
  494 	mutex_lock(&driver_lock);
  495 
  496 	context = data;
  497 
  498 	/* initial IR protocol decode variables */
  499 	context->rx.count = 0;
  500 	context->rx.initial_space = 1;
  501 	context->rx.prev_bit = 0;
  502 
  503 	context->ir_isopen = 1;
  504 	dev_info(context->driver->dev, "IR port opened\n");
  505 
  506 	mutex_unlock(&driver_lock);
  507 	return 0;
  508 }
  509 
  510 /**
  511  * Called by lirc_dev when the application closes /dev/lirc
  512  */
  513 static void ir_close(void *data)
  514 {
  515 	struct imon_context *context;
  516 
  517 	context = data;
  518 	if (!context) {
  519 		pr_err("%s: no context for device\n", __func__);
  520 		return;
  521 	}
  522 
  523 	mutex_lock(&context->ctx_lock);
  524 
  525 	context->ir_isopen = 0;
  526 	dev_info(context->driver->dev, "IR port closed\n");
  527 
  528 	if (!context->dev_present) {
  529 		/*
  530 		 * Device disconnected while IR port was still open. Driver
  531 		 * was not deregistered at disconnect time, so do it now.
  532 		 */
  533 		deregister_from_lirc(context);
  534 
  535 		if (!context->display_isopen) {
  536 			mutex_unlock(&context->ctx_lock);
  537 			free_imon_context(context);
  538 			return;
  539 		}
  540 		/*
  541 		 * If display port is open, context will be deleted by
  542 		 * display_close
  543 		 */
  544 	}
  545 
  546 	mutex_unlock(&context->ctx_lock);
  547 }
  548 
  549 /**
  550  * Convert bit count to time duration (in us) and submit
  551  * the value to lirc_dev.
  552  */
  553 static void submit_data(struct imon_context *context)
  554 {
  555 	unsigned char buf[4];
  556 	int value = context->rx.count;
  557 	int i;
  558 
  559 	dev_dbg(context->driver->dev, "submitting data to LIRC\n");
  560 
  561 	value *= BIT_DURATION;
  562 	value &= PULSE_MASK;
  563 	if (context->rx.prev_bit)
  564 		value |= PULSE_BIT;
  565 
  566 	for (i = 0; i < 4; ++i)
  567 		buf[i] = value>>(i*8);
  568 
  569 	lirc_buffer_write(context->driver->rbuf, buf);
  570 	wake_up(&context->driver->rbuf->wait_poll);
  571 }
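
As a worked instance of the conversion above: with BIT_DURATION = 250, a run of four identical bits becomes 1000 us; PULSE_BIT is OR-ed in when the run was a pulse, and the result is written least-significant byte first into the 4-byte chunk handed to lirc_buffer_write(). The snippet below is a standalone illustration, not driver code; the PULSE_BIT/PULSE_MASK values are the ones defined in the LIRC UAPI header.

/* Standalone sketch of the count -> duration conversion in submit_data(). */
#include <stdio.h>

#define BIT_DURATION 250		/* us per received bit */
#define PULSE_BIT    0x01000000
#define PULSE_MASK   0x00FFFFFF

int main(void)
{
	int count = 4, prev_bit = 1;	/* a run of four "pulse" bits */
	int value = (count * BIT_DURATION) & PULSE_MASK;

	if (prev_bit)
		value |= PULSE_BIT;
	printf("lirc sample: 0x%08x (%d us pulse)\n", value, value & PULSE_MASK);
	return 0;
}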
  572 
  573 /**
  574  * Process the incoming packet
  575  */
  576 static void imon_incoming_packet(struct imon_context *context,
  577 				 struct urb *urb, int intf)
  578 {
  579 	int len = urb->actual_length;
  580 	unsigned char *buf = urb->transfer_buffer;
  581 	struct device *dev = context->driver->dev;
  582 	int octet, bit;
  583 	unsigned char mask;
  584 
  585 	/*
  586 	 * just bail out if no listening IR client
  587 	 */
  588 	if (!context->ir_isopen)
  589 		return;
  590 
  591 	if (len != 8) {
  592 		dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
  593 			__func__, len, intf);
  594 		return;
  595 	}
  596 
  597 	if (debug)
  598 		dev_info(dev, "raw packet: %*ph\n", len, buf);
  599 	/*
  600 	 * Translate received data to pulse and space lengths.
  601 	 * Received data is active low, i.e. pulses are 0 and
  602 	 * spaces are 1.
  603 	 *
  604 	 * My original algorithm was essentially similar to
  605 	 * Changwoo Ryu's with the exception that he switched
  606 	 * the incoming bits to active high and also fed an
  607 	 * initial space to LIRC at the start of a new sequence
  608 	 * if the previous bit was a pulse.
  609 	 *
  610 	 * I've decided to adopt his algorithm.
  611 	 */
  612 
  613 	if (buf[7] == 1 && context->rx.initial_space) {
  614 		/* LIRC requires a leading space */
  615 		context->rx.prev_bit = 0;
  616 		context->rx.count = 4;
  617 		submit_data(context);
  618 		context->rx.count = 0;
  619 	}
  620 
  621 	for (octet = 0; octet < 5; ++octet) {
  622 		mask = 0x80;
  623 		for (bit = 0; bit < 8; ++bit) {
  624 			int curr_bit = !(buf[octet] & mask);
  625 
  626 			if (curr_bit != context->rx.prev_bit) {
  627 				if (context->rx.count) {
  628 					submit_data(context);
  629 					context->rx.count = 0;
  630 				}
  631 				context->rx.prev_bit = curr_bit;
  632 			}
  633 			++context->rx.count;
  634 			mask >>= 1;
  635 		}
  636 	}
  637 
  638 	if (buf[7] == 10) {
  639 		if (context->rx.count) {
  640 			submit_data(context);
  641 			context->rx.count = 0;
  642 		}
  643 		context->rx.initial_space = context->rx.prev_bit;
  644 	}
  645 }
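
The active-low run-length decode described in the comment inside imon_incoming_packet() can be seen on a single byte: 0x0F scanned MSB-first is four 0 bits (a pulse run) followed by four 1 bits (a space run); a run is flushed whenever the logic level changes. The standalone snippet below (illustrative only, not driver code) replays the inner loop for that one byte.

/* Standalone sketch of the run-length decode above: 0x0F scanned MSB-first
 * gives a pulse run of 4 bits followed by a space run of 4 bits. */
#include <stdio.h>

int main(void)
{
	unsigned char octet = 0x0F;	/* MSB-first: 0000 1111 */
	int prev_bit = 0, count = 0;

	for (unsigned char mask = 0x80; mask; mask >>= 1) {
		int curr_bit = !(octet & mask);	/* active low, as in the driver */

		if (curr_bit != prev_bit && count) {
			printf("%s run of %d bits\n",
			       prev_bit ? "pulse" : "space", count);
			count = 0;
		}
		prev_bit = curr_bit;
		count++;
	}
	printf("%s run of %d bits (still accumulating)\n",
	       prev_bit ? "pulse" : "space", count);
	return 0;
}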
  646 
  647 /**
  648  * Callback function for USB core API: receive data
  649  */
  650 static void usb_rx_callback(struct urb *urb)
  651 {
  652 	struct imon_context *context;
  653 	int intfnum = 0;
  654 
  655 	if (!urb)
  656 		return;
  657 
  658 	context = (struct imon_context *)urb->context;
  659 	if (!context)
  660 		return;
  661 
  662 	switch (urb->status) {
  663 	case -ENOENT:		/* usbcore unlink successful! */
  664 		return;
  665 
  666 	case 0:
  667 		imon_incoming_packet(context, urb, intfnum);
  668 		break;
  669 
  670 	default:
  671 		dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n",
  672 			 __func__, urb->status);
  673 		break;
  674 	}
  675 
  676 	usb_submit_urb(context->rx_urb, GFP_ATOMIC);
  677 }
  678 
  679 /**
  680  * Callback function for USB core API: Probe
  681  */
  682 static int imon_probe(struct usb_interface *interface,
  683 		      const struct usb_device_id *id)
  684 {
  685 	struct usb_device *usbdev = NULL;
  686 	struct usb_host_interface *iface_desc = NULL;
  687 	struct usb_endpoint_descriptor *rx_endpoint = NULL;
  688 	struct usb_endpoint_descriptor *tx_endpoint = NULL;
  689 	struct urb *rx_urb = NULL;
  690 	struct urb *tx_urb = NULL;
  691 	struct lirc_driver *driver = NULL;
  692 	struct lirc_buffer *rbuf = NULL;
  693 	struct device *dev = &interface->dev;
  694 	int ifnum;
  695 	int lirc_minor = 0;
  696 	int num_endpts;
  697 	int retval = -ENOMEM;
  698 	int display_ep_found = 0;
  699 	int ir_ep_found = 0;
  700 	int vfd_proto_6p = 0;
  701 	struct imon_context *context = NULL;
  702 	int i;
  703 	u16 vendor, product;
  704 
  705 	/* prevent races probing devices w/multiple interfaces */
  706 	mutex_lock(&driver_lock);
  707 
  708 	context = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
  709 	if (!context)
  710 		goto driver_unlock;
  711 
  712 	/*
  713 	 * Try to auto-detect the type of display if the user hasn't set
  714 	 * it by hand via the display_type modparam. Default is VFD.
  715 	 */
  716 	if (usb_match_id(interface, ir_only_list))
  717 		context->display = 0;
  718 	else
  719 		context->display = 1;
  720 
  721 	usbdev     = usb_get_dev(interface_to_usbdev(interface));
  722 	iface_desc = interface->cur_altsetting;
  723 	num_endpts = iface_desc->desc.bNumEndpoints;
  724 	ifnum      = iface_desc->desc.bInterfaceNumber;
  725 	vendor     = le16_to_cpu(usbdev->descriptor.idVendor);
  726 	product    = le16_to_cpu(usbdev->descriptor.idProduct);
  727 
  728 	dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
  729 		__func__, vendor, product, ifnum);
  730 
  731 	/*
  732 	 * Scan the endpoint list and set:
  733 	 *	first input endpoint = IR endpoint
  734 	 *	first output endpoint = display endpoint
  735 	 */
  736 	for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
  737 		struct usb_endpoint_descriptor *ep;
  738 		int ep_dir;
  739 		int ep_type;
  740 
  741 		ep = &iface_desc->endpoint[i].desc;
  742 		ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
  743 		ep_type = usb_endpoint_type(ep);
  744 
  745 		if (!ir_ep_found &&
  746 			ep_dir == USB_DIR_IN &&
  747 			ep_type == USB_ENDPOINT_XFER_INT) {
  748 
  749 			rx_endpoint = ep;
  750 			ir_ep_found = 1;
  751 			dev_dbg(dev, "%s: found IR endpoint\n", __func__);
  752 
  753 		} else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
  754 			   ep_type == USB_ENDPOINT_XFER_INT) {
  755 			tx_endpoint = ep;
  756 			display_ep_found = 1;
  757 			dev_dbg(dev, "%s: found display endpoint\n", __func__);
  758 		}
  759 	}
  760 
  761 	/*
  762 	 * Some iMON receivers have no display. Unfortunately, it seems
  763 	 * that SoundGraph recycles device IDs between devices both with
  764 	 * and without... :\
  765 	 */
  766 	if (context->display == 0) {
  767 		display_ep_found = 0;
  768 		dev_dbg(dev, "%s: device has no display\n", __func__);
  769 	}
  770 
  771 	/* Input endpoint is mandatory */
  772 	if (!ir_ep_found) {
  773 		dev_err(dev, "%s: no valid input (IR) endpoint found.\n",
  774 			__func__);
  775 		retval = -ENODEV;
  776 		goto free_context;
  777 	}
  778 
  779 	/* Determine if display requires 6 packets */
  780 	if (display_ep_found) {
  781 		if (usb_match_id(interface, vfd_proto_6p_list))
  782 			vfd_proto_6p = 1;
  783 
  784 		dev_dbg(dev, "%s: vfd_proto_6p: %d\n",
  785 			__func__, vfd_proto_6p);
  786 	}
  787 
  788 	driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
  789 	if (!driver)
  790 		goto free_context;
  791 
  792 	rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
  793 	if (!rbuf)
  794 		goto free_driver;
  795 
  796 	if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
  797 		dev_err(dev, "%s: lirc_buffer_init failed\n", __func__);
  798 		goto free_rbuf;
  799 	}
  800 	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
  801 	if (!rx_urb) {
  802 		dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__);
  803 		goto free_lirc_buf;
  804 	}
  805 	tx_urb = usb_alloc_urb(0, GFP_KERNEL);
  806 	if (!tx_urb) {
  807 		dev_err(dev, "%s: usb_alloc_urb failed for display urb\n",
  808 		    __func__);
  809 		goto free_rx_urb;
  810 	}
  811 
  812 	mutex_init(&context->ctx_lock);
  813 	context->vfd_proto_6p = vfd_proto_6p;
  814 
  815 	strcpy(driver->name, MOD_NAME);
  816 	driver->minor = -1;
  817 	driver->code_length = BUF_CHUNK_SIZE * 8;
  818 	driver->sample_rate = 0;
  819 	driver->features = LIRC_CAN_REC_MODE2;
  820 	driver->data = context;
  821 	driver->rbuf = rbuf;
  822 	driver->set_use_inc = ir_open;
  823 	driver->set_use_dec = ir_close;
  824 	driver->dev = &interface->dev;
  825 	driver->owner = THIS_MODULE;
  826 
  827 	mutex_lock(&context->ctx_lock);
  828 
  829 	context->driver = driver;
  830 	/* start out in keyboard mode */
  831 
  832 	lirc_minor = lirc_register_driver(driver);
  833 	if (lirc_minor < 0) {
  834 		dev_err(dev, "%s: lirc_register_driver failed\n", __func__);
  835 		goto free_tx_urb;
  836 	}
  837 
  838 	dev_info(dev, "Registered iMON driver (lirc minor: %d)\n",
  839 			lirc_minor);
  840 
  841 	/* Needed while unregistering! */
  842 	driver->minor = lirc_minor;
  843 
  844 	context->usbdev = usbdev;
  845 	context->dev_present = 1;
  846 	context->rx_endpoint = rx_endpoint;
  847 	context->rx_urb = rx_urb;
  848 
  849 	/*
  850 	 * tx is used to send characters to lcd/vfd, associate RF
  851 	 * remotes, set IR protocol, and maybe more...
  852 	 */
  853 	context->tx_endpoint = tx_endpoint;
  854 	context->tx_urb = tx_urb;
  855 
  856 	if (display_ep_found)
  857 		context->display = 1;
  858 
  859 	usb_fill_int_urb(context->rx_urb, context->usbdev,
  860 		usb_rcvintpipe(context->usbdev,
  861 			context->rx_endpoint->bEndpointAddress),
  862 		context->usb_rx_buf, sizeof(context->usb_rx_buf),
  863 		usb_rx_callback, context,
  864 		context->rx_endpoint->bInterval);
  865 
  866 	retval = usb_submit_urb(context->rx_urb, GFP_KERNEL);
  867 	if (retval) {
  868 		dev_err(dev, "usb_submit_urb failed for intf0 (%d)\n", retval);
  869 		goto unregister_lirc;
  870 	}
  871 
  872 	usb_set_intfdata(interface, context);
  873 
  874 	if (context->display && ifnum == 0) {
  875 		dev_dbg(dev, "%s: Registering iMON display with sysfs\n",
  876 			__func__);
  877 
  878 		if (usb_register_dev(interface, &imon_class)) {
  879 			/* Not a fatal error, so ignore */
  880 			dev_info(dev, "%s: could not get a minor number for display\n",
  881 				 __func__);
  882 		}
  883 	}
  884 
  885 	dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
  886 		vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum);
  887 
   888 	/* Everything went fine. Just unlock and return retval (which is 0) */
  889 	goto driver_unlock;
  890 
  891 unregister_lirc:
  892 	lirc_unregister_driver(driver->minor);
  893 
  894 free_tx_urb:
  895 	usb_free_urb(tx_urb);
  896 
  897 free_rx_urb:
  898 	usb_free_urb(rx_urb);
  899 
  900 free_lirc_buf:
  901 	lirc_buffer_free(rbuf);
  902 
  903 free_rbuf:
  904 	kfree(rbuf);
  905 
  906 free_driver:
  907 	kfree(driver);
  908 free_context:
  909 	kfree(context);
  910 	context = NULL;
  911 
  912 driver_unlock:
  913 	mutex_unlock(&driver_lock);
  914 
  915 	return retval;
  916 }
  917 
  918 /**
  919  * Callback function for USB core API: disconnect
  920  */
  921 static void imon_disconnect(struct usb_interface *interface)
  922 {
  923 	struct imon_context *context;
  924 	int ifnum;
  925 
  926 	/* prevent races with ir_open()/display_open() */
  927 	mutex_lock(&driver_lock);
  928 
  929 	context = usb_get_intfdata(interface);
  930 	ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
  931 
  932 	mutex_lock(&context->ctx_lock);
  933 
  934 	usb_set_intfdata(interface, NULL);
  935 
  936 	/* Abort ongoing write */
  937 	if (atomic_read(&context->tx.busy)) {
  938 		usb_kill_urb(context->tx_urb);
  939 		complete_all(&context->tx.finished);
  940 	}
  941 
  942 	context->dev_present = 0;
  943 	usb_kill_urb(context->rx_urb);
  944 	if (context->display)
  945 		usb_deregister_dev(interface, &imon_class);
  946 
  947 	if (!context->ir_isopen && !context->dev_present) {
  948 		deregister_from_lirc(context);
  949 		mutex_unlock(&context->ctx_lock);
  950 		if (!context->display_isopen)
  951 			free_imon_context(context);
  952 	} else
  953 		mutex_unlock(&context->ctx_lock);
  954 
  955 	mutex_unlock(&driver_lock);
  956 
  957 	dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n",
  958 		 __func__, ifnum);
  959 }
  960 
  961 static int imon_suspend(struct usb_interface *intf, pm_message_t message)
  962 {
  963 	struct imon_context *context = usb_get_intfdata(intf);
  964 
  965 	usb_kill_urb(context->rx_urb);
  966 
  967 	return 0;
  968 }
  969 
  970 static int imon_resume(struct usb_interface *intf)
  971 {
  972 	struct imon_context *context = usb_get_intfdata(intf);
  973 
  974 	usb_fill_int_urb(context->rx_urb, context->usbdev,
  975 		usb_rcvintpipe(context->usbdev,
  976 			context->rx_endpoint->bEndpointAddress),
  977 		context->usb_rx_buf, sizeof(context->usb_rx_buf),
  978 		usb_rx_callback, context,
  979 		context->rx_endpoint->bInterval);
  980 
  981 	return usb_submit_urb(context->rx_urb, GFP_ATOMIC);
  982 }
  983 
  984 module_usb_driver(imon_driver);
  985 
  986 
  987 
  988 
  989 
  990 /* LDV_COMMENT_BEGIN_MAIN */
  991 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
  992 
  993 /*###########################################################################*/
  994 
  995 /*############## Driver Environment Generator 0.2 output ####################*/
  996 
  997 /*###########################################################################*/
  998 
  999 
 1000 
  1001 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that all kernel resources are correctly released by the driver before it is unloaded. */
 1002 void ldv_check_final_state(void);
 1003 
 1004 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 1005 void ldv_check_return_value(int res);
 1006 
 1007 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 1008 void ldv_check_return_value_probe(int res);
 1009 
 1010 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 1011 void ldv_initialize(void);
 1012 
 1013 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 1014 void ldv_handler_precall(void);
 1015 
  1016 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
 1017 int nondet_int(void);
 1018 
 1019 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 1020 int LDV_IN_INTERRUPT;
 1021 
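
These declarations are left without definitions on purpose: as the error trace notes for each of them ("Function call is skipped due to function is undefined"), the verifier then treats a call such as nondet_int() as returning an arbitrary value. Purely as an illustration, equivalent stubs could look like the sketch below; __VERIFIER_nondet_int() is assumed here as an SV-COMP-style intrinsic and is not something this harness defines.

/* Illustrative stubs only: the generated harness deliberately leaves these
 * undefined so the verifier explores every possible return value.
 * __VERIFIER_nondet_int() is an assumed SV-COMP-style intrinsic. */
extern int __VERIFIER_nondet_int(void);

int nondet_int(void)
{
	return __VERIFIER_nondet_int();	/* any int, chosen by the verifier */
}

void ldv_initialize(void)
{
	/* would reset the LDV model state; intentionally empty in this sketch */
}

void ldv_handler_precall(void)
{
	/* would re-initialize per-call model state; empty in this sketch */
}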
 1022 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 1023 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 1024 
 1025 
 1026 
 1027 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 1028 	/*============================= VARIABLE DECLARATION PART   =============================*/
 1029 	/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1030 	/* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/
 1031 	/* LDV_COMMENT_BEGIN_PREP */
 1032 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1033 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1034 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1035 	#define MOD_NAME	"lirc_imon"
 1036 	#define MOD_VERSION	"0.8"
 1037 	#define DISPLAY_MINOR_BASE	144
 1038 	#define DEVICE_NAME	"lcd%d"
 1039 	#define BUF_CHUNK_SIZE	4
 1040 	#define BUF_SIZE	128
 1041 	#define BIT_DURATION	250	
 1042 	#define IMON_DATA_BUF_SZ	35
 1043 	/* LDV_COMMENT_END_PREP */
 1044 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */
 1045 	struct usb_interface * var_group1;
 1046 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */
 1047 	const struct usb_device_id * var_imon_probe_12_p1;
 1048 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "imon_probe" */
 1049 	static int res_imon_probe_12;
 1050 	/* content: static void imon_disconnect(struct usb_interface *interface)*/
 1051 	/* LDV_COMMENT_BEGIN_PREP */
 1052 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1053 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1054 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1055 	#define MOD_NAME	"lirc_imon"
 1056 	#define MOD_VERSION	"0.8"
 1057 	#define DISPLAY_MINOR_BASE	144
 1058 	#define DEVICE_NAME	"lcd%d"
 1059 	#define BUF_CHUNK_SIZE	4
 1060 	#define BUF_SIZE	128
 1061 	#define BIT_DURATION	250	
 1062 	#define IMON_DATA_BUF_SZ	35
 1063 	/* LDV_COMMENT_END_PREP */
 1064 	/* content: static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/
 1065 	/* LDV_COMMENT_BEGIN_PREP */
 1066 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1067 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1068 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1069 	#define MOD_NAME	"lirc_imon"
 1070 	#define MOD_VERSION	"0.8"
 1071 	#define DISPLAY_MINOR_BASE	144
 1072 	#define DEVICE_NAME	"lcd%d"
 1073 	#define BUF_CHUNK_SIZE	4
 1074 	#define BUF_SIZE	128
 1075 	#define BIT_DURATION	250	
 1076 	#define IMON_DATA_BUF_SZ	35
 1077 	/* LDV_COMMENT_END_PREP */
 1078 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_suspend" */
 1079 	pm_message_t  var_imon_suspend_14_p1;
 1080 	/* content: static int imon_resume(struct usb_interface *intf)*/
 1081 	/* LDV_COMMENT_BEGIN_PREP */
 1082 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1083 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1084 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1085 	#define MOD_NAME	"lirc_imon"
 1086 	#define MOD_VERSION	"0.8"
 1087 	#define DISPLAY_MINOR_BASE	144
 1088 	#define DEVICE_NAME	"lcd%d"
 1089 	#define BUF_CHUNK_SIZE	4
 1090 	#define BUF_SIZE	128
 1091 	#define BIT_DURATION	250	
 1092 	#define IMON_DATA_BUF_SZ	35
 1093 	/* LDV_COMMENT_END_PREP */
 1094 
 1095 
 1096 
 1097 
 1098 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 1099 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 1100 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 1101 	LDV_IN_INTERRUPT=1;
 1102 
 1103 
 1104 
 1105 
 1106 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 1107 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 1108 	/*============================= FUNCTION CALL SECTION       =============================*/
 1109 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 1110 	ldv_initialize();
 1111 	int ldv_s_imon_driver_usb_driver = 0;
 1112 
 1113 
 1114 	while(  nondet_int()
 1115 		|| !(ldv_s_imon_driver_usb_driver == 0)
 1116 	) {
 1117 
 1118 		switch(nondet_int()) {
 1119 
 1120 			case 0: {
 1121 
 1122 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1123 				if(ldv_s_imon_driver_usb_driver==0) {
 1124 
 1125 				/* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/
 1126 				/* LDV_COMMENT_BEGIN_PREP */
 1127 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1128 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1129 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1130 				#define MOD_NAME	"lirc_imon"
 1131 				#define MOD_VERSION	"0.8"
 1132 				#define DISPLAY_MINOR_BASE	144
 1133 				#define DEVICE_NAME	"lcd%d"
 1134 				#define BUF_CHUNK_SIZE	4
 1135 				#define BUF_SIZE	128
 1136 				#define BIT_DURATION	250	
 1137 				#define IMON_DATA_BUF_SZ	35
 1138 				/* LDV_COMMENT_END_PREP */
  1139 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" of the driver structure with callbacks "imon_driver". Standard function test for correct return result. */
 1140 				res_imon_probe_12 = imon_probe( var_group1, var_imon_probe_12_p1);
 1141 				 ldv_check_return_value(res_imon_probe_12);
 1142 				 ldv_check_return_value_probe(res_imon_probe_12);
 1143 				 if(res_imon_probe_12) 
 1144 					goto ldv_module_exit;
 1145 				ldv_s_imon_driver_usb_driver++;
 1146 
 1147 				}
 1148 
 1149 			}
 1150 
 1151 			break;
 1152 			case 1: {
 1153 
 1154 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1155 				if(ldv_s_imon_driver_usb_driver==1) {
 1156 
 1157 				/* content: static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/
 1158 				/* LDV_COMMENT_BEGIN_PREP */
 1159 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1160 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1161 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1162 				#define MOD_NAME	"lirc_imon"
 1163 				#define MOD_VERSION	"0.8"
 1164 				#define DISPLAY_MINOR_BASE	144
 1165 				#define DEVICE_NAME	"lcd%d"
 1166 				#define BUF_CHUNK_SIZE	4
 1167 				#define BUF_SIZE	128
 1168 				#define BIT_DURATION	250	
 1169 				#define IMON_DATA_BUF_SZ	35
 1170 				/* LDV_COMMENT_END_PREP */
 1171 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "imon_driver" */
 1172 				ldv_handler_precall();
 1173 				imon_suspend( var_group1, var_imon_suspend_14_p1);
 1174 				ldv_s_imon_driver_usb_driver++;
 1175 
 1176 				}
 1177 
 1178 			}
 1179 
 1180 			break;
 1181 			case 2: {
 1182 
 1183 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1184 				if(ldv_s_imon_driver_usb_driver==2) {
 1185 
 1186 				/* content: static int imon_resume(struct usb_interface *intf)*/
 1187 				/* LDV_COMMENT_BEGIN_PREP */
 1188 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1189 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1190 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1191 				#define MOD_NAME	"lirc_imon"
 1192 				#define MOD_VERSION	"0.8"
 1193 				#define DISPLAY_MINOR_BASE	144
 1194 				#define DEVICE_NAME	"lcd%d"
 1195 				#define BUF_CHUNK_SIZE	4
 1196 				#define BUF_SIZE	128
 1197 				#define BIT_DURATION	250	
 1198 				#define IMON_DATA_BUF_SZ	35
 1199 				/* LDV_COMMENT_END_PREP */
 1200 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "imon_driver" */
 1201 				ldv_handler_precall();
 1202 				imon_resume( var_group1);
 1203 				ldv_s_imon_driver_usb_driver++;
 1204 
 1205 				}
 1206 
 1207 			}
 1208 
 1209 			break;
 1210 			case 3: {
 1211 
 1212 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1213 				if(ldv_s_imon_driver_usb_driver==3) {
 1214 
 1215 				/* content: static void imon_disconnect(struct usb_interface *interface)*/
 1216 				/* LDV_COMMENT_BEGIN_PREP */
 1217 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1218 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1219 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1220 				#define MOD_NAME	"lirc_imon"
 1221 				#define MOD_VERSION	"0.8"
 1222 				#define DISPLAY_MINOR_BASE	144
 1223 				#define DEVICE_NAME	"lcd%d"
 1224 				#define BUF_CHUNK_SIZE	4
 1225 				#define BUF_SIZE	128
 1226 				#define BIT_DURATION	250	
 1227 				#define IMON_DATA_BUF_SZ	35
 1228 				/* LDV_COMMENT_END_PREP */
 1229 				/* LDV_COMMENT_FUNCTION_CALL Function from field "disconnect" from driver structure with callbacks "imon_driver" */
 1230 				ldv_handler_precall();
 1231 				imon_disconnect( var_group1);
 1232 				ldv_s_imon_driver_usb_driver=0;
 1233 
 1234 				}
 1235 
 1236 			}
 1237 
 1238 			break;
 1239 			default: break;
 1240 
 1241 		}
 1242 
 1243 	}
 1244 
 1245 	ldv_module_exit: 
 1246 
  1247 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 1248 	ldv_final: ldv_check_final_state();
 1249 
 1250 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 1251 	return;
 1252 
 1253 }
 1254 #endif
 1255 
  1256 /* LDV_COMMENT_END_MAIN */
     1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 #include <linux/mutex.h>
    9 #include <verifier/rcv.h>
   10 
   11 struct usb_device;
   12 extern void __ldv_usb_lock_device(struct usb_device *udev);
   13 extern void __ldv_usb_unlock_device(struct usb_device *udev);
   14 extern int __ldv_usb_trylock_device(struct usb_device *udev);
   15 
   16 extern int mutex_lock_interruptible(struct mutex *lock);
   17 extern int mutex_lock_killable(struct mutex *lock);
   18 extern void mutex_lock(struct mutex *lock);
   19 extern int ldv_mutex_lock_interruptible_ctx_lock_of_imon_context(struct mutex *lock);
   20 extern int ldv_mutex_lock_killable_ctx_lock_of_imon_context(struct mutex *lock);
   21 extern void ldv_mutex_lock_nested_ctx_lock_of_imon_context(struct mutex *lock, unsigned int subclass);
   22 extern void ldv_mutex_lock_ctx_lock_of_imon_context(struct mutex *lock);
   23 extern int ldv_mutex_trylock_ctx_lock_of_imon_context(struct mutex *lock);
   24 extern int ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context(atomic_t *cnt, struct mutex *lock);
   25 extern int ldv_mutex_is_locked_ctx_lock_of_imon_context(struct mutex *lock);
   26 extern void ldv_mutex_unlock_ctx_lock_of_imon_context(struct mutex *lock);
   27 
   28 extern void ldv_usb_lock_device_ctx_lock_of_imon_context(void);
   29 extern void ldv_usb_unlock_device_ctx_lock_of_imon_context(void);
   30 extern int ldv_usb_trylock_device_ctx_lock_of_imon_context(void);
   31 extern int ldv_usb_lock_device_for_reset_ctx_lock_of_imon_context(void);
   32 extern int ldv_mutex_lock_interruptible_driver_lock(struct mutex *lock);
   33 extern int ldv_mutex_lock_killable_driver_lock(struct mutex *lock);
   34 extern void ldv_mutex_lock_nested_driver_lock(struct mutex *lock, unsigned int subclass);
   35 extern void ldv_mutex_lock_driver_lock(struct mutex *lock);
   36 extern int ldv_mutex_trylock_driver_lock(struct mutex *lock);
   37 extern int ldv_atomic_dec_and_mutex_lock_driver_lock(atomic_t *cnt, struct mutex *lock);
   38 extern int ldv_mutex_is_locked_driver_lock(struct mutex *lock);
   39 extern void ldv_mutex_unlock_driver_lock(struct mutex *lock);
   40 
   41 extern void ldv_usb_lock_device_driver_lock(void);
   42 extern void ldv_usb_unlock_device_driver_lock(void);
   43 extern int ldv_usb_trylock_device_driver_lock(void);
   44 extern int ldv_usb_lock_device_for_reset_driver_lock(void);
   45 extern int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock);
   46 extern int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock);
   47 extern void ldv_mutex_lock_nested_i_mutex_of_inode(struct mutex *lock, unsigned int subclass);
   48 extern void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock);
   49 extern int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock);
   50 extern int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock);
   51 extern int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock);
   52 extern void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock);
   53 
   54 extern void ldv_usb_lock_device_i_mutex_of_inode(void);
   55 extern void ldv_usb_unlock_device_i_mutex_of_inode(void);
   56 extern int ldv_usb_trylock_device_i_mutex_of_inode(void);
   57 extern int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void);
   58 extern int ldv_mutex_lock_interruptible_lock(struct mutex *lock);
   59 extern int ldv_mutex_lock_killable_lock(struct mutex *lock);
   60 extern void ldv_mutex_lock_nested_lock(struct mutex *lock, unsigned int subclass);
   61 extern void ldv_mutex_lock_lock(struct mutex *lock);
   62 extern int ldv_mutex_trylock_lock(struct mutex *lock);
   63 extern int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock);
   64 extern int ldv_mutex_is_locked_lock(struct mutex *lock);
   65 extern void ldv_mutex_unlock_lock(struct mutex *lock);
   66 
   67 extern void ldv_usb_lock_device_lock(void);
   68 extern void ldv_usb_unlock_device_lock(void);
   69 extern int ldv_usb_trylock_device_lock(void);
   70 extern int ldv_usb_lock_device_for_reset_lock(void);
   71 extern int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock);
   72 extern int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock);
   73 extern void ldv_mutex_lock_nested_mutex_of_device(struct mutex *lock, unsigned int subclass);
   74 extern void ldv_mutex_lock_mutex_of_device(struct mutex *lock);
   75 extern int ldv_mutex_trylock_mutex_of_device(struct mutex *lock);
   76 extern int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock);
   77 extern int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock);
   78 extern void ldv_mutex_unlock_mutex_of_device(struct mutex *lock);
   79 
   80 extern void ldv_usb_lock_device_mutex_of_device(void);
   81 extern void ldv_usb_unlock_device_mutex_of_device(void);
   82 extern int ldv_usb_trylock_device_mutex_of_device(void);
   83 extern int ldv_usb_lock_device_for_reset_mutex_of_device(void);
   84 #line 1 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.c"
   85 
   86 /*
   87  *   lirc_imon.c:  LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD
   88  *		   including the iMON PAD model
   89  *
   90  *   Copyright(C) 2004  Venky Raju(dev@venky.ws)
   91  *   Copyright(C) 2009  Jarod Wilson <jarod@wilsonet.com>
   92  *
   93  *   lirc_imon is free software; you can redistribute it and/or modify
   94  *   it under the terms of the GNU General Public License as published by
   95  *   the Free Software Foundation; either version 2 of the License, or
   96  *   (at your option) any later version.
   97  *
   98  *   This program is distributed in the hope that it will be useful,
   99  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  100  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  101  *   GNU General Public License for more details.
  102  *
  103  *   You should have received a copy of the GNU General Public License
  104  *   along with this program; if not, write to the Free Software
  105  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  106  */
  107 
  108 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  109 
  110 #include <linux/errno.h>
  111 #include <linux/kernel.h>
  112 #include <linux/module.h>
  113 #include <linux/slab.h>
  114 #include <linux/uaccess.h>
  115 #include <linux/usb.h>
  116 
  117 #include <media/lirc.h>
  118 #include <media/lirc_dev.h>
  119 
  120 
  121 #define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
  122 #define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
  123 #define MOD_NAME	"lirc_imon"
  124 #define MOD_VERSION	"0.8"
  125 
  126 #define DISPLAY_MINOR_BASE	144
  127 #define DEVICE_NAME	"lcd%d"
  128 
  129 #define BUF_CHUNK_SIZE	4
  130 #define BUF_SIZE	128
  131 
  132 #define BIT_DURATION	250	/* each bit received is 250us */
  133 
  134 /*** P R O T O T Y P E S ***/
  135 
  136 /* USB Callback prototypes */
  137 static int imon_probe(struct usb_interface *interface,
  138 		      const struct usb_device_id *id);
  139 static void imon_disconnect(struct usb_interface *interface);
  140 static void usb_rx_callback(struct urb *urb);
  141 static void usb_tx_callback(struct urb *urb);
  142 
  143 /* suspend/resume support */
  144 static int imon_resume(struct usb_interface *intf);
  145 static int imon_suspend(struct usb_interface *intf, pm_message_t message);
  146 
  147 /* Display file_operations function prototypes */
  148 static int display_open(struct inode *inode, struct file *file);
  149 static int display_close(struct inode *inode, struct file *file);
  150 
  151 /* VFD write operation */
  152 static ssize_t vfd_write(struct file *file, const char __user *buf,
  153 			 size_t n_bytes, loff_t *pos);
  154 
  155 /* LIRC driver function prototypes */
  156 static int ir_open(void *data);
  157 static void ir_close(void *data);
  158 
  159 /*** G L O B A L S ***/
  160 #define IMON_DATA_BUF_SZ	35
  161 
  162 struct imon_context {
  163 	struct usb_device *usbdev;
  164 	/* Newer devices have two interfaces */
  165 	int display;			/* not all controllers do */
  166 	int display_isopen;		/* display port has been opened */
  167 	int ir_isopen;			/* IR port open	*/
  168 	int dev_present;		/* USB device presence */
  169 	struct mutex ctx_lock;		/* to lock this object */
  170 	wait_queue_head_t remove_ok;	/* For unexpected USB disconnects */
  171 
   172 	int vfd_proto_6p;		/* some VFDs require a 6th packet */
  173 
  174 	struct lirc_driver *driver;
  175 	struct usb_endpoint_descriptor *rx_endpoint;
  176 	struct usb_endpoint_descriptor *tx_endpoint;
  177 	struct urb *rx_urb;
  178 	struct urb *tx_urb;
  179 	unsigned char usb_rx_buf[8];
  180 	unsigned char usb_tx_buf[8];
  181 
  182 	struct rx_data {
  183 		int count;		/* length of 0 or 1 sequence */
  184 		int prev_bit;		/* logic level of sequence */
  185 		int initial_space;	/* initial space flag */
  186 	} rx;
  187 
  188 	struct tx_t {
  189 		unsigned char data_buf[IMON_DATA_BUF_SZ]; /* user data buffer */
  190 		struct completion finished;	/* wait for write to finish */
  191 		atomic_t busy;			/* write in progress */
  192 		int status;			/* status of tx completion */
  193 	} tx;
  194 };
  195 
  196 static const struct file_operations display_fops = {
  197 	.owner		= THIS_MODULE,
  198 	.open		= &display_open,
  199 	.write		= &vfd_write,
  200 	.release	= &display_close,
  201 	.llseek		= noop_llseek,
  202 };
  203 
  204 /*
  205  * USB Device ID for iMON USB Control Boards
  206  *
  207  * The Windows drivers contain 6 different inf files, more or less one for
  208  * each new device until the 0x0034-0x0046 devices, which all use the same
  209  * driver. Some of the devices in the 34-46 range haven't been definitively
  210  * identified yet. Early devices have either a TriGem Computer, Inc. or a
  211  * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
  212  * devices use the SoundGraph vendor ID (0x15c2).
  213  */
  214 static struct usb_device_id imon_usb_id_table[] = {
  215 	/* TriGem iMON (IR only) -- TG_iMON.inf */
  216 	{ USB_DEVICE(0x0aa8, 0x8001) },
  217 
  218 	/* SoundGraph iMON (IR only) -- sg_imon.inf */
  219 	{ USB_DEVICE(0x04e8, 0xff30) },
  220 
  221 	/* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */
  222 	{ USB_DEVICE(0x0aa8, 0xffda) },
  223 
  224 	/* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */
  225 	{ USB_DEVICE(0x15c2, 0xffda) },
  226 
  227 	{}
  228 };
  229 
   230 /* Some iMON VFD models require a 6th packet for VFD writes */
  231 static struct usb_device_id vfd_proto_6p_list[] = {
  232 	{ USB_DEVICE(0x15c2, 0xffda) },
  233 	{}
  234 };
  235 
  236 /* Some iMON devices have no lcd/vfd, don't set one up */
  237 static struct usb_device_id ir_only_list[] = {
  238 	{ USB_DEVICE(0x0aa8, 0x8001) },
  239 	{ USB_DEVICE(0x04e8, 0xff30) },
  240 	{}
  241 };
  242 
  243 /* USB Device data */
  244 static struct usb_driver imon_driver = {
  245 	.name		= MOD_NAME,
  246 	.probe		= imon_probe,
  247 	.disconnect	= imon_disconnect,
  248 	.suspend	= imon_suspend,
  249 	.resume		= imon_resume,
  250 	.id_table	= imon_usb_id_table,
  251 };
  252 
  253 static struct usb_class_driver imon_class = {
  254 	.name		= DEVICE_NAME,
  255 	.fops		= &display_fops,
  256 	.minor_base	= DISPLAY_MINOR_BASE,
  257 };
  258 
  259 /* to prevent races between open() and disconnect(), probing, etc */
  260 static DEFINE_MUTEX(driver_lock);
  261 
  262 static int debug;
  263 
  264 /***  M O D U L E   C O D E ***/
  265 
  266 MODULE_AUTHOR(MOD_AUTHOR);
  267 MODULE_DESCRIPTION(MOD_DESC);
  268 MODULE_VERSION(MOD_VERSION);
  269 MODULE_LICENSE("GPL");
  270 MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
  271 module_param(debug, int, S_IRUGO | S_IWUSR);
  272 MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)");
  273 
  274 static void free_imon_context(struct imon_context *context)
  275 {
  276 	struct device *dev = context->driver->dev;
  277 
  278 	usb_free_urb(context->tx_urb);
  279 	usb_free_urb(context->rx_urb);
  280 	lirc_buffer_free(context->driver->rbuf);
  281 	kfree(context->driver->rbuf);
  282 	kfree(context->driver);
  283 	kfree(context);
  284 
  285 	dev_dbg(dev, "%s: iMON context freed\n", __func__);
  286 }
  287 
  288 static void deregister_from_lirc(struct imon_context *context)
  289 {
  290 	int retval;
  291 	int minor = context->driver->minor;
  292 
  293 	retval = lirc_unregister_driver(minor);
  294 	if (retval)
  295 		dev_err(&context->usbdev->dev,
  296 			"unable to deregister from lirc(%d)", retval);
  297 	else
  298 		dev_info(&context->usbdev->dev,
  299 			 "Deregistered iMON driver (minor:%d)\n", minor);
  300 
  301 }
  302 
  303 /**
  304  * Called when the Display device (e.g. /dev/lcd0)
  305  * is opened by the application.
  306  */
  307 static int display_open(struct inode *inode, struct file *file)
  308 {
  309 	struct usb_interface *interface;
  310 	struct imon_context *context = NULL;
  311 	int subminor;
  312 	int retval = 0;
  313 
  314 	/* prevent races with disconnect */
  315 	mutex_lock(&driver_lock);
  316 
  317 	subminor = iminor(inode);
  318 	interface = usb_find_interface(&imon_driver, subminor);
  319 	if (!interface) {
  320 		pr_err("%s: could not find interface for minor %d\n",
  321 		       __func__, subminor);
  322 		retval = -ENODEV;
  323 		goto exit;
  324 	}
  325 	context = usb_get_intfdata(interface);
  326 
  327 	if (!context) {
  328 		dev_err(&interface->dev, "no context found for minor %d\n",
  329 			subminor);
  330 		retval = -ENODEV;
  331 		goto exit;
  332 	}
  333 
  334 	mutex_lock(&context->ctx_lock);
  335 
  336 	if (!context->display) {
  337 		dev_err(&interface->dev,
  338 			"%s: display not supported by device\n", __func__);
  339 		retval = -ENODEV;
  340 	} else if (context->display_isopen) {
  341 		dev_err(&interface->dev,
  342 			"%s: display port is already open\n", __func__);
  343 		retval = -EBUSY;
  344 	} else {
  345 		context->display_isopen = 1;
  346 		file->private_data = context;
  347 		dev_info(context->driver->dev, "display port opened\n");
  348 	}
  349 
  350 	mutex_unlock(&context->ctx_lock);
  351 
  352 exit:
  353 	mutex_unlock(&driver_lock);
  354 	return retval;
  355 }
  356 
  357 /**
  358  * Called when the display device (e.g. /dev/lcd0)
  359  * is closed by the application.
  360  */
  361 static int display_close(struct inode *inode, struct file *file)
  362 {
  363 	struct imon_context *context = NULL;
  364 	int retval = 0;
  365 
  366 	context = file->private_data;
  367 
  368 	if (!context) {
  369 		pr_err("%s: no context for device\n", __func__);
  370 		return -ENODEV;
  371 	}
  372 
  373 	mutex_lock(&context->ctx_lock);
  374 
  375 	if (!context->display) {
  376 		dev_err(&context->usbdev->dev,
  377 			"%s: display not supported by device\n", __func__);
  378 		retval = -ENODEV;
  379 	} else if (!context->display_isopen) {
  380 		dev_err(&context->usbdev->dev,
  381 			"%s: display is not open\n", __func__);
  382 		retval = -EIO;
  383 	} else {
  384 		context->display_isopen = 0;
  385 		dev_info(context->driver->dev, "display port closed\n");
  386 		if (!context->dev_present && !context->ir_isopen) {
  387 			/*
  388 			 * Device disconnected before close and IR port is not
  389 			 * open. If IR port is open, context will be deleted by
  390 			 * ir_close.
  391 			 */
  392 			mutex_unlock(&context->ctx_lock);
  393 			free_imon_context(context);
  394 			return retval;
  395 		}
  396 	}
  397 
  398 	mutex_unlock(&context->ctx_lock);
  399 	return retval;
  400 }
  401 
  402 /**
  403  * Sends a packet to the device -- this function must be called
  404  * with context->ctx_lock held.
  405  */
  406 static int send_packet(struct imon_context *context)
  407 {
  408 	unsigned int pipe;
  409 	int interval = 0;
  410 	int retval = 0;
  411 
  412 	/* Check if we need to use control or interrupt urb */
  413 	pipe = usb_sndintpipe(context->usbdev,
  414 			      context->tx_endpoint->bEndpointAddress);
  415 	interval = context->tx_endpoint->bInterval;
  416 
  417 	usb_fill_int_urb(context->tx_urb, context->usbdev, pipe,
  418 			 context->usb_tx_buf,
  419 			 sizeof(context->usb_tx_buf),
  420 			 usb_tx_callback, context, interval);
  421 
  422 	context->tx_urb->actual_length = 0;
  423 
  424 	init_completion(&context->tx.finished);
  425 	atomic_set(&context->tx.busy, 1);
  426 
  427 	retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
  428 	if (retval) {
  429 		atomic_set(&context->tx.busy, 0);
  430 		dev_err(&context->usbdev->dev, "error submitting urb(%d)\n",
  431 			retval);
  432 	} else {
  433 		/* Wait for transmission to complete (or abort) */
  434 		mutex_unlock(&context->ctx_lock);
  435 		retval = wait_for_completion_interruptible(
  436 				&context->tx.finished);
  437 		if (retval)
  438 			dev_err(&context->usbdev->dev,
  439 				"%s: task interrupted\n", __func__);
  440 		mutex_lock(&context->ctx_lock);
  441 
  442 		retval = context->tx.status;
  443 		if (retval)
  444 			dev_err(&context->usbdev->dev,
  445 				"packet tx failed (%d)\n", retval);
  446 	}
  447 
  448 	return retval;
  449 }
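
Editor's note: send_packet() above relies on a locking pattern worth calling out: the caller enters with ctx_lock held, but the lock is released while waiting for the tx completion and re-acquired afterwards, so the URB completion handler never contends with the caller's lock. The following user-space analogy of that pattern uses POSIX threads and a semaphore in place of the kernel's mutex and completion; the names and timing are illustrative only, not part of the driver.

/*
 * Sketch of the "drop the lock while waiting for an async completion"
 * pattern used by send_packet().  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t finished;				/* stands in for struct completion */

static void *async_completion(void *arg)
{
	(void)arg;
	usleep(1000);				/* pretend the URB completes */
	sem_post(&finished);			/* complete(&tx.finished) analogue */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&finished, 0, 0);
	pthread_mutex_lock(&ctx_lock);		/* caller holds the context lock */

	pthread_create(&t, NULL, async_completion, NULL);

	pthread_mutex_unlock(&ctx_lock);	/* drop the lock while waiting */
	sem_wait(&finished);			/* wait_for_completion() analogue */
	pthread_mutex_lock(&ctx_lock);		/* re-acquire before touching state */

	pthread_mutex_unlock(&ctx_lock);
	pthread_join(t, NULL);
	sem_destroy(&finished);
	puts("transfer finished with the lock correctly re-acquired");
	return 0;
}
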
  450 
  451 /**
  452  * Writes data to the VFD.  The iMON VFD is 2x16 characters
  453  * and requires data in 5 consecutive USB interrupt packets,
   454  * each packet carrying 7 bytes.
   455  *
   456  * I don't know whether the VFD board supports features such as
   457  * scrolling, clearing rows, blanking, etc., so
   458  * the caller must provide a full screen of data.  If fewer
   459  * than 32 bytes are provided, spaces will be appended to
  460  * generate a full screen.
  461  */
  462 static ssize_t vfd_write(struct file *file, const char __user *buf,
  463 			 size_t n_bytes, loff_t *pos)
  464 {
  465 	int i;
  466 	int offset;
  467 	int seq;
  468 	int retval = 0;
  469 	struct imon_context *context;
  470 	const unsigned char vfd_packet6[] = {
  471 		0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
  472 	int *data_buf = NULL;
  473 
  474 	context = file->private_data;
  475 	if (!context) {
  476 		pr_err("%s: no context for device\n", __func__);
  477 		return -ENODEV;
  478 	}
  479 
  480 	mutex_lock(&context->ctx_lock);
  481 
  482 	if (!context->dev_present) {
  483 		dev_err(&context->usbdev->dev,
  484 			"%s: no iMON device present\n", __func__);
  485 		retval = -ENODEV;
  486 		goto exit;
  487 	}
  488 
  489 	if (n_bytes <= 0 || n_bytes > IMON_DATA_BUF_SZ - 3) {
  490 		dev_err(&context->usbdev->dev,
  491 			"%s: invalid payload size\n", __func__);
  492 		retval = -EINVAL;
  493 		goto exit;
  494 	}
  495 
  496 	data_buf = memdup_user(buf, n_bytes);
  497 	if (IS_ERR(data_buf)) {
  498 		retval = PTR_ERR(data_buf);
  499 		data_buf = NULL;
  500 		goto exit;
  501 	}
  502 
  503 	memcpy(context->tx.data_buf, data_buf, n_bytes);
  504 
  505 	/* Pad with spaces */
  506 	for (i = n_bytes; i < IMON_DATA_BUF_SZ - 3; ++i)
  507 		context->tx.data_buf[i] = ' ';
  508 
  509 	for (i = IMON_DATA_BUF_SZ - 3; i < IMON_DATA_BUF_SZ; ++i)
  510 		context->tx.data_buf[i] = 0xFF;
  511 
  512 	offset = 0;
  513 	seq = 0;
  514 
  515 	do {
  516 		memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7);
  517 		context->usb_tx_buf[7] = (unsigned char) seq;
  518 
  519 		retval = send_packet(context);
  520 		if (retval) {
  521 			dev_err(&context->usbdev->dev,
  522 				"send packet failed for packet #%d\n",
  523 				seq / 2);
  524 			goto exit;
  525 		} else {
  526 			seq += 2;
  527 			offset += 7;
  528 		}
  529 
  530 	} while (offset < IMON_DATA_BUF_SZ);
  531 
  532 	if (context->vfd_proto_6p) {
  533 		/* Send packet #6 */
  534 		memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
  535 		context->usb_tx_buf[7] = (unsigned char) seq;
  536 		retval = send_packet(context);
  537 		if (retval)
  538 			dev_err(&context->usbdev->dev,
  539 				"send packet failed for packet #%d\n",
  540 				seq / 2);
  541 	}
  542 
  543 exit:
  544 	mutex_unlock(&context->ctx_lock);
  545 	kfree(data_buf);
  546 
  547 	return (!retval) ? n_bytes : retval;
  548 }
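
Editor's note: a minimal, standalone sketch of the buffer layout vfd_write() builds, assuming only what the code above shows: up to 32 display bytes padded with spaces, three trailing 0xFF bytes, and transmission in five chunks of 7 data bytes plus a sequence byte. The message text and printout are illustrative only.

/* Sketch of the 35-byte VFD buffer and its 5-packet chunking. */
#include <stdio.h>
#include <string.h>

#define DATA_BUF_SZ 35				/* mirrors IMON_DATA_BUF_SZ */

int main(void)
{
	const char *msg = "Hello iMON";		/* illustrative payload (<= 32 chars) */
	unsigned char data[DATA_BUF_SZ];
	unsigned char packet[8];
	size_t n = strlen(msg);
	int offset = 0, seq = 0, i;

	memcpy(data, msg, n);
	for (i = (int)n; i < DATA_BUF_SZ - 3; ++i)	/* pad display area to 32 chars */
		data[i] = ' ';
	for (i = DATA_BUF_SZ - 3; i < DATA_BUF_SZ; ++i)
		data[i] = 0xFF;				/* three terminator bytes */

	do {						/* 5 packets: 7 data bytes + seq */
		memcpy(packet, data + offset, 7);
		packet[7] = (unsigned char)seq;
		printf("packet #%d: 7 data bytes, seq byte 0x%02x\n",
		       seq / 2, packet[7]);
		seq += 2;
		offset += 7;
	} while (offset < DATA_BUF_SZ);

	return 0;
}
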
  549 
  550 /**
  551  * Callback function for USB core API: transmit data
  552  */
  553 static void usb_tx_callback(struct urb *urb)
  554 {
  555 	struct imon_context *context;
  556 
  557 	if (!urb)
  558 		return;
  559 	context = (struct imon_context *)urb->context;
  560 	if (!context)
  561 		return;
  562 
  563 	context->tx.status = urb->status;
  564 
  565 	/* notify waiters that write has finished */
  566 	atomic_set(&context->tx.busy, 0);
  567 	complete(&context->tx.finished);
  568 }
  569 
  570 /**
  571  * Called by lirc_dev when the application opens /dev/lirc
  572  */
  573 static int ir_open(void *data)
  574 {
  575 	struct imon_context *context;
  576 
  577 	/* prevent races with disconnect */
  578 	mutex_lock(&driver_lock);
  579 
  580 	context = data;
  581 
  582 	/* initial IR protocol decode variables */
  583 	context->rx.count = 0;
  584 	context->rx.initial_space = 1;
  585 	context->rx.prev_bit = 0;
  586 
  587 	context->ir_isopen = 1;
  588 	dev_info(context->driver->dev, "IR port opened\n");
  589 
  590 	mutex_unlock(&driver_lock);
  591 	return 0;
  592 }
  593 
  594 /**
  595  * Called by lirc_dev when the application closes /dev/lirc
  596  */
  597 static void ir_close(void *data)
  598 {
  599 	struct imon_context *context;
  600 
  601 	context = data;
  602 	if (!context) {
  603 		pr_err("%s: no context for device\n", __func__);
  604 		return;
  605 	}
  606 
  607 	mutex_lock(&context->ctx_lock);
  608 
  609 	context->ir_isopen = 0;
  610 	dev_info(context->driver->dev, "IR port closed\n");
  611 
  612 	if (!context->dev_present) {
  613 		/*
  614 		 * Device disconnected while IR port was still open. Driver
  615 		 * was not deregistered at disconnect time, so do it now.
  616 		 */
  617 		deregister_from_lirc(context);
  618 
  619 		if (!context->display_isopen) {
  620 			mutex_unlock(&context->ctx_lock);
  621 			free_imon_context(context);
  622 			return;
  623 		}
  624 		/*
  625 		 * If display port is open, context will be deleted by
  626 		 * display_close
  627 		 */
  628 	}
  629 
  630 	mutex_unlock(&context->ctx_lock);
  631 }
  632 
  633 /**
  634  * Convert bit count to time duration (in us) and submit
  635  * the value to lirc_dev.
  636  */
  637 static void submit_data(struct imon_context *context)
  638 {
  639 	unsigned char buf[4];
  640 	int value = context->rx.count;
  641 	int i;
  642 
  643 	dev_dbg(context->driver->dev, "submitting data to LIRC\n");
  644 
  645 	value *= BIT_DURATION;
  646 	value &= PULSE_MASK;
  647 	if (context->rx.prev_bit)
  648 		value |= PULSE_BIT;
  649 
  650 	for (i = 0; i < 4; ++i)
  651 		buf[i] = value>>(i*8);
  652 
  653 	lirc_buffer_write(context->driver->rbuf, buf);
  654 	wake_up(&context->driver->rbuf->wait_poll);
  655 }
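
Editor's note: the conversion in submit_data() can be checked in isolation. The sketch below reproduces the arithmetic (bit count times 250 us, masked into the LIRC duration field, pulse flag set for pulses, packed little-endian); the PULSE_BIT/PULSE_MASK constants are the conventional lirc.h values and are assumed here rather than taken from the trace.

/* Sketch of the pulse/space encoding performed by submit_data(). */
#include <stdio.h>

#define SKETCH_BIT_DURATION 250		/* us per received bit, as in the driver */
#define SKETCH_PULSE_BIT    0x01000000	/* assumed lirc.h PULSE_BIT value */
#define SKETCH_PULSE_MASK   0x00FFFFFF	/* assumed lirc.h PULSE_MASK value */

static unsigned int encode_run(int count, int is_pulse)
{
	unsigned int value = (unsigned int)count * SKETCH_BIT_DURATION;

	value &= SKETCH_PULSE_MASK;		/* keep only the duration field */
	if (is_pulse)
		value |= SKETCH_PULSE_BIT;	/* mark the sample as a pulse */
	return value;
}

int main(void)
{
	unsigned int v = encode_run(4, 1);	/* 4-bit pulse run = 1000 us */
	unsigned char buf[4];
	int i;

	for (i = 0; i < 4; ++i)			/* little-endian byte order, as in the driver */
		buf[i] = v >> (i * 8);

	printf("encoded 0x%08x -> %02x %02x %02x %02x\n",
	       v, buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
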
  656 
  657 /**
  658  * Process the incoming packet
  659  */
  660 static void imon_incoming_packet(struct imon_context *context,
  661 				 struct urb *urb, int intf)
  662 {
  663 	int len = urb->actual_length;
  664 	unsigned char *buf = urb->transfer_buffer;
  665 	struct device *dev = context->driver->dev;
  666 	int octet, bit;
  667 	unsigned char mask;
  668 
  669 	/*
  670 	 * just bail out if no listening IR client
  671 	 */
  672 	if (!context->ir_isopen)
  673 		return;
  674 
  675 	if (len != 8) {
  676 		dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
  677 			__func__, len, intf);
  678 		return;
  679 	}
  680 
  681 	if (debug)
  682 		dev_info(dev, "raw packet: %*ph\n", len, buf);
  683 	/*
  684 	 * Translate received data to pulse and space lengths.
  685 	 * Received data is active low, i.e. pulses are 0 and
  686 	 * spaces are 1.
  687 	 *
  688 	 * My original algorithm was essentially similar to
  689 	 * Changwoo Ryu's with the exception that he switched
  690 	 * the incoming bits to active high and also fed an
  691 	 * initial space to LIRC at the start of a new sequence
  692 	 * if the previous bit was a pulse.
  693 	 *
  694 	 * I've decided to adopt his algorithm.
  695 	 */
  696 
  697 	if (buf[7] == 1 && context->rx.initial_space) {
  698 		/* LIRC requires a leading space */
  699 		context->rx.prev_bit = 0;
  700 		context->rx.count = 4;
  701 		submit_data(context);
  702 		context->rx.count = 0;
  703 	}
  704 
  705 	for (octet = 0; octet < 5; ++octet) {
  706 		mask = 0x80;
  707 		for (bit = 0; bit < 8; ++bit) {
  708 			int curr_bit = !(buf[octet] & mask);
  709 
  710 			if (curr_bit != context->rx.prev_bit) {
  711 				if (context->rx.count) {
  712 					submit_data(context);
  713 					context->rx.count = 0;
  714 				}
  715 				context->rx.prev_bit = curr_bit;
  716 			}
  717 			++context->rx.count;
  718 			mask >>= 1;
  719 		}
  720 	}
  721 
  722 	if (buf[7] == 10) {
  723 		if (context->rx.count) {
  724 			submit_data(context);
  725 			context->rx.count = 0;
  726 		}
  727 		context->rx.initial_space = context->rx.prev_bit;
  728 	}
  729 }
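
Editor's note: the run-length decode in imon_incoming_packet() is compact enough to demonstrate standalone. The sketch below scans five bytes MSB-first, treats bits as active low (0 = pulse, 1 = space), and flushes the accumulated count whenever the level changes; the example payload is made up.

/* Sketch of the active-low run-length decoding of an iMON IR packet. */
#include <stdio.h>

int main(void)
{
	/* illustrative payload: the first five bytes carry the IR waveform */
	unsigned char buf[5] = { 0x0F, 0xFF, 0x00, 0xF0, 0xFF };
	int prev_bit = 0, count = 0;
	int octet, bit;

	for (octet = 0; octet < 5; ++octet) {
		unsigned char mask = 0x80;

		for (bit = 0; bit < 8; ++bit) {
			int curr_bit = !(buf[octet] & mask);	/* active low: 0 = pulse */

			if (curr_bit != prev_bit) {
				if (count)			/* flush the finished run */
					printf("%s run of %d bits\n",
					       prev_bit ? "pulse" : "space",
					       count);
				count = 0;
				prev_bit = curr_bit;
			}
			++count;
			mask >>= 1;
		}
	}
	printf("trailing %s run of %d bits\n",
	       prev_bit ? "pulse" : "space", count);
	return 0;
}
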
  730 
  731 /**
  732  * Callback function for USB core API: receive data
  733  */
  734 static void usb_rx_callback(struct urb *urb)
  735 {
  736 	struct imon_context *context;
  737 	int intfnum = 0;
  738 
  739 	if (!urb)
  740 		return;
  741 
  742 	context = (struct imon_context *)urb->context;
  743 	if (!context)
  744 		return;
  745 
  746 	switch (urb->status) {
  747 	case -ENOENT:		/* usbcore unlink successful! */
  748 		return;
  749 
  750 	case 0:
  751 		imon_incoming_packet(context, urb, intfnum);
  752 		break;
  753 
  754 	default:
  755 		dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n",
  756 			 __func__, urb->status);
  757 		break;
  758 	}
  759 
  760 	usb_submit_urb(context->rx_urb, GFP_ATOMIC);
  761 }
  762 
  763 /**
  764  * Callback function for USB core API: Probe
  765  */
  766 static int imon_probe(struct usb_interface *interface,
  767 		      const struct usb_device_id *id)
  768 {
  769 	struct usb_device *usbdev = NULL;
  770 	struct usb_host_interface *iface_desc = NULL;
  771 	struct usb_endpoint_descriptor *rx_endpoint = NULL;
  772 	struct usb_endpoint_descriptor *tx_endpoint = NULL;
  773 	struct urb *rx_urb = NULL;
  774 	struct urb *tx_urb = NULL;
  775 	struct lirc_driver *driver = NULL;
  776 	struct lirc_buffer *rbuf = NULL;
  777 	struct device *dev = &interface->dev;
  778 	int ifnum;
  779 	int lirc_minor = 0;
  780 	int num_endpts;
  781 	int retval = -ENOMEM;
  782 	int display_ep_found = 0;
  783 	int ir_ep_found = 0;
  784 	int vfd_proto_6p = 0;
  785 	struct imon_context *context = NULL;
  786 	int i;
  787 	u16 vendor, product;
  788 
  789 	/* prevent races probing devices w/multiple interfaces */
  790 	mutex_lock(&driver_lock);
  791 
  792 	context = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
  793 	if (!context)
  794 		goto driver_unlock;
  795 
  796 	/*
  797 	 * Try to auto-detect the type of display if the user hasn't set
  798 	 * it by hand via the display_type modparam. Default is VFD.
  799 	 */
  800 	if (usb_match_id(interface, ir_only_list))
  801 		context->display = 0;
  802 	else
  803 		context->display = 1;
  804 
  805 	usbdev     = usb_get_dev(interface_to_usbdev(interface));
  806 	iface_desc = interface->cur_altsetting;
  807 	num_endpts = iface_desc->desc.bNumEndpoints;
  808 	ifnum      = iface_desc->desc.bInterfaceNumber;
  809 	vendor     = le16_to_cpu(usbdev->descriptor.idVendor);
  810 	product    = le16_to_cpu(usbdev->descriptor.idProduct);
  811 
  812 	dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
  813 		__func__, vendor, product, ifnum);
  814 
  815 	/*
  816 	 * Scan the endpoint list and set:
  817 	 *	first input endpoint = IR endpoint
  818 	 *	first output endpoint = display endpoint
  819 	 */
  820 	for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
  821 		struct usb_endpoint_descriptor *ep;
  822 		int ep_dir;
  823 		int ep_type;
  824 
  825 		ep = &iface_desc->endpoint[i].desc;
  826 		ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
  827 		ep_type = usb_endpoint_type(ep);
  828 
  829 		if (!ir_ep_found &&
  830 			ep_dir == USB_DIR_IN &&
  831 			ep_type == USB_ENDPOINT_XFER_INT) {
  832 
  833 			rx_endpoint = ep;
  834 			ir_ep_found = 1;
  835 			dev_dbg(dev, "%s: found IR endpoint\n", __func__);
  836 
  837 		} else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
  838 			   ep_type == USB_ENDPOINT_XFER_INT) {
  839 			tx_endpoint = ep;
  840 			display_ep_found = 1;
  841 			dev_dbg(dev, "%s: found display endpoint\n", __func__);
  842 		}
  843 	}
  844 
  845 	/*
  846 	 * Some iMON receivers have no display. Unfortunately, it seems
  847 	 * that SoundGraph recycles device IDs between devices both with
  848 	 * and without... :\
  849 	 */
  850 	if (context->display == 0) {
  851 		display_ep_found = 0;
  852 		dev_dbg(dev, "%s: device has no display\n", __func__);
  853 	}
  854 
  855 	/* Input endpoint is mandatory */
  856 	if (!ir_ep_found) {
  857 		dev_err(dev, "%s: no valid input (IR) endpoint found.\n",
  858 			__func__);
  859 		retval = -ENODEV;
  860 		goto free_context;
  861 	}
  862 
  863 	/* Determine if display requires 6 packets */
  864 	if (display_ep_found) {
  865 		if (usb_match_id(interface, vfd_proto_6p_list))
  866 			vfd_proto_6p = 1;
  867 
  868 		dev_dbg(dev, "%s: vfd_proto_6p: %d\n",
  869 			__func__, vfd_proto_6p);
  870 	}
  871 
  872 	driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
  873 	if (!driver)
  874 		goto free_context;
  875 
  876 	rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
  877 	if (!rbuf)
  878 		goto free_driver;
  879 
  880 	if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
  881 		dev_err(dev, "%s: lirc_buffer_init failed\n", __func__);
  882 		goto free_rbuf;
  883 	}
  884 	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
  885 	if (!rx_urb) {
  886 		dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__);
  887 		goto free_lirc_buf;
  888 	}
  889 	tx_urb = usb_alloc_urb(0, GFP_KERNEL);
  890 	if (!tx_urb) {
  891 		dev_err(dev, "%s: usb_alloc_urb failed for display urb\n",
  892 		    __func__);
  893 		goto free_rx_urb;
  894 	}
  895 
  896 	mutex_init(&context->ctx_lock);
  897 	context->vfd_proto_6p = vfd_proto_6p;
  898 
  899 	strcpy(driver->name, MOD_NAME);
  900 	driver->minor = -1;
  901 	driver->code_length = BUF_CHUNK_SIZE * 8;
  902 	driver->sample_rate = 0;
  903 	driver->features = LIRC_CAN_REC_MODE2;
  904 	driver->data = context;
  905 	driver->rbuf = rbuf;
  906 	driver->set_use_inc = ir_open;
  907 	driver->set_use_dec = ir_close;
  908 	driver->dev = &interface->dev;
  909 	driver->owner = THIS_MODULE;
  910 
  911 	mutex_lock(&context->ctx_lock);
  912 
  913 	context->driver = driver;
  914 	/* start out in keyboard mode */
  915 
  916 	lirc_minor = lirc_register_driver(driver);
  917 	if (lirc_minor < 0) {
  918 		dev_err(dev, "%s: lirc_register_driver failed\n", __func__);
  919 		goto free_tx_urb;
  920 	}
  921 
  922 	dev_info(dev, "Registered iMON driver (lirc minor: %d)\n",
  923 			lirc_minor);
  924 
  925 	/* Needed while unregistering! */
  926 	driver->minor = lirc_minor;
  927 
  928 	context->usbdev = usbdev;
  929 	context->dev_present = 1;
  930 	context->rx_endpoint = rx_endpoint;
  931 	context->rx_urb = rx_urb;
  932 
  933 	/*
  934 	 * tx is used to send characters to lcd/vfd, associate RF
  935 	 * remotes, set IR protocol, and maybe more...
  936 	 */
  937 	context->tx_endpoint = tx_endpoint;
  938 	context->tx_urb = tx_urb;
  939 
  940 	if (display_ep_found)
  941 		context->display = 1;
  942 
  943 	usb_fill_int_urb(context->rx_urb, context->usbdev,
  944 		usb_rcvintpipe(context->usbdev,
  945 			context->rx_endpoint->bEndpointAddress),
  946 		context->usb_rx_buf, sizeof(context->usb_rx_buf),
  947 		usb_rx_callback, context,
  948 		context->rx_endpoint->bInterval);
  949 
  950 	retval = usb_submit_urb(context->rx_urb, GFP_KERNEL);
  951 	if (retval) {
  952 		dev_err(dev, "usb_submit_urb failed for intf0 (%d)\n", retval);
  953 		goto unregister_lirc;
  954 	}
  955 
  956 	usb_set_intfdata(interface, context);
  957 
  958 	if (context->display && ifnum == 0) {
  959 		dev_dbg(dev, "%s: Registering iMON display with sysfs\n",
  960 			__func__);
  961 
  962 		if (usb_register_dev(interface, &imon_class)) {
  963 			/* Not a fatal error, so ignore */
  964 			dev_info(dev, "%s: could not get a minor number for display\n",
  965 				 __func__);
  966 		}
  967 	}
  968 
  969 	dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
  970 		vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum);
  971 
   972 	/* Everything went fine. Just unlock and return retval (which is 0). */
  973 	goto driver_unlock;
  974 
  975 unregister_lirc:
  976 	lirc_unregister_driver(driver->minor);
  977 
  978 free_tx_urb:
  979 	usb_free_urb(tx_urb);
  980 
  981 free_rx_urb:
  982 	usb_free_urb(rx_urb);
  983 
  984 free_lirc_buf:
  985 	lirc_buffer_free(rbuf);
  986 
  987 free_rbuf:
  988 	kfree(rbuf);
  989 
  990 free_driver:
  991 	kfree(driver);
  992 free_context:
  993 	kfree(context);
  994 	context = NULL;
  995 
  996 driver_unlock:
  997 	mutex_unlock(&driver_lock);
  998 
  999 	return retval;
 1000 }
 1001 
 1002 /**
 1003  * Callback function for USB core API: disconnect
 1004  */
 1005 static void imon_disconnect(struct usb_interface *interface)
 1006 {
 1007 	struct imon_context *context;
 1008 	int ifnum;
 1009 
 1010 	/* prevent races with ir_open()/display_open() */
 1011 	mutex_lock(&driver_lock);
 1012 
 1013 	context = usb_get_intfdata(interface);
 1014 	ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
 1015 
 1016 	mutex_lock(&context->ctx_lock);
 1017 
 1018 	usb_set_intfdata(interface, NULL);
 1019 
 1020 	/* Abort ongoing write */
 1021 	if (atomic_read(&context->tx.busy)) {
 1022 		usb_kill_urb(context->tx_urb);
 1023 		complete_all(&context->tx.finished);
 1024 	}
 1025 
 1026 	context->dev_present = 0;
 1027 	usb_kill_urb(context->rx_urb);
 1028 	if (context->display)
 1029 		usb_deregister_dev(interface, &imon_class);
 1030 
 1031 	if (!context->ir_isopen && !context->dev_present) {
 1032 		deregister_from_lirc(context);
 1033 		mutex_unlock(&context->ctx_lock);
 1034 		if (!context->display_isopen)
 1035 			free_imon_context(context);
 1036 	} else
 1037 		mutex_unlock(&context->ctx_lock);
 1038 
 1039 	mutex_unlock(&driver_lock);
 1040 
 1041 	dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n",
 1042 		 __func__, ifnum);
 1043 }
 1044 
 1045 static int imon_suspend(struct usb_interface *intf, pm_message_t message)
 1046 {
 1047 	struct imon_context *context = usb_get_intfdata(intf);
 1048 
 1049 	usb_kill_urb(context->rx_urb);
 1050 
 1051 	return 0;
 1052 }
 1053 
 1054 static int imon_resume(struct usb_interface *intf)
 1055 {
 1056 	struct imon_context *context = usb_get_intfdata(intf);
 1057 
 1058 	usb_fill_int_urb(context->rx_urb, context->usbdev,
 1059 		usb_rcvintpipe(context->usbdev,
 1060 			context->rx_endpoint->bEndpointAddress),
 1061 		context->usb_rx_buf, sizeof(context->usb_rx_buf),
 1062 		usb_rx_callback, context,
 1063 		context->rx_endpoint->bInterval);
 1064 
 1065 	return usb_submit_urb(context->rx_urb, GFP_ATOMIC);
 1066 }
 1067 
 1068 module_usb_driver(imon_driver);
 1069 
 1070 
 1071 
 1072 
 1073 
 1074 /* LDV_COMMENT_BEGIN_MAIN */
 1075 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 1076 
 1077 /*###########################################################################*/
 1078 
 1079 /*############## Driver Environment Generator 0.2 output ####################*/
 1080 
 1081 /*###########################################################################*/
 1082 
 1083 
 1084 
  1085 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test that all kernel resources are correctly released by the driver before it is unloaded. */
 1086 void ldv_check_final_state(void);
 1087 
 1088 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 1089 void ldv_check_return_value(int res);
 1090 
 1091 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 1092 void ldv_check_return_value_probe(int res);
 1093 
 1094 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 1095 void ldv_initialize(void);
 1096 
 1097 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 1098 void ldv_handler_precall(void);
 1099 
  1100 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
 1101 int nondet_int(void);
 1102 
 1103 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 1104 int LDV_IN_INTERRUPT;
 1105 
 1106 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 1107 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 1108 
 1109 
 1110 
 1111 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 1112 	/*============================= VARIABLE DECLARATION PART   =============================*/
 1113 	/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1114 	/* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/
 1115 	/* LDV_COMMENT_BEGIN_PREP */
 1116 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1117 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1118 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1119 	#define MOD_NAME	"lirc_imon"
 1120 	#define MOD_VERSION	"0.8"
 1121 	#define DISPLAY_MINOR_BASE	144
 1122 	#define DEVICE_NAME	"lcd%d"
 1123 	#define BUF_CHUNK_SIZE	4
 1124 	#define BUF_SIZE	128
 1125 	#define BIT_DURATION	250	
 1126 	#define IMON_DATA_BUF_SZ	35
 1127 	/* LDV_COMMENT_END_PREP */
 1128 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */
 1129 	struct usb_interface * var_group1;
 1130 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_probe" */
 1131 	const struct usb_device_id * var_imon_probe_12_p1;
 1132 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "imon_probe" */
 1133 	static int res_imon_probe_12;
 1134 	/* content: static void imon_disconnect(struct usb_interface *interface)*/
 1135 	/* LDV_COMMENT_BEGIN_PREP */
 1136 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1137 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1138 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1139 	#define MOD_NAME	"lirc_imon"
 1140 	#define MOD_VERSION	"0.8"
 1141 	#define DISPLAY_MINOR_BASE	144
 1142 	#define DEVICE_NAME	"lcd%d"
 1143 	#define BUF_CHUNK_SIZE	4
 1144 	#define BUF_SIZE	128
 1145 	#define BIT_DURATION	250	
 1146 	#define IMON_DATA_BUF_SZ	35
 1147 	/* LDV_COMMENT_END_PREP */
 1148 	/* content: static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/
 1149 	/* LDV_COMMENT_BEGIN_PREP */
 1150 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1151 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1152 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1153 	#define MOD_NAME	"lirc_imon"
 1154 	#define MOD_VERSION	"0.8"
 1155 	#define DISPLAY_MINOR_BASE	144
 1156 	#define DEVICE_NAME	"lcd%d"
 1157 	#define BUF_CHUNK_SIZE	4
 1158 	#define BUF_SIZE	128
 1159 	#define BIT_DURATION	250	
 1160 	#define IMON_DATA_BUF_SZ	35
 1161 	/* LDV_COMMENT_END_PREP */
 1162 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "imon_suspend" */
 1163 	pm_message_t  var_imon_suspend_14_p1;
 1164 	/* content: static int imon_resume(struct usb_interface *intf)*/
 1165 	/* LDV_COMMENT_BEGIN_PREP */
 1166 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1167 	#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1168 	#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1169 	#define MOD_NAME	"lirc_imon"
 1170 	#define MOD_VERSION	"0.8"
 1171 	#define DISPLAY_MINOR_BASE	144
 1172 	#define DEVICE_NAME	"lcd%d"
 1173 	#define BUF_CHUNK_SIZE	4
 1174 	#define BUF_SIZE	128
 1175 	#define BIT_DURATION	250	
 1176 	#define IMON_DATA_BUF_SZ	35
 1177 	/* LDV_COMMENT_END_PREP */
 1178 
 1179 
 1180 
 1181 
 1182 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 1183 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 1184 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 1185 	LDV_IN_INTERRUPT=1;
 1186 
 1187 
 1188 
 1189 
 1190 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 1191 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 1192 	/*============================= FUNCTION CALL SECTION       =============================*/
 1193 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 1194 	ldv_initialize();
 1195 	int ldv_s_imon_driver_usb_driver = 0;
 1196 
 1197 
 1198 	while(  nondet_int()
 1199 		|| !(ldv_s_imon_driver_usb_driver == 0)
 1200 	) {
 1201 
 1202 		switch(nondet_int()) {
 1203 
 1204 			case 0: {
 1205 
 1206 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1207 				if(ldv_s_imon_driver_usb_driver==0) {
 1208 
 1209 				/* content: static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id)*/
 1210 				/* LDV_COMMENT_BEGIN_PREP */
 1211 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1212 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1213 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1214 				#define MOD_NAME	"lirc_imon"
 1215 				#define MOD_VERSION	"0.8"
 1216 				#define DISPLAY_MINOR_BASE	144
 1217 				#define DEVICE_NAME	"lcd%d"
 1218 				#define BUF_CHUNK_SIZE	4
 1219 				#define BUF_SIZE	128
 1220 				#define BIT_DURATION	250	
 1221 				#define IMON_DATA_BUF_SZ	35
 1222 				/* LDV_COMMENT_END_PREP */
  1223 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "imon_driver". Standard function test for correct return result. */
 1224 				res_imon_probe_12 = imon_probe( var_group1, var_imon_probe_12_p1);
 1225 				 ldv_check_return_value(res_imon_probe_12);
 1226 				 ldv_check_return_value_probe(res_imon_probe_12);
 1227 				 if(res_imon_probe_12) 
 1228 					goto ldv_module_exit;
 1229 				ldv_s_imon_driver_usb_driver++;
 1230 
 1231 				}
 1232 
 1233 			}
 1234 
 1235 			break;
 1236 			case 1: {
 1237 
 1238 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1239 				if(ldv_s_imon_driver_usb_driver==1) {
 1240 
 1241 				/* content: static int imon_suspend(struct usb_interface *intf, pm_message_t message)*/
 1242 				/* LDV_COMMENT_BEGIN_PREP */
 1243 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1244 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1245 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1246 				#define MOD_NAME	"lirc_imon"
 1247 				#define MOD_VERSION	"0.8"
 1248 				#define DISPLAY_MINOR_BASE	144
 1249 				#define DEVICE_NAME	"lcd%d"
 1250 				#define BUF_CHUNK_SIZE	4
 1251 				#define BUF_SIZE	128
 1252 				#define BIT_DURATION	250	
 1253 				#define IMON_DATA_BUF_SZ	35
 1254 				/* LDV_COMMENT_END_PREP */
 1255 				/* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "imon_driver" */
 1256 				ldv_handler_precall();
 1257 				imon_suspend( var_group1, var_imon_suspend_14_p1);
 1258 				ldv_s_imon_driver_usb_driver++;
 1259 
 1260 				}
 1261 
 1262 			}
 1263 
 1264 			break;
 1265 			case 2: {
 1266 
 1267 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1268 				if(ldv_s_imon_driver_usb_driver==2) {
 1269 
 1270 				/* content: static int imon_resume(struct usb_interface *intf)*/
 1271 				/* LDV_COMMENT_BEGIN_PREP */
 1272 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1273 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1274 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1275 				#define MOD_NAME	"lirc_imon"
 1276 				#define MOD_VERSION	"0.8"
 1277 				#define DISPLAY_MINOR_BASE	144
 1278 				#define DEVICE_NAME	"lcd%d"
 1279 				#define BUF_CHUNK_SIZE	4
 1280 				#define BUF_SIZE	128
 1281 				#define BIT_DURATION	250	
 1282 				#define IMON_DATA_BUF_SZ	35
 1283 				/* LDV_COMMENT_END_PREP */
 1284 				/* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "imon_driver" */
 1285 				ldv_handler_precall();
 1286 				imon_resume( var_group1);
 1287 				ldv_s_imon_driver_usb_driver++;
 1288 
 1289 				}
 1290 
 1291 			}
 1292 
 1293 			break;
 1294 			case 3: {
 1295 
 1296 				/** STRUCT: struct type: usb_driver, struct name: imon_driver **/
 1297 				if(ldv_s_imon_driver_usb_driver==3) {
 1298 
 1299 				/* content: static void imon_disconnect(struct usb_interface *interface)*/
 1300 				/* LDV_COMMENT_BEGIN_PREP */
 1301 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 1302 				#define MOD_AUTHOR	"Venky Raju <dev@venky.ws>"
 1303 				#define MOD_DESC	"Driver for SoundGraph iMON MultiMedia IR/Display"
 1304 				#define MOD_NAME	"lirc_imon"
 1305 				#define MOD_VERSION	"0.8"
 1306 				#define DISPLAY_MINOR_BASE	144
 1307 				#define DEVICE_NAME	"lcd%d"
 1308 				#define BUF_CHUNK_SIZE	4
 1309 				#define BUF_SIZE	128
 1310 				#define BIT_DURATION	250	
 1311 				#define IMON_DATA_BUF_SZ	35
 1312 				/* LDV_COMMENT_END_PREP */
 1313 				/* LDV_COMMENT_FUNCTION_CALL Function from field "disconnect" from driver structure with callbacks "imon_driver" */
 1314 				ldv_handler_precall();
 1315 				imon_disconnect( var_group1);
 1316 				ldv_s_imon_driver_usb_driver=0;
 1317 
 1318 				}
 1319 
 1320 			}
 1321 
 1322 			break;
 1323 			default: break;
 1324 
 1325 		}
 1326 
 1327 	}
 1328 
 1329 	ldv_module_exit: 
 1330 
  1331 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 1332 	ldv_final: ldv_check_final_state();
 1333 
 1334 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 1335 	return;
 1336 
 1337 }
 1338 #endif
 1339 
 1340 /* LDV_COMMENT_END_MAIN */
 1341 
#line 84 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.3-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.3-rc1.tar.xz/csd_deg_dscv/7460/dscv_tempdir/dscv/ri/32_7a/drivers/staging/media/lirc/lirc_imon.o.c.prepared"
    1 
    2 
    3 #include <linux/mutex.h>
    4 #include <linux/errno.h>
    5 #include <verifier/rcv.h>
    6 #include <kernel-model/ERR.inc>
    7 
    8 static int ldv_mutex_ctx_lock_of_imon_context = 1;
    9 
    10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
   11 int ldv_mutex_lock_interruptible_ctx_lock_of_imon_context(struct mutex *lock)
   12 {
   13   int nondetermined;
   14 
   15   /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked */
   16   ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1);
   17 
   18   /* LDV_COMMENT_OTHER Construct nondetermined result*/
   19   nondetermined = ldv_undef_int();
   20 
   21   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'ctx_lock_of_imon_context' */
   22   if (nondetermined)
   23   {
   24     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */
   25     ldv_mutex_ctx_lock_of_imon_context = 2;
   26     /* LDV_COMMENT_RETURN Finish with success */
   27     return 0;
   28   }
   29   else
   30   {
    31     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'ctx_lock_of_imon_context' is kept unlocked */
   32     return -EINTR;
   33   }
   34 }
   35 
    36 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
   37 int ldv_mutex_lock_killable_ctx_lock_of_imon_context(struct mutex *lock)
   38 {
   39   int nondetermined;
   40 
   41   /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked */
   42   ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1);
   43 
   44   /* LDV_COMMENT_OTHER Construct nondetermined result */
   45   nondetermined = ldv_undef_int();
   46 
   47   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'ctx_lock_of_imon_context' */
   48   if (nondetermined)
   49   {
   50     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */
   51     ldv_mutex_ctx_lock_of_imon_context = 2;
   52     /* LDV_COMMENT_RETURN Finish with success*/
   53     return 0;
   54   }
   55   else
   56   {
    57     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'ctx_lock_of_imon_context' is kept unlocked */
   58     return -EINTR;
   59   }
   60 }
   61 
   62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was not locked and lock it */
   63 void ldv_mutex_lock_ctx_lock_of_imon_context(struct mutex *lock)
   64 {
   65   /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked */
   66   ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1);
   67   /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */
   68   ldv_mutex_ctx_lock_of_imon_context = 2;
   69 }
   70 
   71 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was not locked and nondeterministically lock it. Return 0 on fails */
   72 int ldv_mutex_trylock_ctx_lock_of_imon_context(struct mutex *lock)
   73 {
   74   int is_mutex_held_by_another_thread;
   75 
   76   /* LDV_COMMENT_ASSERT It may be an error if mutex 'ctx_lock_of_imon_context' is locked at this point */
   77   ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1);
   78 
   79   /* LDV_COMMENT_OTHER Construct nondetermined result */
   80   is_mutex_held_by_another_thread = ldv_undef_int();
   81 
   82   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'ctx_lock_of_imon_context' */
   83   if (is_mutex_held_by_another_thread)
   84   {
   85     /* LDV_COMMENT_RETURN Finish with fail */
   86     return 0;
   87   }
   88   else
   89   {
   90     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */
   91     ldv_mutex_ctx_lock_of_imon_context = 2;
   92     /* LDV_COMMENT_RETURN Finish with success */
   93     return 1;
   94   }
   95 }
   96 
   97 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context') Lock mutex 'ctx_lock_of_imon_context' if atomic decrement result is zero */
   98 int ldv_atomic_dec_and_mutex_lock_ctx_lock_of_imon_context(atomic_t *cnt, struct mutex *lock)
   99 {
  100   int atomic_value_after_dec;
  101 
  102   /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked (since we may lock it in this function) */
  103   ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1);
  104 
  105   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  106   atomic_value_after_dec = ldv_undef_int();
  107 
  108   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  109   if (atomic_value_after_dec == 0)
  110   {
  111     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context', as atomic has decremented to zero */
  112     ldv_mutex_ctx_lock_of_imon_context = 2;
  113     /* LDV_COMMENT_RETURN Return 1 with locked mutex 'ctx_lock_of_imon_context' */
  114     return 1;
  115   }
  116 
  117   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'ctx_lock_of_imon_context' */
  118   return 0;
  119 }
  120 
   121 /* TODO Synchronize with 39_7a ldv_spin_is_locked! */
  122 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_ctx_lock_of_imon_context') Check whether mutex 'ctx_lock_of_imon_context' was locked */
  123 int ldv_mutex_is_locked_ctx_lock_of_imon_context(struct mutex *lock)
  124 {
  125   int nondetermined;
  126 
  127   if(ldv_mutex_ctx_lock_of_imon_context == 1)
  128   {
  129     /* LDV_COMMENT_OTHER Construct nondetermined result */
  130     nondetermined = ldv_undef_int();
  131 
  132     /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'ctx_lock_of_imon_context' was locked */
  133     if(nondetermined)
  134     {
  135       /* LDV_COMMENT_RETURN Mutex 'ctx_lock_of_imon_context' was unlocked */
  136       return 0;
  137     }
  138     else
  139     {
  140       /* LDV_COMMENT_RETURN Mutex 'ctx_lock_of_imon_context' was locked */
  141       return 1;
  142     }
  143   }
  144   else
  145   {
  146     /* LDV_COMMENT_RETURN Mutex 'ctx_lock_of_imon_context' was locked */
  147     return 1;
  148   }
  149 }
  150 
  151 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_ctx_lock_of_imon_context') Check that mutex 'ctx_lock_of_imon_context' was locked and unlock it */
  152 void ldv_mutex_unlock_ctx_lock_of_imon_context(struct mutex *lock)
  153 {
  154   /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be locked */
  155   ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 2);
  156   /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'ctx_lock_of_imon_context' */
  157   ldv_mutex_ctx_lock_of_imon_context = 1;
  158 }
  159 
  160 
  161 
  162 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/
  163 void ldv_usb_lock_device_ctx_lock_of_imon_context(void)
  164 {
  165   /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'ctx_lock_of_imon_context' */
  166   ldv_mutex_lock_ctx_lock_of_imon_context(NULL);
  167 }
  168 
  169 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/
  170 int ldv_usb_trylock_device_ctx_lock_of_imon_context(void)
  171 {
  172   return ldv_mutex_trylock_ctx_lock_of_imon_context(NULL);
  173 }
  174 
  175 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/
  176 int ldv_usb_lock_device_for_reset_ctx_lock_of_imon_context(void)
  177 {
  178   if(ldv_undef_int()) {
  179     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'ctx_lock_of_imon_context' */
  180     ldv_mutex_lock_ctx_lock_of_imon_context(NULL);
  181     /* LDV_COMMENT_RETURN Finish with success */
  182     return 0;
  183   } else
  184   /* LDV_COMMENT_RETURN Usb lock is not acquired*/
  185   return ldv_undef_int_negative();
  186 }
  187 
  188 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
  189 void ldv_usb_unlock_device_ctx_lock_of_imon_context(void) {
  190   /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'ctx_lock_of_imon_context' */
  191   ldv_mutex_unlock_ctx_lock_of_imon_context(NULL);
  192 }
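
Editor's note: the model functions above reduce the mutex to a two-valued state (1 = unlocked, 2 = locked) guarded by ldv_assert(), which is how the verifier detects a double lock or an unlock of an unheld mutex. A self-contained sketch of the same idea is given below, with ldv_assert() simulated by an abort(); it illustrates the modelling approach and is not LDV code.

/* Standalone sketch of the two-state mutex model used by the LDV rule. */
#include <stdio.h>
#include <stdlib.h>

static int model_state = 1;			/* 1 = unlocked, 2 = locked */

static void model_assert(int cond)		/* stand-in for ldv_assert() */
{
	if (!cond) {
		fprintf(stderr, "model violation: double lock/unlock\n");
		abort();
	}
}

static void model_lock(void)
{
	model_assert(model_state == 1);		/* mutex must be unlocked here */
	model_state = 2;
}

static void model_unlock(void)
{
	model_assert(model_state == 2);		/* mutex must be locked here */
	model_state = 1;
}

int main(void)
{
	model_lock();
	model_lock();				/* second lock trips the assertion */
	model_unlock();				/* never reached in this sketch */
	return 0;
}
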
  193 
  194 static int ldv_mutex_driver_lock = 1;
  195 
   196 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_driver_lock') Check that mutex 'driver_lock' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  197 int ldv_mutex_lock_interruptible_driver_lock(struct mutex *lock)
  198 {
  199   int nondetermined;
  200 
  201   /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked */
  202   ldv_assert(ldv_mutex_driver_lock == 1);
  203 
  204   /* LDV_COMMENT_OTHER Construct nondetermined result*/
  205   nondetermined = ldv_undef_int();
  206 
  207   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'driver_lock' */
  208   if (nondetermined)
  209   {
  210     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */
  211     ldv_mutex_driver_lock = 2;
  212     /* LDV_COMMENT_RETURN Finish with success */
  213     return 0;
  214   }
  215   else
  216   {
   217     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'driver_lock' is kept unlocked */
  218     return -EINTR;
  219   }
  220 }
  221 
   222 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_driver_lock') Check that mutex 'driver_lock' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  223 int ldv_mutex_lock_killable_driver_lock(struct mutex *lock)
  224 {
  225   int nondetermined;
  226 
  227   /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked */
  228   ldv_assert(ldv_mutex_driver_lock == 1);
  229 
  230   /* LDV_COMMENT_OTHER Construct nondetermined result */
  231   nondetermined = ldv_undef_int();
  232 
  233   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'driver_lock' */
  234   if (nondetermined)
  235   {
  236     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */
  237     ldv_mutex_driver_lock = 2;
  238     /* LDV_COMMENT_RETURN Finish with success*/
  239     return 0;
  240   }
  241   else
  242   {
   243     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'driver_lock' is kept unlocked */
  244     return -EINTR;
  245   }
  246 }
  247 
  248 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_driver_lock') Check that mutex 'driver_lock' was not locked and lock it */
  249 void ldv_mutex_lock_driver_lock(struct mutex *lock)
  250 {
  251   /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked */
  252   ldv_assert(ldv_mutex_driver_lock == 1);
  253   /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */
  254   ldv_mutex_driver_lock = 2;
  255 }
  256 
  257 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_driver_lock') Check that mutex 'driver_lock' was not locked and nondeterministically lock it. Return 0 on fails */
  258 int ldv_mutex_trylock_driver_lock(struct mutex *lock)
  259 {
  260   int is_mutex_held_by_another_thread;
  261 
  262   /* LDV_COMMENT_ASSERT It may be an error if mutex 'driver_lock' is locked at this point */
  263   ldv_assert(ldv_mutex_driver_lock == 1);
  264 
  265   /* LDV_COMMENT_OTHER Construct nondetermined result */
  266   is_mutex_held_by_another_thread = ldv_undef_int();
  267 
  268   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'driver_lock' */
  269   if (is_mutex_held_by_another_thread)
  270   {
  271     /* LDV_COMMENT_RETURN Finish with fail */
  272     return 0;
  273   }
  274   else
  275   {
  276     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */
  277     ldv_mutex_driver_lock = 2;
  278     /* LDV_COMMENT_RETURN Finish with success */
  279     return 1;
  280   }
  281 }
  282 
  283 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_driver_lock') Lock mutex 'driver_lock' if atomic decrement result is zero */
  284 int ldv_atomic_dec_and_mutex_lock_driver_lock(atomic_t *cnt, struct mutex *lock)
  285 {
  286   int atomic_value_after_dec;
  287 
  288   /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked (since we may lock it in this function) */
  289   ldv_assert(ldv_mutex_driver_lock == 1);
  290 
  291   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  292   atomic_value_after_dec = ldv_undef_int();
  293 
  294   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  295   if (atomic_value_after_dec == 0)
  296   {
  297     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock', as atomic has decremented to zero */
  298     ldv_mutex_driver_lock = 2;
  299     /* LDV_COMMENT_RETURN Return 1 with locked mutex 'driver_lock' */
  300     return 1;
  301   }
  302 
  303   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'driver_lock' */
  304   return 0;
  305 }
  306 
   307 /* TODO Synchronize with 39_7a ldv_spin_is_locked! */
  308 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_driver_lock') Check whether mutex 'driver_lock' was locked */
  309 int ldv_mutex_is_locked_driver_lock(struct mutex *lock)
  310 {
  311   int nondetermined;
  312 
  313   if(ldv_mutex_driver_lock == 1)
  314   {
  315     /* LDV_COMMENT_OTHER Construct nondetermined result */
  316     nondetermined = ldv_undef_int();
  317 
  318     /* LDV_COMMENT_ASSERT Nondeterministically understand whether mutex 'driver_lock' was locked */
  319     if(nondetermined)
  320     {
  321       /* LDV_COMMENT_RETURN Mutex 'driver_lock' was unlocked */
  322       return 0;
  323     }
  324     else
  325     {
  326       /* LDV_COMMENT_RETURN Mutex 'driver_lock' was locked */
  327       return 1;
  328     }
  329   }
  330   else
  331   {
  332     /* LDV_COMMENT_RETURN Mutex 'driver_lock' was locked */
  333     return 1;
  334   }
  335 }
  336 
  337 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_driver_lock') Check that mutex 'driver_lock' was locked and unlock it */
  338 void ldv_mutex_unlock_driver_lock(struct mutex *lock)
  339 {
  340   /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be locked */
  341   ldv_assert(ldv_mutex_driver_lock == 2);
  342   /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'driver_lock' */
  343   ldv_mutex_driver_lock = 1;
  344 }
  345 
  346 
  347 
  348 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/
  349 void ldv_usb_lock_device_driver_lock(void)
  350 {
  351   /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'driver_lock' */
  352   ldv_mutex_lock_driver_lock(NULL);
  353 }
  354 
  355 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/
  356 int ldv_usb_trylock_device_driver_lock(void)
  357 {
  358   return ldv_mutex_trylock_driver_lock(NULL);
  359 }
  360 
  361 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/
  362 int ldv_usb_lock_device_for_reset_driver_lock(void)
  363 {
  364   if(ldv_undef_int()) {
  365     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'driver_lock' */
  366     ldv_mutex_lock_driver_lock(NULL);
  367     /* LDV_COMMENT_RETURN Finish with success */
  368     return 0;
  369   } else
  370   /* LDV_COMMENT_RETURN Usb lock is not acquired*/
  371   return ldv_undef_int_negative();
  372 }
  373 
  374 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
  375 void ldv_usb_unlock_device_driver_lock(void) {
  376   /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'driver_lock' */
  377   ldv_mutex_unlock_driver_lock(NULL);
  378 }
  379 
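/* Illustrative sketch (not part of the generated model): with the state
 * encoding above (1 == unlocked, 2 == locked), a second lock of 'driver_lock'
 * without an intermediate unlock violates the ldv_assert() inside
 * ldv_mutex_lock_driver_lock() and drives the verifier to the error label.
 * Both caller functions below are hypothetical; NULL is passed just as the
 * usb wrappers above do. */
static void ldv_example_driver_lock_ok(void)
{
  ldv_mutex_lock_driver_lock(NULL);    /* state: 1 -> 2 */
  ldv_mutex_unlock_driver_lock(NULL);  /* state: 2 -> 1 */
}

static void ldv_example_driver_lock_double_lock(void)
{
  ldv_mutex_lock_driver_lock(NULL);    /* state: 1 -> 2 */
  ldv_mutex_lock_driver_lock(NULL);    /* ldv_assert(... == 1) fails here */
}
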
  380 static int ldv_mutex_i_mutex_of_inode = 1;
  381 
  382 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  383 int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock)
  384 {
  385   int nondetermined;
  386 
  387   /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */
  388   ldv_assert(ldv_mutex_i_mutex_of_inode == 1);
  389 
  390   /* LDV_COMMENT_OTHER Construct nondetermined result*/
  391   nondetermined = ldv_undef_int();
  392 
  393   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */
  394   if (nondetermined)
  395   {
  396     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */
  397     ldv_mutex_i_mutex_of_inode = 2;
  398     /* LDV_COMMENT_RETURN Finish with success */
  399     return 0;
  400   }
  401   else
  402   {
  403     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'i_mutex_of_inode' is kept unlocked */
  404     return -EINTR;
  405   }
  406 }
  407 
  408 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  409 int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock)
  410 {
  411   int nondetermined;
  412 
  413   /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */
  414   ldv_assert(ldv_mutex_i_mutex_of_inode == 1);
  415 
  416   /* LDV_COMMENT_OTHER Construct nondetermined result */
  417   nondetermined = ldv_undef_int();
  418 
  419   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */
  420   if (nondetermined)
  421   {
  422     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */
  423     ldv_mutex_i_mutex_of_inode = 2;
  424     /* LDV_COMMENT_RETURN Finish with success*/
  425     return 0;
  426   }
  427   else
  428   {
  429     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'i_mutex_of_inode' is kept unlocked */
  430     return -EINTR;
  431   }
  432 }
  433 
  434 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and lock it */
  435 void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock)
  436 {
  437   /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked */
  438   ldv_assert(ldv_mutex_i_mutex_of_inode == 1);
  439   /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */
  440   ldv_mutex_i_mutex_of_inode = 2;
  441 }
  442 
  443 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was not locked and nondeterministically lock it. Return 0 on failure */
  444 int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock)
  445 {
  446   int is_mutex_held_by_another_thread;
  447 
  448   /* LDV_COMMENT_ASSERT It may be an error if mutex 'i_mutex_of_inode' is locked at this point */
  449   ldv_assert(ldv_mutex_i_mutex_of_inode == 1);
  450 
  451   /* LDV_COMMENT_OTHER Construct nondetermined result */
  452   is_mutex_held_by_another_thread = ldv_undef_int();
  453 
  454   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'i_mutex_of_inode' */
  455   if (is_mutex_held_by_another_thread)
  456   {
  457     /* LDV_COMMENT_RETURN Finish with failure */
  458     return 0;
  459   }
  460   else
  461   {
  462     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */
  463     ldv_mutex_i_mutex_of_inode = 2;
  464     /* LDV_COMMENT_RETURN Finish with success */
  465     return 1;
  466   }
  467 }
  468 
  469 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode') Lock mutex 'i_mutex_of_inode' if atomic decrement result is zero */
  470 int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt, struct mutex *lock)
  471 {
  472   int atomic_value_after_dec;
  473 
  474   /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked (since we may lock it in this function) */
  475   ldv_assert(ldv_mutex_i_mutex_of_inode == 1);
  476 
  477   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  478   atomic_value_after_dec = ldv_undef_int();
  479 
  480   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  481   if (atomic_value_after_dec == 0)
  482   {
  483     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode', as atomic has decremented to zero */
  484     ldv_mutex_i_mutex_of_inode = 2;
  485     /* LDV_COMMENT_RETURN Return 1 with locked mutex 'i_mutex_of_inode' */
  486     return 1;
  487   }
  488 
  489   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'i_mutex_of_inode' */
  490   return 0;
  491 }
  492 
  493 /* TODO Synchronize with 39_7a ldv_spin_is_locked! */
  494 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_i_mutex_of_inode') Check whether mutex 'i_mutex_of_inode' was locked */
  495 int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock)
  496 {
  497   int nondetermined;
  498 
  499   if(ldv_mutex_i_mutex_of_inode == 1)
  500   {
  501     /* LDV_COMMENT_OTHER Construct nondetermined result */
  502     nondetermined = ldv_undef_int();
  503 
  504     /* LDV_COMMENT_ASSERT Nondeterministically decide whether mutex 'i_mutex_of_inode' is locked */
  505     if(nondetermined)
  506     {
  507       /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was unlocked */
  508       return 0;
  509     }
  510     else
  511     {
  512       /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */
  513       return 1;
  514     }
  515   }
  516   else
  517   {
  518     /* LDV_COMMENT_RETURN Mutex 'i_mutex_of_inode' was locked */
  519     return 1;
  520   }
  521 }
  522 
  523 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_i_mutex_of_inode') Check that mutex 'i_mutex_of_inode' was locked and unlock it */
  524 void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock)
  525 {
  526   /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be locked */
  527   ldv_assert(ldv_mutex_i_mutex_of_inode == 2);
  528   /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'i_mutex_of_inode' */
  529   ldv_mutex_i_mutex_of_inode = 1;
  530 }
  531 
  532 
  533 
  534 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/
  535 void ldv_usb_lock_device_i_mutex_of_inode(void)
  536 {
  537   /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'i_mutex_of_inode' */
  538   ldv_mutex_lock_i_mutex_of_inode(NULL);
  539 }
  540 
  541 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/
  542 int ldv_usb_trylock_device_i_mutex_of_inode(void)
  543 {
  544   return ldv_mutex_trylock_i_mutex_of_inode(NULL);
  545 }
  546 
  547 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/
  548 int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void)
  549 {
  550   if(ldv_undef_int()) {
  551     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'i_mutex_of_inode' */
  552     ldv_mutex_lock_i_mutex_of_inode(NULL);
  553     /* LDV_COMMENT_RETURN Finish with success */
  554     return 0;
  555   } else
  556   /* LDV_COMMENT_RETURN Usb lock is not acquired*/
  557   return ldv_undef_int_negative();
  558 }
  559 
  560 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
  561 void ldv_usb_unlock_device_i_mutex_of_inode(void) {
  562   /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'i_mutex_of_inode' */
  563   ldv_mutex_unlock_i_mutex_of_inode(NULL);
  564 }
  565 
  566 static int ldv_mutex_lock = 1;
  567 
  568 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_lock') Check that mutex 'lock' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  569 int ldv_mutex_lock_interruptible_lock(struct mutex *lock)
  570 {
  571   int nondetermined;
  572 
  573   /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */
  574   ldv_assert(ldv_mutex_lock == 1);
  575 
  576   /* LDV_COMMENT_OTHER Construct nondetermined result*/
  577   nondetermined = ldv_undef_int();
  578 
  579   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */
  580   if (nondetermined)
  581   {
  582     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
  583     ldv_mutex_lock = 2;
  584     /* LDV_COMMENT_RETURN Finish with success */
  585     return 0;
  586   }
  587   else
  588   {
  589     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'lock' is kept unlocked */
  590     return -EINTR;
  591   }
  592 }
  593 
  594 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_lock') Check that mutex 'lock' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  595 int ldv_mutex_lock_killable_lock(struct mutex *lock)
  596 {
  597   int nondetermined;
  598 
  599   /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */
  600   ldv_assert(ldv_mutex_lock == 1);
  601 
  602   /* LDV_COMMENT_OTHER Construct nondetermined result */
  603   nondetermined = ldv_undef_int();
  604 
  605   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */
  606   if (nondetermined)
  607   {
  608     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
  609     ldv_mutex_lock = 2;
  610     /* LDV_COMMENT_RETURN Finish with success*/
  611     return 0;
  612   }
  613   else
  614   {
  615     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'lock' is kept unlocked */
  616     return -EINTR;
  617   }
  618 }
  619 
  620 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_lock') Check that mutex 'lock' was not locked and lock it */
  621 void ldv_mutex_lock_lock(struct mutex *lock)
  622 {
  623   /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked */
  624   ldv_assert(ldv_mutex_lock == 1);
  625   /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
  626   ldv_mutex_lock = 2;
  627 }
  628 
  629 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_lock') Check that mutex 'lock' was not locked and nondeterministically lock it. Return 0 on failure */
  630 int ldv_mutex_trylock_lock(struct mutex *lock)
  631 {
  632   int is_mutex_held_by_another_thread;
  633 
  634   /* LDV_COMMENT_ASSERT It may be an error if mutex 'lock' is locked at this point */
  635   ldv_assert(ldv_mutex_lock == 1);
  636 
  637   /* LDV_COMMENT_OTHER Construct nondetermined result */
  638   is_mutex_held_by_another_thread = ldv_undef_int();
  639 
  640   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'lock' */
  641   if (is_mutex_held_by_another_thread)
  642   {
  643     /* LDV_COMMENT_RETURN Finish with failure */
  644     return 0;
  645   }
  646   else
  647   {
  648     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
  649     ldv_mutex_lock = 2;
  650     /* LDV_COMMENT_RETURN Finish with success */
  651     return 1;
  652   }
  653 }
  654 
  655 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_lock') Lock mutex 'lock' if atomic decrement result is zero */
  656 int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt, struct mutex *lock)
  657 {
  658   int atomic_value_after_dec;
  659 
  660   /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked (since we may lock it in this function) */
  661   ldv_assert(ldv_mutex_lock == 1);
  662 
  663   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  664   atomic_value_after_dec = ldv_undef_int();
  665 
  666   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  667   if (atomic_value_after_dec == 0)
  668   {
  669     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock', as atomic has decremented to zero */
  670     ldv_mutex_lock = 2;
  671     /* LDV_COMMENT_RETURN Return 1 with locked mutex 'lock' */
  672     return 1;
  673   }
  674 
  675   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'lock' */
  676   return 0;
  677 }
  678 
  679 /* TODO Synchronize with 39_7a ldv_spin_is_locked! */
  680 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_lock') Check whether mutex 'lock' was locked */
  681 int ldv_mutex_is_locked_lock(struct mutex *lock)
  682 {
  683   int nondetermined;
  684 
  685   if(ldv_mutex_lock == 1)
  686   {
  687     /* LDV_COMMENT_OTHER Construct nondetermined result */
  688     nondetermined = ldv_undef_int();
  689 
  690     /* LDV_COMMENT_ASSERT Nondeterministically decide whether mutex 'lock' is locked */
  691     if(nondetermined)
  692     {
  693       /* LDV_COMMENT_RETURN Mutex 'lock' was unlocked */
  694       return 0;
  695     }
  696     else
  697     {
  698       /* LDV_COMMENT_RETURN Mutex 'lock' was locked */
  699       return 1;
  700     }
  701   }
  702   else
  703   {
  704     /* LDV_COMMENT_RETURN Mutex 'lock' was locked */
  705     return 1;
  706   }
  707 }
  708 
  709 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_lock') Check that mutex 'lock' was locked and unlock it */
  710 void ldv_mutex_unlock_lock(struct mutex *lock)
  711 {
  712   /* LDV_COMMENT_ASSERT Mutex 'lock' must be locked */
  713   ldv_assert(ldv_mutex_lock == 2);
  714   /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'lock' */
  715   ldv_mutex_lock = 1;
  716 }
  717 
  718 
  719 
  720 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/
  721 void ldv_usb_lock_device_lock(void)
  722 {
  723   /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'lock' */
  724   ldv_mutex_lock_lock(NULL);
  725 }
  726 
  727 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/
  728 int ldv_usb_trylock_device_lock(void)
  729 {
  730   return ldv_mutex_trylock_lock(NULL);
  731 }
  732 
  733 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/
  734 int ldv_usb_lock_device_for_reset_lock(void)
  735 {
  736   if(ldv_undef_int()) {
  737     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'lock' */
  738     ldv_mutex_lock_lock(NULL);
  739     /* LDV_COMMENT_RETURN Finish with success */
  740     return 0;
  741   } else
  742   /* LDV_COMMENT_RETURN Usb lock is not acquired*/
  743   return ldv_undef_int_negative();
  744 }
  745 
  746 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
  747 void ldv_usb_unlock_device_lock(void) {
  748   /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'lock' */
  749   ldv_mutex_unlock_lock(NULL);
  750 }
  751 
  752 static int ldv_mutex_mutex_of_device = 1;
  753 
  754 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_interruptible_mutex_of_device') Check that mutex 'mutex_of_device' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  755 int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock)
  756 {
  757   int nondetermined;
  758 
  759   /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */
  760   ldv_assert(ldv_mutex_mutex_of_device == 1);
  761 
  762   /* LDV_COMMENT_OTHER Construct nondetermined result*/
  763   nondetermined = ldv_undef_int();
  764 
  765   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */
  766   if (nondetermined)
  767   {
  768     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
  769     ldv_mutex_mutex_of_device = 2;
  770     /* LDV_COMMENT_RETURN Finish with success */
  771     return 0;
  772   }
  773   else
  774   {
  775     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'mutex_of_device' is kept unlocked */
  776     return -EINTR;
  777   }
  778 }
  779 
  780 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_killable_mutex_of_device') Check that mutex 'mutex_of_device' was unlocked and nondeterministically lock it. Return the corresponding error code on failure */
  781 int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock)
  782 {
  783   int nondetermined;
  784 
  785   /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */
  786   ldv_assert(ldv_mutex_mutex_of_device == 1);
  787 
  788   /* LDV_COMMENT_OTHER Construct nondetermined result */
  789   nondetermined = ldv_undef_int();
  790 
  791   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */
  792   if (nondetermined)
  793   {
  794     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
  795     ldv_mutex_mutex_of_device = 2;
  796     /* LDV_COMMENT_RETURN Finish with success*/
  797     return 0;
  798   }
  799   else
  800   {
  801     /* LDV_COMMENT_RETURN Finish with failure. Mutex 'mutex_of_device' is kept unlocked */
  802     return -EINTR;
  803   }
  804 }
  805 
  806 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_lock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and lock it */
  807 void ldv_mutex_lock_mutex_of_device(struct mutex *lock)
  808 {
  809   /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked */
  810   ldv_assert(ldv_mutex_mutex_of_device == 1);
  811   /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
  812   ldv_mutex_mutex_of_device = 2;
  813 }
  814 
  815 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_trylock_mutex_of_device') Check that mutex 'mutex_of_device' was not locked and nondeterministically lock it. Return 0 on failure */
  816 int ldv_mutex_trylock_mutex_of_device(struct mutex *lock)
  817 {
  818   int is_mutex_held_by_another_thread;
  819 
  820   /* LDV_COMMENT_ASSERT It may be an error if mutex 'mutex_of_device' is locked at this point */
  821   ldv_assert(ldv_mutex_mutex_of_device == 1);
  822 
  823   /* LDV_COMMENT_OTHER Construct nondetermined result */
  824   is_mutex_held_by_another_thread = ldv_undef_int();
  825 
  826   /* LDV_COMMENT_ASSERT Nondeterministically lock mutex 'mutex_of_device' */
  827   if (is_mutex_held_by_another_thread)
  828   {
  829     /* LDV_COMMENT_RETURN Finish with failure */
  830     return 0;
  831   }
  832   else
  833   {
  834     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
  835     ldv_mutex_mutex_of_device = 2;
  836     /* LDV_COMMENT_RETURN Finish with success */
  837     return 1;
  838   }
  839 }
  840 
  841 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_mutex_lock_mutex_of_device') Lock mutex 'mutex_of_device' if atomic decrement result is zero */
  842 int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt, struct mutex *lock)
  843 {
  844   int atomic_value_after_dec;
  845 
  846   /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked (since we may lock it in this function) */
  847   ldv_assert(ldv_mutex_mutex_of_device == 1);
  848 
  849   /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
  850   atomic_value_after_dec = ldv_undef_int();
  851 
  852   /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
  853   if (atomic_value_after_dec == 0)
  854   {
  855     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device', as atomic has decremented to zero */
  856     ldv_mutex_mutex_of_device = 2;
  857     /* LDV_COMMENT_RETURN Return 1 with locked mutex 'mutex_of_device' */
  858     return 1;
  859   }
  860 
  861   /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking mutex 'mutex_of_device' */
  862   return 0;
  863 }
  864 
  865 /* TODO Synchronize with 39_7a ldv_spin_is_locked! */
  866 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_is_locked_mutex_of_device') Check whether mutex 'mutex_of_device' was locked */
  867 int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock)
  868 {
  869   int nondetermined;
  870 
  871   if(ldv_mutex_mutex_of_device == 1)
  872   {
  873     /* LDV_COMMENT_OTHER Construct nondetermined result */
  874     nondetermined = ldv_undef_int();
  875 
  876     /* LDV_COMMENT_ASSERT Nondeterministically decide whether mutex 'mutex_of_device' is locked */
  877     if(nondetermined)
  878     {
  879       /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was unlocked */
  880       return 0;
  881     }
  882     else
  883     {
  884       /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */
  885       return 1;
  886     }
  887   }
  888   else
  889   {
  890     /* LDV_COMMENT_RETURN Mutex 'mutex_of_device' was locked */
  891     return 1;
  892   }
  893 }
  894 
  895 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_mutex_unlock_mutex_of_device') Check that mutex 'mutex_of_device' was locked and unlock it */
  896 void ldv_mutex_unlock_mutex_of_device(struct mutex *lock)
  897 {
  898   /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be locked */
  899   ldv_assert(ldv_mutex_mutex_of_device == 2);
  900   /* LDV_COMMENT_CHANGE_STATE Unlock mutex 'mutex_of_device' */
  901   ldv_mutex_mutex_of_device = 1;
  902 }
  903 
  904 
  905 
  906 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device') Acquires the usb lock and checks for double usb lock*/
  907 void ldv_usb_lock_device_mutex_of_device(void)
  908 {
  909   /* LDV_COMMENT_CHANGE_STATE Lock usb_lock 'mutex_of_device' */
  910   ldv_mutex_lock_mutex_of_device(NULL);
  911 }
  912 
  913 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_trylock_device') Tries to acquire the usb lock and returns 1 if successful*/
  914 int ldv_usb_trylock_device_mutex_of_device(void)
  915 {
  916   return ldv_mutex_trylock_mutex_of_device(NULL);
  917 }
  918 
  919 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_lock_device_for_reset') Tries to acquire the usb lock and returns 0 if successful*/
  920 int ldv_usb_lock_device_for_reset_mutex_of_device(void)
  921 {
  922   if(ldv_undef_int()) {
  923     /* LDV_COMMENT_CHANGE_STATE Lock mutex 'mutex_of_device' */
  924     ldv_mutex_lock_mutex_of_device(NULL);
  925     /* LDV_COMMENT_RETURN Finish with success */
  926     return 0;
  927   } else
  928   /* LDV_COMMENT_RETURN Usb lock is not acquired*/
  929   return ldv_undef_int_negative();
  930 }
  931 
  932 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_usb_unlock_device') Releases the usb lock and checks that usb lock was acquired before*/
  933 void ldv_usb_unlock_device_mutex_of_device(void) {
  934   /* LDV_COMMENT_CHANGE_STATE Unlock usb_lock 'mutex_of_device' */
  935   ldv_mutex_unlock_mutex_of_device(NULL);
  936 }
  937 
  938 
  939 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all mutexes are unlocked at the end */
  940 void ldv_check_final_state(void)
  941 {
  942   /* LDV_COMMENT_ASSERT Mutex 'ctx_lock_of_imon_context' must be unlocked at the end */
  943   ldv_assert(ldv_mutex_ctx_lock_of_imon_context == 1);
  944   /* LDV_COMMENT_ASSERT Mutex 'driver_lock' must be unlocked at the end */
  945   ldv_assert(ldv_mutex_driver_lock == 1);
  946   /* LDV_COMMENT_ASSERT Mutex 'i_mutex_of_inode' must be unlocked at the end */
  947   ldv_assert(ldv_mutex_i_mutex_of_inode == 1);
  948   /* LDV_COMMENT_ASSERT Mutex 'lock' must be unlocked at the end */
  949   ldv_assert(ldv_mutex_lock == 1);
  950   /* LDV_COMMENT_ASSERT Mutex 'mutex_of_device' must be unlocked at the end */
  951   ldv_assert(ldv_mutex_mutex_of_device == 1);
  952 }

    1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error
    5    label, like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because some static verifiers (like
    9    BLAST) do not accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   19 /* Infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
   47 
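/* Illustrative sketch (hypothetical helper, not part of this header): a
   nondeterministic boolean built from ldv_undef_int() and constrained with
   ldv_assume() so that verifiers explore exactly the values 0 and 1. */
static inline int ldv_undef_bool_example(void)
{
  int b = ldv_undef_int();

  ldv_assume(b == 0 || b == 1);

  return b;
}
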
   48 /* Add an explicit model for the __builtin_expect GCC built-in. Without the
   49    model its return value would be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
   65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */

    1 /*
    2  * device.h - generic, centralized driver model
    3  *
    4  * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
    5  * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
    6  * Copyright (c) 2008-2009 Novell Inc.
    7  *
    8  * This file is released under the GPLv2
    9  *
   10  * See Documentation/driver-model/ for more information.
   11  */
   12 
   13 #ifndef _DEVICE_H_
   14 #define _DEVICE_H_
   15 
   16 #include <linux/ioport.h>
   17 #include <linux/kobject.h>
   18 #include <linux/klist.h>
   19 #include <linux/list.h>
   20 #include <linux/lockdep.h>
   21 #include <linux/compiler.h>
   22 #include <linux/types.h>
   23 #include <linux/mutex.h>
   24 #include <linux/pinctrl/devinfo.h>
   25 #include <linux/pm.h>
   26 #include <linux/atomic.h>
   27 #include <linux/ratelimit.h>
   28 #include <linux/uidgid.h>
   29 #include <linux/gfp.h>
   30 #include <asm/device.h>
   31 
   32 struct device;
   33 struct device_private;
   34 struct device_driver;
   35 struct driver_private;
   36 struct module;
   37 struct class;
   38 struct subsys_private;
   39 struct bus_type;
   40 struct device_node;
   41 struct fwnode_handle;
   42 struct iommu_ops;
   43 struct iommu_group;
   44 
   45 struct bus_attribute {
   46 	struct attribute	attr;
   47 	ssize_t (*show)(struct bus_type *bus, char *buf);
   48 	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
   49 };
   50 
   51 #define BUS_ATTR(_name, _mode, _show, _store)	\
   52 	struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
   53 #define BUS_ATTR_RW(_name) \
   54 	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
   55 #define BUS_ATTR_RO(_name) \
   56 	struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
   57 
   58 extern int __must_check bus_create_file(struct bus_type *,
   59 					struct bus_attribute *);
   60 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
   61 
   62 /**
   63  * struct bus_type - The bus type of the device
   64  *
   65  * @name:	The name of the bus.
   66  * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
   67  * @dev_root:	Default device to use as the parent.
   68  * @dev_attrs:	Default attributes of the devices on the bus.
   69  * @bus_groups:	Default attributes of the bus.
   70  * @dev_groups:	Default attributes of the devices on the bus.
   71  * @drv_groups: Default attributes of the device drivers on the bus.
   72  * @match:	Called, perhaps multiple times, whenever a new device or driver
   73  *		is added for this bus. It should return a nonzero value if the
   74  *		given device can be handled by the given driver.
   75  * @uevent:	Called when a device is added, removed, or a few other things
   76  *		that generate uevents to add the environment variables.
   77  * @probe:	Called when a new device or driver is added to this bus; calls
   78  *		the specific driver's probe to initialize the matched device.
   79  * @remove:	Called when a device is removed from this bus.
   80  * @shutdown:	Called at shut-down time to quiesce the device.
   81  *
   82  * @online:	Called to put the device back online (after offlining it).
   83  * @offline:	Called to put the device offline for hot-removal. May fail.
   84  *
   85  * @suspend:	Called when a device on this bus wants to go to sleep mode.
   86  * @resume:	Called to bring a device on this bus out of sleep mode.
   87  * @pm:		Power management operations of this bus, which call back into
   88  *		the specific device driver's pm-ops.
   89  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
   90  *              driver implementations to a bus and allow the driver to do
   91  *              bus-specific setup
   92  * @p:		The private data of the driver core, only the driver core can
   93  *		touch this.
   94  * @lock_key:	Lock class key for use by the lock validator
   95  *
   96  * A bus is a channel between the processor and one or more devices. For the
   97  * purposes of the device model, all devices are connected via a bus, even if
   98  * it is an internal, virtual, "platform" bus. Buses can plug into each other.
   99  * A USB controller is usually a PCI device, for example. The device model
  100  * represents the actual connections between buses and the devices they control.
  101  * A bus is represented by the bus_type structure. It contains the name, the
  102  * default attributes, the bus' methods, PM operations, and the driver core's
  103  * private data.
  104  */
  105 struct bus_type {
  106 	const char		*name;
  107 	const char		*dev_name;
  108 	struct device		*dev_root;
  109 	struct device_attribute	*dev_attrs;	/* use dev_groups instead */
  110 	const struct attribute_group **bus_groups;
  111 	const struct attribute_group **dev_groups;
  112 	const struct attribute_group **drv_groups;
  113 
  114 	int (*match)(struct device *dev, struct device_driver *drv);
  115 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  116 	int (*probe)(struct device *dev);
  117 	int (*remove)(struct device *dev);
  118 	void (*shutdown)(struct device *dev);
  119 
  120 	int (*online)(struct device *dev);
  121 	int (*offline)(struct device *dev);
  122 
  123 	int (*suspend)(struct device *dev, pm_message_t state);
  124 	int (*resume)(struct device *dev);
  125 
  126 	const struct dev_pm_ops *pm;
  127 
  128 	const struct iommu_ops *iommu_ops;
  129 
  130 	struct subsys_private *p;
  131 	struct lock_class_key lock_key;
  132 };
  133 
  134 extern int __must_check bus_register(struct bus_type *bus);
  135 
  136 extern void bus_unregister(struct bus_type *bus);
  137 
  138 extern int __must_check bus_rescan_devices(struct bus_type *bus);
  139 
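/* Illustrative sketch (not part of this header): a minimal bus that matches
 * devices to drivers by name. The "foo" names are hypothetical; dev_name()
 * and strcmp() are assumed to come from <linux/device.h> and <linux/string.h>,
 * __init from <linux/init.h>. The match callback returns nonzero when the
 * driver can handle the device, as documented above. */
static int foo_match(struct device *dev, struct device_driver *drv)
{
	return strcmp(dev_name(dev), drv->name) == 0;
}

static struct bus_type foo_bus_type = {
	.name	= "foo",
	.match	= foo_match,
};

static int __init foo_bus_init(void)
{
	return bus_register(&foo_bus_type);
}
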
  140 /* iterator helpers for buses */
  141 struct subsys_dev_iter {
  142 	struct klist_iter		ki;
  143 	const struct device_type	*type;
  144 };
  145 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
  146 			 struct bus_type *subsys,
  147 			 struct device *start,
  148 			 const struct device_type *type);
  149 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
  150 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
  151 
  152 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
  153 		     int (*fn)(struct device *dev, void *data));
  154 struct device *bus_find_device(struct bus_type *bus, struct device *start,
  155 			       void *data,
  156 			       int (*match)(struct device *dev, void *data));
  157 struct device *bus_find_device_by_name(struct bus_type *bus,
  158 				       struct device *start,
  159 				       const char *name);
  160 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
  161 					struct device *hint);
  162 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
  163 		     void *data, int (*fn)(struct device_driver *, void *));
  164 void bus_sort_breadthfirst(struct bus_type *bus,
  165 			   int (*compare)(const struct device *a,
  166 					  const struct device *b));
  167 /*
  168  * Bus notifiers: Get notified of addition/removal of devices
  169  * and binding/unbinding of drivers to devices.
  170  * In the long run, it should be a replacement for the platform
  171  * notify hooks.
  172  */
  173 struct notifier_block;
  174 
  175 extern int bus_register_notifier(struct bus_type *bus,
  176 				 struct notifier_block *nb);
  177 extern int bus_unregister_notifier(struct bus_type *bus,
  178 				   struct notifier_block *nb);
  179 
  180 /* All notifiers below get called with the target struct device *
  181  * as an argument. Note that those functions are likely to be called
  182  * with the device lock held in the core, so be careful.
  183  */
  184 #define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
  185 #define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
  186 #define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
  187 #define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
  188 						      bound */
  189 #define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
  190 #define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
  191 						      unbound */
  192 #define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
  193 						      from the device */
  194 
  195 extern struct kset *bus_get_kset(struct bus_type *bus);
  196 extern struct klist *bus_get_device_klist(struct bus_type *bus);
  197 
  198 /**
  199  * enum probe_type - device driver probe type to try
  200  *	Device drivers may opt in for special handling of their
  201  *	respective probe routines. This tells the core what to
  202  *	expect and prefer.
  203  *
  204  * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
  205  *	whether probed synchronously or asynchronously.
  206  * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices for which
  207  *	probing order is not essential for booting the system may
  208  *	opt into executing their probes asynchronously.
  209  * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
  210  *	their probe routines to run synchronously with driver and
  211  *	device registration (with the exception of -EPROBE_DEFER
  212  *	handling - re-probing always ends up being done asynchronously).
  213  *
  214  * Note that the end goal is to switch the kernel to use asynchronous
  215  * probing by default, so annotating drivers with
  216  * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
  217  * to speed up boot process while we are validating the rest of the
  218  * drivers.
  219  */
  220 enum probe_type {
  221 	PROBE_DEFAULT_STRATEGY,
  222 	PROBE_PREFER_ASYNCHRONOUS,
  223 	PROBE_FORCE_SYNCHRONOUS,
  224 };
  225 
  226 /**
  227  * struct device_driver - The basic device driver structure
  228  * @name:	Name of the device driver.
  229  * @bus:	The bus which the device of this driver belongs to.
  230  * @owner:	The module owner.
  231  * @mod_name:	Used for built-in modules.
  232  * @suppress_bind_attrs: Disables bind/unbind via sysfs.
  233  * @probe_type:	Type of the probe (synchronous or asynchronous) to use.
  234  * @of_match_table: The open firmware table.
  235  * @acpi_match_table: The ACPI match table.
  236  * @probe:	Called to query the existence of a specific device,
  237  *		whether this driver can work with it, and bind the driver
  238  *		to a specific device.
  239  * @remove:	Called when the device is removed from the system to
  240  *		unbind a device from this driver.
  241  * @shutdown:	Called at shut-down time to quiesce the device.
  242  * @suspend:	Called to put the device to sleep mode. Usually to a
  243  *		low power state.
  244  * @resume:	Called to bring a device from sleep mode.
  245  * @groups:	Default attributes that get created by the driver core
  246  *		automatically.
  247  * @pm:		Power management operations of the device which matched
  248  *		this driver.
  249  * @p:		Driver core's private data, no one other than the driver
  250  *		core can touch this.
  251  *
  252  * The device driver-model tracks all of the drivers known to the system.
  253  * The main reason for this tracking is to enable the driver core to match
  254  * up drivers with new devices. Once drivers are known objects within the
  255  * system, however, a number of other things become possible. Device drivers
  256  * can export information and configuration variables that are independent
  257  * of any specific device.
  258  */
  259 struct device_driver {
  260 	const char		*name;
  261 	struct bus_type		*bus;
  262 
  263 	struct module		*owner;
  264 	const char		*mod_name;	/* used for built-in modules */
  265 
  266 	bool suppress_bind_attrs;	/* disables bind/unbind via sysfs */
  267 	enum probe_type probe_type;
  268 
  269 	const struct of_device_id	*of_match_table;
  270 	const struct acpi_device_id	*acpi_match_table;
  271 
  272 	int (*probe) (struct device *dev);
  273 	int (*remove) (struct device *dev);
  274 	void (*shutdown) (struct device *dev);
  275 	int (*suspend) (struct device *dev, pm_message_t state);
  276 	int (*resume) (struct device *dev);
  277 	const struct attribute_group **groups;
  278 
  279 	const struct dev_pm_ops *pm;
  280 
  281 	struct driver_private *p;
  282 };
  283 
  284 
  285 extern int __must_check driver_register(struct device_driver *drv);
  286 extern void driver_unregister(struct device_driver *drv);
  287 
  288 extern struct device_driver *driver_find(const char *name,
  289 					 struct bus_type *bus);
  290 extern int driver_probe_done(void);
  291 extern void wait_for_device_probe(void);
  292 
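/* Illustrative sketch (not part of this header): registering a driver on the
 * hypothetical "foo" bus from the previous sketch. foo_probe()/foo_remove()
 * are placeholders that accept every matched device; THIS_MODULE is assumed
 * to come from <linux/module.h>. */
static int foo_probe(struct device *dev)
{
	return 0;	/* bind unconditionally in this sketch */
}

static int foo_remove(struct device *dev)
{
	return 0;
}

static struct device_driver foo_driver = {
	.name	= "foo",
	.bus	= &foo_bus_type,
	.owner	= THIS_MODULE,
	.probe	= foo_probe,
	.remove	= foo_remove,
};

static int __init foo_driver_init(void)
{
	return driver_register(&foo_driver);
}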
  293 
  294 /* sysfs interface for exporting driver attributes */
  295 
  296 struct driver_attribute {
  297 	struct attribute attr;
  298 	ssize_t (*show)(struct device_driver *driver, char *buf);
  299 	ssize_t (*store)(struct device_driver *driver, const char *buf,
  300 			 size_t count);
  301 };
  302 
  303 #define DRIVER_ATTR(_name, _mode, _show, _store) \
  304 	struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
  305 #define DRIVER_ATTR_RW(_name) \
  306 	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
  307 #define DRIVER_ATTR_RO(_name) \
  308 	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
  309 #define DRIVER_ATTR_WO(_name) \
  310 	struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
  311 
  312 extern int __must_check driver_create_file(struct device_driver *driver,
  313 					const struct driver_attribute *attr);
  314 extern void driver_remove_file(struct device_driver *driver,
  315 			       const struct driver_attribute *attr);
  316 
  317 extern int __must_check driver_for_each_device(struct device_driver *drv,
  318 					       struct device *start,
  319 					       void *data,
  320 					       int (*fn)(struct device *dev,
  321 							 void *));
  322 struct device *driver_find_device(struct device_driver *drv,
  323 				  struct device *start, void *data,
  324 				  int (*match)(struct device *dev, void *data));
  325 
  326 /**
  327  * struct subsys_interface - interfaces to device functions
  328  * @name:       name of the device function
  329  * @subsys:     subsystem of the devices to attach to
  330  * @node:       the list of functions registered at the subsystem
  331  * @add_dev:    device hookup to device function handler
  332  * @remove_dev: device hookup to device function handler
  333  *
  334  * Simple interfaces attached to a subsystem. Multiple interfaces can
  335  * attach to a subsystem and its devices. Unlike drivers, they do not
  336  * exclusively claim or control devices. Interfaces usually represent
  337  * a specific functionality of a subsystem/class of devices.
  338  */
  339 struct subsys_interface {
  340 	const char *name;
  341 	struct bus_type *subsys;
  342 	struct list_head node;
  343 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
  344 	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
  345 };
  346 
  347 int subsys_interface_register(struct subsys_interface *sif);
  348 void subsys_interface_unregister(struct subsys_interface *sif);
  349 
  350 int subsys_system_register(struct bus_type *subsys,
  351 			   const struct attribute_group **groups);
  352 int subsys_virtual_register(struct bus_type *subsys,
  353 			    const struct attribute_group **groups);
  354 
  355 /**
  356  * struct class - device classes
  357  * @name:	Name of the class.
  358  * @owner:	The module owner.
  359  * @class_attrs: Default attributes of this class.
  360  * @dev_groups:	Default attributes of the devices that belong to the class.
  361  * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
  362  * @dev_uevent:	Called when a device is added, removed from this class, or a
  363  *		few other things that generate uevents to add the environment
  364  *		variables.
  365  * @devnode:	Callback to provide the devtmpfs.
  366  * @class_release: Called to release this class.
  367  * @dev_release: Called to release the device.
  368  * @suspend:	Used to put the device to sleep mode, usually to a low power
  369  *		state.
  370  * @resume:	Used to bring the device from the sleep mode.
  371  * @ns_type:	Callbacks so sysfs can determine namespaces.
  372  * @namespace:	Namespace that a device of this class belongs to.
  373  * @pm:		The default device power management operations of this class.
  374  * @p:		The private data of the driver core, no one other than the
  375  *		driver core can touch this.
  376  *
  377  * A class is a higher-level view of a device that abstracts out low-level
  378  * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
  379  * at the class level, they are all simply disks. Classes allow user space
  380  * to work with devices based on what they do, rather than how they are
  381  * connected or how they work.
  382  */
  383 struct class {
  384 	const char		*name;
  385 	struct module		*owner;
  386 
  387 	struct class_attribute		*class_attrs;
  388 	const struct attribute_group	**dev_groups;
  389 	struct kobject			*dev_kobj;
  390 
  391 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
  392 	char *(*devnode)(struct device *dev, umode_t *mode);
  393 
  394 	void (*class_release)(struct class *class);
  395 	void (*dev_release)(struct device *dev);
  396 
  397 	int (*suspend)(struct device *dev, pm_message_t state);
  398 	int (*resume)(struct device *dev);
  399 
  400 	const struct kobj_ns_type_operations *ns_type;
  401 	const void *(*namespace)(struct device *dev);
  402 
  403 	const struct dev_pm_ops *pm;
  404 
  405 	struct subsys_private *p;
  406 };
  407 
  408 struct class_dev_iter {
  409 	struct klist_iter		ki;
  410 	const struct device_type	*type;
  411 };
  412 
  413 extern struct kobject *sysfs_dev_block_kobj;
  414 extern struct kobject *sysfs_dev_char_kobj;
  415 extern int __must_check __class_register(struct class *class,
  416 					 struct lock_class_key *key);
  417 extern void class_unregister(struct class *class);
  418 
  419 /* This is a #define to keep the compiler from merging different
  420  * instances of the __key variable */
  421 #define class_register(class)			\
  422 ({						\
  423 	static struct lock_class_key __key;	\
  424 	__class_register(class, &__key);	\
  425 })
  426 
  427 struct class_compat;
  428 struct class_compat *class_compat_register(const char *name);
  429 void class_compat_unregister(struct class_compat *cls);
  430 int class_compat_create_link(struct class_compat *cls, struct device *dev,
  431 			     struct device *device_link);
  432 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
  433 			      struct device *device_link);
  434 
  435 extern void class_dev_iter_init(struct class_dev_iter *iter,
  436 				struct class *class,
  437 				struct device *start,
  438 				const struct device_type *type);
  439 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
  440 extern void class_dev_iter_exit(struct class_dev_iter *iter);
  441 
  442 extern int class_for_each_device(struct class *class, struct device *start,
  443 				 void *data,
  444 				 int (*fn)(struct device *dev, void *data));
  445 extern struct device *class_find_device(struct class *class,
  446 					struct device *start, const void *data,
  447 					int (*match)(struct device *, const void *));
  448 
  449 struct class_attribute {
  450 	struct attribute attr;
  451 	ssize_t (*show)(struct class *class, struct class_attribute *attr,
  452 			char *buf);
  453 	ssize_t (*store)(struct class *class, struct class_attribute *attr,
  454 			const char *buf, size_t count);
  455 };
  456 
  457 #define CLASS_ATTR(_name, _mode, _show, _store) \
  458 	struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
  459 #define CLASS_ATTR_RW(_name) \
  460 	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
  461 #define CLASS_ATTR_RO(_name) \
  462 	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
  463 
  464 extern int __must_check class_create_file_ns(struct class *class,
  465 					     const struct class_attribute *attr,
  466 					     const void *ns);
  467 extern void class_remove_file_ns(struct class *class,
  468 				 const struct class_attribute *attr,
  469 				 const void *ns);
  470 
  471 static inline int __must_check class_create_file(struct class *class,
  472 					const struct class_attribute *attr)
  473 {
  474 	return class_create_file_ns(class, attr, NULL);
  475 }
  476 
  477 static inline void class_remove_file(struct class *class,
  478 				     const struct class_attribute *attr)
  479 {
  480 	return class_remove_file_ns(class, attr, NULL);
  481 }
  482 
  483 /* Simple class attribute that is just a static string */
  484 struct class_attribute_string {
  485 	struct class_attribute attr;
  486 	char *str;
  487 };
  488 
  489 /* Currently read-only only */
  490 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
  491 	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
  492 #define CLASS_ATTR_STRING(_name, _mode, _str) \
  493 	struct class_attribute_string class_attr_##_name = \
  494 		_CLASS_ATTR_STRING(_name, _mode, _str)
  495 
  496 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
  497                         char *buf);
  498 
  499 struct class_interface {
  500 	struct list_head	node;
  501 	struct class		*class;
  502 
  503 	int (*add_dev)		(struct device *, struct class_interface *);
  504 	void (*remove_dev)	(struct device *, struct class_interface *);
  505 };
  506 
  507 extern int __must_check class_interface_register(struct class_interface *);
  508 extern void class_interface_unregister(struct class_interface *);
  509 
  510 extern struct class * __must_check __class_create(struct module *owner,
  511 						  const char *name,
  512 						  struct lock_class_key *key);
  513 extern void class_destroy(struct class *cls);
  514 
  515 /* This is a #define to keep the compiler from merging different
  516  * instances of the __key variable */
  517 #define class_create(owner, name)		\
  518 ({						\
  519 	static struct lock_class_key __key;	\
  520 	__class_create(owner, name, &__key);	\
  521 })
  522 
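/* Illustrative sketch (not part of this header): creating and destroying a
 * device class. "foo" is a hypothetical class name; THIS_MODULE is assumed
 * from <linux/module.h>, IS_ERR()/PTR_ERR() from <linux/err.h>, and
 * __init/__exit from <linux/init.h>. */
static struct class *foo_class;

static int __init foo_class_init(void)
{
	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);
	return 0;
}

static void __exit foo_class_exit(void)
{
	class_destroy(foo_class);
}
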
  523 /*
  524  * The type of device, "struct device" is embedded in. A class
  525  * or bus can contain devices of different types
  526  * like "partitions" and "disks", "mouse" and "event".
  527  * This identifies the device type and carries type-specific
  528  * information, equivalent to the kobj_type of a kobject.
  529  * If "name" is specified, the uevent will contain it in
  530  * the DEVTYPE variable.
  531  */
  532 struct device_type {
  533 	const char *name;
  534 	const struct attribute_group **groups;
  535 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  536 	char *(*devnode)(struct device *dev, umode_t *mode,
  537 			 kuid_t *uid, kgid_t *gid);
  538 	void (*release)(struct device *dev);
  539 
  540 	const struct dev_pm_ops *pm;
  541 };
  542 
  543 /* interface for exporting device attributes */
  544 struct device_attribute {
  545 	struct attribute	attr;
  546 	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
  547 			char *buf);
  548 	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
  549 			 const char *buf, size_t count);
  550 };
  551 
  552 struct dev_ext_attribute {
  553 	struct device_attribute attr;
  554 	void *var;
  555 };
  556 
  557 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
  558 			  char *buf);
  559 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
  560 			   const char *buf, size_t count);
  561 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
  562 			char *buf);
  563 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
  564 			 const char *buf, size_t count);
  565 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
  566 			char *buf);
  567 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
  568 			 const char *buf, size_t count);
  569 
  570 #define DEVICE_ATTR(_name, _mode, _show, _store) \
  571 	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
  572 #define DEVICE_ATTR_RW(_name) \
  573 	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
  574 #define DEVICE_ATTR_RO(_name) \
  575 	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
  576 #define DEVICE_ATTR_WO(_name) \
  577 	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
  578 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
  579 	struct dev_ext_attribute dev_attr_##_name = \
  580 		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
  581 #define DEVICE_INT_ATTR(_name, _mode, _var) \
  582 	struct dev_ext_attribute dev_attr_##_name = \
  583 		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
  584 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
  585 	struct dev_ext_attribute dev_attr_##_name = \
  586 		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
  587 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
  588 	struct device_attribute dev_attr_##_name =		\
  589 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
  590 
  591 extern int device_create_file(struct device *device,
  592 			      const struct device_attribute *entry);
  593 extern void device_remove_file(struct device *dev,
  594 			       const struct device_attribute *attr);
  595 extern bool device_remove_file_self(struct device *dev,
  596 				    const struct device_attribute *attr);
  597 extern int __must_check device_create_bin_file(struct device *dev,
  598 					const struct bin_attribute *attr);
  599 extern void device_remove_bin_file(struct device *dev,
  600 				   const struct bin_attribute *attr);
  601 
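/* Illustrative sketch (not part of this header): a read-only sysfs attribute.
 * DEVICE_ATTR_RO(revision) expects a show routine named revision_show();
 * "revision" and its constant value are hypothetical, and sprintf() is
 * assumed to come from <linux/kernel.h>. */
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", 1);
}
static DEVICE_ATTR_RO(revision);

/* The attribute would typically be created from probe() with
 * device_create_file(dev, &dev_attr_revision) and removed again with
 * device_remove_file(dev, &dev_attr_revision). */
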
  602 /* device resource management */
  603 typedef void (*dr_release_t)(struct device *dev, void *res);
  604 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
  605 
  606 #ifdef CONFIG_DEBUG_DEVRES
  607 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
  608 			     const char *name);
  609 #define devres_alloc(release, size, gfp) \
  610 	__devres_alloc(release, size, gfp, #release)
  611 #else
  612 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
  613 #endif
  614 extern void devres_for_each_res(struct device *dev, dr_release_t release,
  615 				dr_match_t match, void *match_data,
  616 				void (*fn)(struct device *, void *, void *),
  617 				void *data);
  618 extern void devres_free(void *res);
  619 extern void devres_add(struct device *dev, void *res);
  620 extern void *devres_find(struct device *dev, dr_release_t release,
  621 			 dr_match_t match, void *match_data);
  622 extern void *devres_get(struct device *dev, void *new_res,
  623 			dr_match_t match, void *match_data);
  624 extern void *devres_remove(struct device *dev, dr_release_t release,
  625 			   dr_match_t match, void *match_data);
  626 extern int devres_destroy(struct device *dev, dr_release_t release,
  627 			  dr_match_t match, void *match_data);
  628 extern int devres_release(struct device *dev, dr_release_t release,
  629 			  dr_match_t match, void *match_data);
  630 
  631 /* devres group */
  632 extern void * __must_check devres_open_group(struct device *dev, void *id,
  633 					     gfp_t gfp);
  634 extern void devres_close_group(struct device *dev, void *id);
  635 extern void devres_remove_group(struct device *dev, void *id);
  636 extern int devres_release_group(struct device *dev, void *id);
  637 
  638 /* managed devm_k.alloc/kfree for device drivers */
  639 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
  640 extern __printf(3, 0)
  641 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
  642 		      va_list ap);
  643 extern __printf(3, 4)
  644 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
  645 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
  646 {
  647 	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
  648 }
  649 static inline void *devm_kmalloc_array(struct device *dev,
  650 				       size_t n, size_t size, gfp_t flags)
  651 {
  652 	if (size != 0 && n > SIZE_MAX / size)
  653 		return NULL;
  654 	return devm_kmalloc(dev, n * size, flags);
  655 }
  656 static inline void *devm_kcalloc(struct device *dev,
  657 				 size_t n, size_t size, gfp_t flags)
  658 {
  659 	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
  660 }
  661 extern void devm_kfree(struct device *dev, void *p);
  662 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
  663 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
  664 			  gfp_t gfp);
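/*
 * A minimal probe-path sketch (hypothetical "foo" driver state, not part of
 * this header): devm_kzalloc()/devm_kcalloc()/devm_kasprintf() allocations
 * are freed automatically when the device is unbound, so no kfree() is
 * needed on the error paths.
 */
struct foo_priv {
	char *label;
	u32 *table;
};

static struct foo_priv *foo_alloc_state(struct device *dev,
					unsigned int nentries)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	priv->table = devm_kcalloc(dev, nentries, sizeof(*priv->table),
				   GFP_KERNEL);
	if (!priv->table)
		return NULL;

	priv->label = devm_kasprintf(dev, GFP_KERNEL, "foo-%s", dev_name(dev));
	if (!priv->label)
		return NULL;

	return priv;
}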
  665 
  666 extern unsigned long devm_get_free_pages(struct device *dev,
  667 					 gfp_t gfp_mask, unsigned int order);
  668 extern void devm_free_pages(struct device *dev, unsigned long addr);
  669 
  670 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
  671 
  672 /* allows adding/removing a custom action to the devres stack */
  673 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
  674 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
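/*
 * A short sketch of devm_add_action() (illustrative "foo" names): register a
 * teardown callback on the devres stack so it runs automatically on unbind.
 * If registration fails, the caller still has to undo the setup itself.
 */
static void foo_hw_disable(void *data)
{
	/* e.g. disable a clock or regulator passed in via @data */
}

static int foo_hw_enable(struct device *dev, void *hw)
{
	/* ... enable @hw here ... */
	return devm_add_action(dev, foo_hw_disable, hw);
}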
  675 
  676 struct device_dma_parameters {
  677 	/*
  678 	 * a low level driver may set these to teach IOMMU code about
  679 	 * sg limitations.
  680 	 */
  681 	unsigned int max_segment_size;
  682 	unsigned long segment_boundary_mask;
  683 };
  684 
  685 /**
  686  * struct device - The basic device structure
  687  * @parent:	The device's "parent" device, the device to which it is attached.
  688  * 		In most cases, a parent device is some sort of bus or host
  689  * 		controller. If parent is NULL, the device is a top-level device,
  690  * 		which is not usually what you want.
  691  * @p:		Holds the private data of the driver core portions of the device.
  692  * 		See the comment of the struct device_private for detail.
  693  * @kobj:	A top-level, abstract class from which other classes are derived.
  694  * @init_name:	Initial name of the device.
  695  * @type:	The type of device.
  696  * 		This identifies the device type and carries type-specific
  697  * 		information.
  698  * @mutex:	Mutex to synchronize calls to its driver.
  699  * @bus:	Type of bus device is on.
  700  * @driver:	Which driver has allocated this
  701  * @platform_data: Platform data specific to the device.
  702  * 		Example: For devices on custom boards, as typical of embedded
  703  * 		and SOC based hardware, Linux often uses platform_data to point
  704  * 		to board-specific structures describing devices and how they
  705  * 		are wired.  That can include what ports are available, chip
  706  * 		variants, which GPIO pins act in what additional roles, and so
  707  * 		on.  This shrinks the "Board Support Packages" (BSPs) and
  708  * 		minimizes board-specific #ifdefs in drivers.
  709  * @driver_data: Private pointer for driver specific info.
  710  * @power:	For device power management.
  711  * 		See Documentation/power/devices.txt for details.
  712  * @pm_domain:	Provide callbacks that are executed during system suspend,
  713  * 		hibernation, system resume and during runtime PM transitions
  714  * 		along with subsystem-level and driver-level callbacks.
  715  * @pins:	For device pin management.
  716  *		See Documentation/pinctrl.txt for details.
  717  * @msi_list:	Hosts MSI descriptors
  718  * @msi_domain: The generic MSI domain this device is using.
  719  * @numa_node:	NUMA node this device is close to.
  720  * @dma_mask:	Dma mask (if dma'ble device).
  721  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not
  722  * 		all hardware supports 64-bit addresses for consistent
  723  * 		allocations of such descriptors.
  724  * @dma_pfn_offset: offset of the DMA memory range relative to RAM
  725  * @dma_parms:	A low level driver may set these to teach IOMMU code about
  726  * 		segment limitations.
  727  * @dma_pools:	Dma pools (if dma'ble device).
  728  * @dma_mem:	Internal for coherent mem override.
  729  * @cma_area:	Contiguous memory area for dma allocations
  730  * @archdata:	For arch-specific additions.
  731  * @of_node:	Associated device tree node.
  732  * @fwnode:	Associated device node supplied by platform firmware.
  733  * @devt:	For creating the sysfs "dev".
  734  * @id:		device instance
  735  * @devres_lock: Spinlock to protect the resource of the device.
  736  * @devres_head: The resources list of the device.
  737  * @knode_class: The node used to add the device to the class list.
  738  * @class:	The class of the device.
  739  * @groups:	Optional attribute groups.
  740  * @release:	Callback to free the device after all references have
  741  * 		gone away. This should be set by the allocator of the
  742  * 		device (i.e. the bus driver that discovered the device).
  743  * @iommu_group: IOMMU group the device belongs to.
  744  *
  745  * @offline_disabled: If set, the device is permanently online.
  746  * @offline:	Set after successful invocation of bus type's .offline().
  747  *
  748  * At the lowest level, every device in a Linux system is represented by an
  749  * instance of struct device. The device structure contains the information
  750  * that the device model core needs to model the system. Most subsystems,
  751  * however, track additional information about the devices they host. As a
  752  * result, it is rare for devices to be represented by bare device structures;
  753  * instead, that structure, like kobject structures, is usually embedded within
  754  * a higher-level representation of the device.
  755  */
  756 struct device {
  757 	struct device		*parent;
  758 
  759 	struct device_private	*p;
  760 
  761 	struct kobject kobj;
  762 	const char		*init_name; /* initial name of the device */
  763 	const struct device_type *type;
  764 
  765 	struct mutex		mutex;	/* mutex to synchronize calls to
  766 					 * its driver.
  767 					 */
  768 
  769 	struct bus_type	*bus;		/* type of bus device is on */
  770 	struct device_driver *driver;	/* which driver has allocated this
  771 					   device */
  772 	void		*platform_data;	/* Platform specific data, device
  773 					   core doesn't touch it */
  774 	void		*driver_data;	/* Driver data, set and get with
  775 					   dev_set/get_drvdata */
  776 	struct dev_pm_info	power;
  777 	struct dev_pm_domain	*pm_domain;
  778 
  779 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  780 	struct irq_domain	*msi_domain;
  781 #endif
  782 #ifdef CONFIG_PINCTRL
  783 	struct dev_pin_info	*pins;
  784 #endif
  785 #ifdef CONFIG_GENERIC_MSI_IRQ
  786 	struct list_head	msi_list;
  787 #endif
  788 
  789 #ifdef CONFIG_NUMA
  790 	int		numa_node;	/* NUMA node this device is close to */
  791 #endif
  792 	u64		*dma_mask;	/* dma mask (if dma'able device) */
  793 	u64		coherent_dma_mask;/* Like dma_mask, but for
  794 					     alloc_coherent mappings as
  795 					     not all hardware supports
  796 					     64 bit addresses for consistent
  797 					     allocations of such descriptors. */
  798 	unsigned long	dma_pfn_offset;
  799 
  800 	struct device_dma_parameters *dma_parms;
  801 
  802 	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
  803 
  804 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
  805 					     override */
  806 #ifdef CONFIG_DMA_CMA
  807 	struct cma *cma_area;		/* contiguous memory area for dma
  808 					   allocations */
  809 #endif
  810 	/* arch specific additions */
  811 	struct dev_archdata	archdata;
  812 
  813 	struct device_node	*of_node; /* associated device tree node */
  814 	struct fwnode_handle	*fwnode; /* firmware device node */
  815 
  816 	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
  817 	u32			id;	/* device instance */
  818 
  819 	spinlock_t		devres_lock;
  820 	struct list_head	devres_head;
  821 
  822 	struct klist_node	knode_class;
  823 	struct class		*class;
  824 	const struct attribute_group **groups;	/* optional groups */
  825 
  826 	void	(*release)(struct device *dev);
  827 	struct iommu_group	*iommu_group;
  828 
  829 	bool			offline_disabled:1;
  830 	bool			offline:1;
  831 };
  832 
  833 static inline struct device *kobj_to_dev(struct kobject *kobj)
  834 {
  835 	return container_of(kobj, struct device, kobj);
  836 }
  837 
  838 /* Get the wakeup routines, which depend on struct device */
  839 #include <linux/pm_wakeup.h>
  840 
  841 static inline const char *dev_name(const struct device *dev)
  842 {
  843 	/* Use the init name until the kobject becomes available */
  844 	if (dev->init_name)
  845 		return dev->init_name;
  846 
  847 	return kobject_name(&dev->kobj);
  848 }
  849 
  850 extern __printf(2, 3)
  851 int dev_set_name(struct device *dev, const char *name, ...);
  852 
  853 #ifdef CONFIG_NUMA
  854 static inline int dev_to_node(struct device *dev)
  855 {
  856 	return dev->numa_node;
  857 }
  858 static inline void set_dev_node(struct device *dev, int node)
  859 {
  860 	dev->numa_node = node;
  861 }
  862 #else
  863 static inline int dev_to_node(struct device *dev)
  864 {
  865 	return -1;
  866 }
  867 static inline void set_dev_node(struct device *dev, int node)
  868 {
  869 }
  870 #endif
  871 
  872 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
  873 {
  874 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  875 	return dev->msi_domain;
  876 #else
  877 	return NULL;
  878 #endif
  879 }
  880 
  881 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
  882 {
  883 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  884 	dev->msi_domain = d;
  885 #endif
  886 }
  887 
  888 static inline void *dev_get_drvdata(const struct device *dev)
  889 {
  890 	return dev->driver_data;
  891 }
  892 
  893 static inline void dev_set_drvdata(struct device *dev, void *data)
  894 {
  895 	dev->driver_data = data;
  896 }
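/*
 * A minimal sketch of the drvdata helpers (hypothetical "foo" state): the
 * probe path stores its private structure with dev_set_drvdata() and later
 * callbacks fetch it again with dev_get_drvdata().
 */
struct foo_priv;			/* driver-private state, illustrative */

static void foo_attach_state(struct device *dev, struct foo_priv *priv)
{
	dev_set_drvdata(dev, priv);
}

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return priv ? 0 : -ENODEV;	/* a real driver would quiesce @priv */
}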
  897 
  898 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
  899 {
  900 	return dev ? dev->power.subsys_data : NULL;
  901 }
  902 
  903 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
  904 {
  905 	return dev->kobj.uevent_suppress;
  906 }
  907 
  908 static inline void dev_set_uevent_suppress(struct device *dev, int val)
  909 {
  910 	dev->kobj.uevent_suppress = val;
  911 }
  912 
  913 static inline int device_is_registered(struct device *dev)
  914 {
  915 	return dev->kobj.state_in_sysfs;
  916 }
  917 
  918 static inline void device_enable_async_suspend(struct device *dev)
  919 {
  920 	if (!dev->power.is_prepared)
  921 		dev->power.async_suspend = true;
  922 }
  923 
  924 static inline void device_disable_async_suspend(struct device *dev)
  925 {
  926 	if (!dev->power.is_prepared)
  927 		dev->power.async_suspend = false;
  928 }
  929 
  930 static inline bool device_async_suspend_enabled(struct device *dev)
  931 {
  932 	return !!dev->power.async_suspend;
  933 }
  934 
  935 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
  936 {
  937 	dev->power.ignore_children = enable;
  938 }
  939 
  940 static inline void dev_pm_syscore_device(struct device *dev, bool val)
  941 {
  942 #ifdef CONFIG_PM_SLEEP
  943 	dev->power.syscore = val;
  944 #endif
  945 }
  946 
  947 static inline void device_lock(struct device *dev)
  948 {
  949 	mutex_lock(&dev->mutex);
  950 }
  951 
  952 static inline int device_trylock(struct device *dev)
  953 {
  954 	return mutex_trylock(&dev->mutex);
  955 }
  956 
  957 static inline void device_unlock(struct device *dev)
  958 {
  959 	mutex_unlock(&dev->mutex);
  960 }
  961 
  962 static inline void device_lock_assert(struct device *dev)
  963 {
  964 	lockdep_assert_held(&dev->mutex);
  965 }
  966 
  967 static inline struct device_node *dev_of_node(struct device *dev)
  968 {
  969 	if (!IS_ENABLED(CONFIG_OF))
  970 		return NULL;
  971 	return dev->of_node;
  972 }
  973 
  974 void driver_init(void);
  975 
  976 /*
  977  * High level routines for use by the bus drivers
  978  */
  979 extern int __must_check device_register(struct device *dev);
  980 extern void device_unregister(struct device *dev);
  981 extern void device_initialize(struct device *dev);
  982 extern int __must_check device_add(struct device *dev);
  983 extern void device_del(struct device *dev);
  984 extern int device_for_each_child(struct device *dev, void *data,
  985 		     int (*fn)(struct device *dev, void *data));
  986 extern int device_for_each_child_reverse(struct device *dev, void *data,
  987 		     int (*fn)(struct device *dev, void *data));
  988 extern struct device *device_find_child(struct device *dev, void *data,
  989 				int (*match)(struct device *dev, void *data));
  990 extern int device_rename(struct device *dev, const char *new_name);
  991 extern int device_move(struct device *dev, struct device *new_parent,
  992 		       enum dpm_order dpm_order);
  993 extern const char *device_get_devnode(struct device *dev,
  994 				      umode_t *mode, kuid_t *uid, kgid_t *gid,
  995 				      const char **tmp);
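/*
 * A minimal registration sketch (hypothetical "foo" wrapper, assumes
 * <linux/slab.h>): a dynamically allocated device needs a release()
 * callback, and on device_register() failure the reference must be dropped
 * with put_device() rather than freed directly.
 */
struct foo_device {
	struct device dev;
};

static void foo_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_device, dev));
}

static struct foo_device *foo_add_device(struct device *parent, int id)
{
	struct foo_device *fdev;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return NULL;

	fdev->dev.parent = parent;
	fdev->dev.release = foo_dev_release;
	dev_set_name(&fdev->dev, "foo%d", id);

	if (device_register(&fdev->dev)) {
		put_device(&fdev->dev);	/* release() frees fdev */
		return NULL;
	}
	return fdev;
}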
  996 
  997 static inline bool device_supports_offline(struct device *dev)
  998 {
  999 	return dev->bus && dev->bus->offline && dev->bus->online;
 1000 }
 1001 
 1002 extern void lock_device_hotplug(void);
 1003 extern void unlock_device_hotplug(void);
 1004 extern int lock_device_hotplug_sysfs(void);
 1005 extern int device_offline(struct device *dev);
 1006 extern int device_online(struct device *dev);
 1007 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1008 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1009 
 1010 /*
 1011  * Root device objects for grouping under /sys/devices
 1012  */
 1013 extern struct device *__root_device_register(const char *name,
 1014 					     struct module *owner);
 1015 
 1016 /* This is a macro to avoid include problems with THIS_MODULE */
 1017 #define root_device_register(name) \
 1018 	__root_device_register(name, THIS_MODULE)
 1019 
 1020 extern void root_device_unregister(struct device *root);
 1021 
 1022 static inline void *dev_get_platdata(const struct device *dev)
 1023 {
 1024 	return dev->platform_data;
 1025 }
 1026 
 1027 /*
 1028  * Manual binding of a device to driver. See drivers/base/bus.c
 1029  * for information on use.
 1030  */
 1031 extern int __must_check device_bind_driver(struct device *dev);
 1032 extern void device_release_driver(struct device *dev);
 1033 extern int  __must_check device_attach(struct device *dev);
 1034 extern int __must_check driver_attach(struct device_driver *drv);
 1035 extern void device_initial_probe(struct device *dev);
 1036 extern int __must_check device_reprobe(struct device *dev);
 1037 
 1038 /*
 1039  * Easy functions for dynamically creating devices on the fly
 1040  */
 1041 extern __printf(5, 0)
 1042 struct device *device_create_vargs(struct class *cls, struct device *parent,
 1043 				   dev_t devt, void *drvdata,
 1044 				   const char *fmt, va_list vargs);
 1045 extern __printf(5, 6)
 1046 struct device *device_create(struct class *cls, struct device *parent,
 1047 			     dev_t devt, void *drvdata,
 1048 			     const char *fmt, ...);
 1049 extern __printf(6, 7)
 1050 struct device *device_create_with_groups(struct class *cls,
 1051 			     struct device *parent, dev_t devt, void *drvdata,
 1052 			     const struct attribute_group **groups,
 1053 			     const char *fmt, ...);
 1054 extern void device_destroy(struct class *cls, dev_t devt);
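/*
 * A short sketch of device_create()/device_destroy() (the "foo_class"
 * pointer and the device numbers are assumptions; the class would be created
 * elsewhere, e.g. with class_create()): create a class device that devtmpfs
 * and udev turn into /dev/fooN, and tear it down again with the same dev_t.
 */
static struct class *foo_class;

static int foo_create_node(dev_t devt, void *drvdata)
{
	struct device *dev;

	dev = device_create(foo_class, NULL, devt, drvdata,
			    "foo%d", MINOR(devt));
	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}

static void foo_destroy_node(dev_t devt)
{
	device_destroy(foo_class, devt);
}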
 1055 
 1056 /*
 1057  * Platform "fixup" functions - allow the platform to have their say
 1058  * about devices and actions that the general device layer doesn't
 1059  * know about.
 1060  */
 1061 /* Notify platform of device discovery */
 1062 extern int (*platform_notify)(struct device *dev);
 1063 
 1064 extern int (*platform_notify_remove)(struct device *dev);
 1065 
 1066 
 1067 /*
 1068  * get_device - atomically increment the reference count for the device.
 1069  *
 1070  */
 1071 extern struct device *get_device(struct device *dev);
 1072 extern void put_device(struct device *dev);
 1073 
 1074 #ifdef CONFIG_DEVTMPFS
 1075 extern int devtmpfs_create_node(struct device *dev);
 1076 extern int devtmpfs_delete_node(struct device *dev);
 1077 extern int devtmpfs_mount(const char *mntdir);
 1078 #else
 1079 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
 1080 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
 1081 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
 1082 #endif
 1083 
 1084 /* drivers/base/power/shutdown.c */
 1085 extern void device_shutdown(void);
 1086 
 1087 /* debugging and troubleshooting/diagnostic helpers. */
 1088 extern const char *dev_driver_string(const struct device *dev);
 1089 
 1090 
 1091 #ifdef CONFIG_PRINTK
 1092 
 1093 extern __printf(3, 0)
 1094 int dev_vprintk_emit(int level, const struct device *dev,
 1095 		     const char *fmt, va_list args);
 1096 extern __printf(3, 4)
 1097 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
 1098 
 1099 extern __printf(3, 4)
 1100 void dev_printk(const char *level, const struct device *dev,
 1101 		const char *fmt, ...);
 1102 extern __printf(2, 3)
 1103 void dev_emerg(const struct device *dev, const char *fmt, ...);
 1104 extern __printf(2, 3)
 1105 void dev_alert(const struct device *dev, const char *fmt, ...);
 1106 extern __printf(2, 3)
 1107 void dev_crit(const struct device *dev, const char *fmt, ...);
 1108 extern __printf(2, 3)
 1109 void dev_err(const struct device *dev, const char *fmt, ...);
 1110 extern __printf(2, 3)
 1111 void dev_warn(const struct device *dev, const char *fmt, ...);
 1112 extern __printf(2, 3)
 1113 void dev_notice(const struct device *dev, const char *fmt, ...);
 1114 extern __printf(2, 3)
 1115 void _dev_info(const struct device *dev, const char *fmt, ...);
 1116 
 1117 #else
 1118 
 1119 static inline __printf(3, 0)
 1120 int dev_vprintk_emit(int level, const struct device *dev,
 1121 		     const char *fmt, va_list args)
 1122 { return 0; }
 1123 static inline __printf(3, 4)
 1124 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
 1125 { return 0; }
 1126 
 1127 static inline void __dev_printk(const char *level, const struct device *dev,
 1128 				struct va_format *vaf)
 1129 {}
 1130 static inline __printf(3, 4)
 1131 void dev_printk(const char *level, const struct device *dev,
 1132 		const char *fmt, ...)
 1133 {}
 1134 
 1135 static inline __printf(2, 3)
 1136 void dev_emerg(const struct device *dev, const char *fmt, ...)
 1137 {}
 1138 static inline __printf(2, 3)
 1139 void dev_crit(const struct device *dev, const char *fmt, ...)
 1140 {}
 1141 static inline __printf(2, 3)
 1142 void dev_alert(const struct device *dev, const char *fmt, ...)
 1143 {}
 1144 static inline __printf(2, 3)
 1145 void dev_err(const struct device *dev, const char *fmt, ...)
 1146 {}
 1147 static inline __printf(2, 3)
 1148 void dev_warn(const struct device *dev, const char *fmt, ...)
 1149 {}
 1150 static inline __printf(2, 3)
 1151 void dev_notice(const struct device *dev, const char *fmt, ...)
 1152 {}
 1153 static inline __printf(2, 3)
 1154 void _dev_info(const struct device *dev, const char *fmt, ...)
 1155 {}
 1156 
 1157 #endif
 1158 
 1159 /*
 1160  * Stupid hackaround for existing non-printk uses of dev_info
 1161  *
 1162  * Note that the definition of dev_info below is actually _dev_info
 1163  * and a macro is used to avoid redefining dev_info
 1164  */
 1165 
 1166 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 1167 
 1168 #if defined(CONFIG_DYNAMIC_DEBUG)
 1169 #define dev_dbg(dev, format, ...)		     \
 1170 do {						     \
 1171 	dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
 1172 } while (0)
 1173 #elif defined(DEBUG)
 1174 #define dev_dbg(dev, format, arg...)		\
 1175 	dev_printk(KERN_DEBUG, dev, format, ##arg)
 1176 #else
 1177 #define dev_dbg(dev, format, arg...)				\
 1178 ({								\
 1179 	if (0)							\
 1180 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1181 })
 1182 #endif
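/*
 * A minimal logging sketch (illustrative "foo" helper): the dev_* wrappers
 * prefix every message with the driver and device name, and dev_dbg()
 * compiles down to nothing unless DEBUG or dynamic debug is enabled.
 */
static int foo_check_status(struct device *dev, int status)
{
	dev_dbg(dev, "status word: %#x\n", status);

	if (status < 0) {
		dev_err(dev, "hardware reported failure: %d\n", status);
		return status;
	}

	dev_info(dev, "device ready\n");
	return 0;
}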
 1183 
 1184 #ifdef CONFIG_PRINTK
 1185 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1186 do {									\
 1187 	static bool __print_once __read_mostly;				\
 1188 									\
 1189 	if (!__print_once) {						\
 1190 		__print_once = true;					\
 1191 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1192 	}								\
 1193 } while (0)
 1194 #else
 1195 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1196 do {									\
 1197 	if (0)								\
 1198 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1199 } while (0)
 1200 #endif
 1201 
 1202 #define dev_emerg_once(dev, fmt, ...)					\
 1203 	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1204 #define dev_alert_once(dev, fmt, ...)					\
 1205 	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
 1206 #define dev_crit_once(dev, fmt, ...)					\
 1207 	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
 1208 #define dev_err_once(dev, fmt, ...)					\
 1209 	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
 1210 #define dev_warn_once(dev, fmt, ...)					\
 1211 	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
 1212 #define dev_notice_once(dev, fmt, ...)					\
 1213 	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
 1214 #define dev_info_once(dev, fmt, ...)					\
 1215 	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
 1216 #define dev_dbg_once(dev, fmt, ...)					\
 1217 	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
 1218 
 1219 #define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
 1220 do {									\
 1221 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1222 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1223 				      DEFAULT_RATELIMIT_BURST);		\
 1224 	if (__ratelimit(&_rs))						\
 1225 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1226 } while (0)
 1227 
 1228 #define dev_emerg_ratelimited(dev, fmt, ...)				\
 1229 	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1230 #define dev_alert_ratelimited(dev, fmt, ...)				\
 1231 	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
 1232 #define dev_crit_ratelimited(dev, fmt, ...)				\
 1233 	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
 1234 #define dev_err_ratelimited(dev, fmt, ...)				\
 1235 	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
 1236 #define dev_warn_ratelimited(dev, fmt, ...)				\
 1237 	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
 1238 #define dev_notice_ratelimited(dev, fmt, ...)				\
 1239 	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
 1240 #define dev_info_ratelimited(dev, fmt, ...)				\
 1241 	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
 1242 #if defined(CONFIG_DYNAMIC_DEBUG)
 1243 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
 1244 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1245 do {									\
 1246 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1247 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1248 				      DEFAULT_RATELIMIT_BURST);		\
 1249 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
 1250 	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
 1251 	    __ratelimit(&_rs))						\
 1252 		__dynamic_dev_dbg(&descriptor, dev, fmt,		\
 1253 				  ##__VA_ARGS__);			\
 1254 } while (0)
 1255 #elif defined(DEBUG)
 1256 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1257 do {									\
 1258 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1259 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1260 				      DEFAULT_RATELIMIT_BURST);		\
 1261 	if (__ratelimit(&_rs))						\
 1262 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1263 } while (0)
 1264 #else
 1265 #define dev_dbg_ratelimited(dev, fmt, ...)			\
 1266 	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 1267 #endif
 1268 
 1269 #ifdef VERBOSE_DEBUG
 1270 #define dev_vdbg	dev_dbg
 1271 #else
 1272 #define dev_vdbg(dev, format, arg...)				\
 1273 ({								\
 1274 	if (0)							\
 1275 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1276 })
 1277 #endif
 1278 
 1279 /*
 1280  * dev_WARN*() acts like dev_printk(), but with the key difference of
 1281  * using WARN/WARN_ONCE to include file/line information and a backtrace.
 1282  */
 1283 #define dev_WARN(dev, format, arg...) \
 1284 	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
 1285 
 1286 #define dev_WARN_ONCE(dev, condition, format, arg...) \
 1287 	WARN_ONCE(condition, "%s %s: " format, \
 1288 			dev_driver_string(dev), dev_name(dev), ## arg)
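/*
 * A one-line sketch of dev_WARN_ONCE() (illustrative names): print a warning
 * with a backtrace the first time an unexpected condition is seen.
 */
static void foo_clamp_len(struct device *dev, size_t *len, size_t max)
{
	dev_WARN_ONCE(dev, *len > max, "truncating %zu to %zu bytes\n",
		      *len, max);
	if (*len > max)
		*len = max;
}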
 1289 
 1290 /* Create alias, so I can be autoloaded. */
 1291 #define MODULE_ALIAS_CHARDEV(major,minor) \
 1292 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
 1293 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
 1294 	MODULE_ALIAS("char-major-" __stringify(major) "-*")
 1295 
 1296 #ifdef CONFIG_SYSFS_DEPRECATED
 1297 extern long sysfs_deprecated;
 1298 #else
 1299 #define sysfs_deprecated 0
 1300 #endif
 1301 
 1302 /**
 1303  * module_driver() - Helper macro for drivers that don't do anything
 1304  * special in module init/exit. This eliminates a lot of boilerplate.
 1305  * Each module may only use this macro once, and calling it replaces
 1306  * module_init() and module_exit().
 1307  *
 1308  * @__driver: driver name
 1309  * @__register: register function for this driver type
 1310  * @__unregister: unregister function for this driver type
 1311  * @...: Additional arguments to be passed to __register and __unregister.
 1312  *
 1313  * Use this macro to construct bus specific macros for registering
 1314  * drivers, and do not use it on its own.
 1315  */
 1316 #define module_driver(__driver, __register, __unregister, ...) \
 1317 static int __init __driver##_init(void) \
 1318 { \
 1319 	return __register(&(__driver) , ##__VA_ARGS__); \
 1320 } \
 1321 module_init(__driver##_init); \
 1322 static void __exit __driver##_exit(void) \
 1323 { \
 1324 	__unregister(&(__driver) , ##__VA_ARGS__); \
 1325 } \
 1326 module_exit(__driver##_exit);
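/*
 * A sketch of how a bus layer typically wraps module_driver(): the
 * "hypothetical" bus name and its register/unregister functions below are
 * assumptions for illustration only; real buses (platform, i2c, spi, ...)
 * provide their own wrappers built exactly like this.
 */
#define module_hypothetical_driver(__hypothetical_driver) \
	module_driver(__hypothetical_driver, hypothetical_driver_register, \
		      hypothetical_driver_unregister)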
 1327 
 1328 /**
 1329  * builtin_driver() - Helper macro for drivers that don't do anything
 1330  * special in init and have no exit. This eliminates some boilerplate.
 1331  * Each driver may only use this macro once, and calling it replaces
 1332  * device_initcall (or in some cases, the legacy __initcall).  This is
 1333  * meant to be a direct parallel of module_driver() above but without
 1334  * the __exit stuff that is not used for builtin cases.
 1335  *
 1336  * @__driver: driver name
 1337  * @__register: register function for this driver type
 1338  * @...: Additional arguments to be passed to __register
 1339  *
 1340  * Use this macro to construct bus specific macros for registering
 1341  * drivers, and do not use it on its own.
 1342  */
 1343 #define builtin_driver(__driver, __register, ...) \
 1344 static int __init __driver##_init(void) \
 1345 { \
 1346 	return __register(&(__driver) , ##__VA_ARGS__); \
 1347 } \
 1348 device_initcall(__driver##_init);
 1349 
 1350 #endif /* _DEVICE_H_ */
    1 /*
    2  * A generic kernel FIFO implementation
    3  *
    4  * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
    5  *
    6  * This program is free software; you can redistribute it and/or modify
    7  * it under the terms of the GNU General Public License as published by
    8  * the Free Software Foundation; either version 2 of the License, or
    9  * (at your option) any later version.
   10  *
   11  * This program is distributed in the hope that it will be useful,
   12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   14  * GNU General Public License for more details.
   15  *
   16  * You should have received a copy of the GNU General Public License
   17  * along with this program; if not, write to the Free Software
   18  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   19  *
   20  */
   21 
   22 #ifndef _LINUX_KFIFO_H
   23 #define _LINUX_KFIFO_H
   24 
   25 /*
   26  * How to port drivers to the new generic FIFO API:
   27  *
   28  * - Modify the declaration of the "struct kfifo *" object into an
   29  *   in-place "struct kfifo" object
   30  * - Init the in-place object with kfifo_alloc() or kfifo_init()
   31  *   Note: The address of the in-place "struct kfifo" object must be
   32  *   passed as the first argument to these functions
   33  * - Replace the use of __kfifo_put with kfifo_in and __kfifo_get
   34  *   with kfifo_out
   35  * - Replace the use of kfifo_put with kfifo_in_spinlocked and kfifo_get
   36  *   with kfifo_out_spinlocked
   37  *   Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc
   38  *   must now be passed to kfifo_in_spinlocked and kfifo_out_spinlocked
   39  *   as the last parameter
   40  * - The former __kfifo_* functions are renamed to kfifo_*
   41  */
   42 
   43 /*
   44  * Note about locking: No locking is required as long as there is only one
   45  * reader and one writer using the fifo and no kfifo_reset() is called.
   46  * kfifo_reset_out() can be used safely as long as it is only called from
   47  * the reader thread.
   48  * With multiple writers and one reader, only the writers need to be locked.
   49  * Vice versa, with only one writer and multiple readers, only the readers
   50  * need to be locked.
   51  */
   52 
   53 #include <linux/kernel.h>
   54 #include <linux/spinlock.h>
   55 #include <linux/stddef.h>
   56 #include <linux/scatterlist.h>
   57 
   58 struct __kfifo {
   59 	unsigned int	in;
   60 	unsigned int	out;
   61 	unsigned int	mask;
   62 	unsigned int	esize;
   63 	void		*data;
   64 };
   65 
   66 #define __STRUCT_KFIFO_COMMON(datatype, recsize, ptrtype) \
   67 	union { \
   68 		struct __kfifo	kfifo; \
   69 		datatype	*type; \
   70 		const datatype	*const_type; \
   71 		char		(*rectype)[recsize]; \
   72 		ptrtype		*ptr; \
   73 		ptrtype const	*ptr_const; \
   74 	}
   75 
   76 #define __STRUCT_KFIFO(type, size, recsize, ptrtype) \
   77 { \
   78 	__STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \
   79 	type		buf[((size < 2) || (size & (size - 1))) ? -1 : size]; \
   80 }
   81 
   82 #define STRUCT_KFIFO(type, size) \
   83 	struct __STRUCT_KFIFO(type, size, 0, type)
   84 
   85 #define __STRUCT_KFIFO_PTR(type, recsize, ptrtype) \
   86 { \
   87 	__STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \
   88 	type		buf[0]; \
   89 }
   90 
   91 #define STRUCT_KFIFO_PTR(type) \
   92 	struct __STRUCT_KFIFO_PTR(type, 0, type)
   93 
   94 /*
   95  * define compatibility "struct kfifo" for dynamic allocated fifos
   96  */
   97 struct kfifo __STRUCT_KFIFO_PTR(unsigned char, 0, void);
   98 
   99 #define STRUCT_KFIFO_REC_1(size) \
  100 	struct __STRUCT_KFIFO(unsigned char, size, 1, void)
  101 
  102 #define STRUCT_KFIFO_REC_2(size) \
  103 	struct __STRUCT_KFIFO(unsigned char, size, 2, void)
  104 
  105 /*
  106  * define kfifo_rec types
  107  */
  108 struct kfifo_rec_ptr_1 __STRUCT_KFIFO_PTR(unsigned char, 1, void);
  109 struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
  110 
  111 /*
  112  * helper macro to distinguish between real in place fifo where the fifo
  113  * array is a part of the structure and the fifo type where the array is
  114  * outside of the fifo structure.
  115  */
  116 #define	__is_kfifo_ptr(fifo)	(sizeof(*fifo) == sizeof(struct __kfifo))
  117 
  118 /**
  119  * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
  120  * @fifo: name of the declared fifo
  121  * @type: type of the fifo elements
  122  */
  123 #define DECLARE_KFIFO_PTR(fifo, type)	STRUCT_KFIFO_PTR(type) fifo
  124 
  125 /**
  126  * DECLARE_KFIFO - macro to declare a fifo object
  127  * @fifo: name of the declared fifo
  128  * @type: type of the fifo elements
  129  * @size: the number of elements in the fifo, this must be a power of 2
  130  */
  131 #define DECLARE_KFIFO(fifo, type, size)	STRUCT_KFIFO(type, size) fifo
  132 
  133 /**
  134  * INIT_KFIFO - Initialize a fifo declared by DECLARE_KFIFO
  135  * @fifo: name of the declared fifo datatype
  136  */
  137 #define INIT_KFIFO(fifo) \
  138 (void)({ \
  139 	typeof(&(fifo)) __tmp = &(fifo); \
  140 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  141 	__kfifo->in = 0; \
  142 	__kfifo->out = 0; \
  143 	__kfifo->mask = __is_kfifo_ptr(__tmp) ? 0 : ARRAY_SIZE(__tmp->buf) - 1;\
  144 	__kfifo->esize = sizeof(*__tmp->buf); \
  145 	__kfifo->data = __is_kfifo_ptr(__tmp) ?  NULL : __tmp->buf; \
  146 })
  147 
  148 /**
  149  * DEFINE_KFIFO - macro to define and initialize a fifo
  150  * @fifo: name of the declared fifo datatype
  151  * @type: type of the fifo elements
  152  * @size: the number of elements in the fifo, this must be a power of 2
  153  *
  154  * Note: the macro can be used for global and local fifo data type variables.
  155  */
  156 #define DEFINE_KFIFO(fifo, type, size) \
  157 	DECLARE_KFIFO(fifo, type, size) = \
  158 	(typeof(fifo)) { \
  159 		{ \
  160 			{ \
  161 			.in	= 0, \
  162 			.out	= 0, \
  163 			.mask	= __is_kfifo_ptr(&(fifo)) ? \
  164 				  0 : \
  165 				  ARRAY_SIZE((fifo).buf) - 1, \
  166 			.esize	= sizeof(*(fifo).buf), \
  167 			.data	= __is_kfifo_ptr(&(fifo)) ? \
  168 				NULL : \
  169 				(fifo).buf, \
  170 			} \
  171 		} \
  172 	}
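/*
 * A minimal declaration sketch (illustrative "foo" names): a fixed-size
 * in-place fifo is either declared and initialized at run time with
 * DECLARE_KFIFO()/INIT_KFIFO(), or defined ready-to-use with DEFINE_KFIFO().
 */
struct foo_ctx {
	DECLARE_KFIFO(rx_fifo, unsigned char, 128);	/* size: power of 2 */
};

static void foo_ctx_init(struct foo_ctx *ctx)
{
	INIT_KFIFO(ctx->rx_fifo);
}

static DEFINE_KFIFO(foo_events, int, 32);	/* static, already initialized */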
  173 
  174 
  175 static inline unsigned int __must_check
  176 __kfifo_uint_must_check_helper(unsigned int val)
  177 {
  178 	return val;
  179 }
  180 
  181 static inline int __must_check
  182 __kfifo_int_must_check_helper(int val)
  183 {
  184 	return val;
  185 }
  186 
  187 /**
  188  * kfifo_initialized - Check if the fifo is initialized
  189  * @fifo: address of the fifo to check
  190  *
  191  * Return %true if fifo is initialized, otherwise %false.
  192  * Assumes the fifo was 0 before.
  193  */
  194 #define kfifo_initialized(fifo) ((fifo)->kfifo.mask)
  195 
  196 /**
  197  * kfifo_esize - returns the size of the element managed by the fifo
  198  * @fifo: address of the fifo to be used
  199  */
  200 #define kfifo_esize(fifo)	((fifo)->kfifo.esize)
  201 
  202 /**
  203  * kfifo_recsize - returns the size of the record length field
  204  * @fifo: address of the fifo to be used
  205  */
  206 #define kfifo_recsize(fifo)	(sizeof(*(fifo)->rectype))
  207 
  208 /**
  209  * kfifo_size - returns the size of the fifo in elements
  210  * @fifo: address of the fifo to be used
  211  */
  212 #define kfifo_size(fifo)	((fifo)->kfifo.mask + 1)
  213 
  214 /**
  215  * kfifo_reset - removes the entire fifo content
  216  * @fifo: address of the fifo to be used
  217  *
  218  * Note: usage of kfifo_reset() is dangerous. It should only be called when the
  219  * fifo is exclusively locked or when it is ensured that no other thread is
  220  * accessing the fifo.
  221  */
  222 #define kfifo_reset(fifo) \
  223 (void)({ \
  224 	typeof((fifo) + 1) __tmp = (fifo); \
  225 	__tmp->kfifo.in = __tmp->kfifo.out = 0; \
  226 })
  227 
  228 /**
  229  * kfifo_reset_out - skip fifo content
  230  * @fifo: address of the fifo to be used
  231  *
  232  * Note: The usage of kfifo_reset_out() is safe only as long as it is called
  233  * from the reader thread and there is only one concurrent reader. Otherwise
  234  * it is dangerous and must be handled in the same way as kfifo_reset().
  235  */
  236 #define kfifo_reset_out(fifo)	\
  237 (void)({ \
  238 	typeof((fifo) + 1) __tmp = (fifo); \
  239 	__tmp->kfifo.out = __tmp->kfifo.in; \
  240 })
  241 
  242 /**
  243  * kfifo_len - returns the number of used elements in the fifo
  244  * @fifo: address of the fifo to be used
  245  */
  246 #define kfifo_len(fifo) \
  247 ({ \
  248 	typeof((fifo) + 1) __tmpl = (fifo); \
  249 	__tmpl->kfifo.in - __tmpl->kfifo.out; \
  250 })
  251 
  252 /**
  253  * kfifo_is_empty - returns true if the fifo is empty
  254  * @fifo: address of the fifo to be used
  255  */
  256 #define	kfifo_is_empty(fifo) \
  257 ({ \
  258 	typeof((fifo) + 1) __tmpq = (fifo); \
  259 	__tmpq->kfifo.in == __tmpq->kfifo.out; \
  260 })
  261 
  262 /**
  263  * kfifo_is_full - returns true if the fifo is full
  264  * @fifo: address of the fifo to be used
  265  */
  266 #define	kfifo_is_full(fifo) \
  267 ({ \
  268 	typeof((fifo) + 1) __tmpq = (fifo); \
  269 	kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
  270 })
  271 
  272 /**
  273  * kfifo_avail - returns the number of unused elements in the fifo
  274  * @fifo: address of the fifo to be used
  275  */
  276 #define	kfifo_avail(fifo) \
  277 __kfifo_uint_must_check_helper( \
  278 ({ \
  279 	typeof((fifo) + 1) __tmpq = (fifo); \
  280 	const size_t __recsize = sizeof(*__tmpq->rectype); \
  281 	unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
  282 	(__recsize) ? ((__avail <= __recsize) ? 0 : \
  283 	__kfifo_max_r(__avail - __recsize, __recsize)) : \
  284 	__avail; \
  285 }) \
  286 )
  287 
  288 /**
  289  * kfifo_skip - skip output data
  290  * @fifo: address of the fifo to be used
  291  */
  292 #define	kfifo_skip(fifo) \
  293 (void)({ \
  294 	typeof((fifo) + 1) __tmp = (fifo); \
  295 	const size_t __recsize = sizeof(*__tmp->rectype); \
  296 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  297 	if (__recsize) \
  298 		__kfifo_skip_r(__kfifo, __recsize); \
  299 	else \
  300 		__kfifo->out++; \
  301 })
  302 
  303 /**
  304  * kfifo_peek_len - gets the size of the next fifo record
  305  * @fifo: address of the fifo to be used
  306  *
  307  * This function returns the size of the next fifo record in number of bytes.
  308  */
  309 #define kfifo_peek_len(fifo) \
  310 __kfifo_uint_must_check_helper( \
  311 ({ \
  312 	typeof((fifo) + 1) __tmp = (fifo); \
  313 	const size_t __recsize = sizeof(*__tmp->rectype); \
  314 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  315 	(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
  316 	__kfifo_len_r(__kfifo, __recsize); \
  317 }) \
  318 )
  319 
  320 /**
  321  * kfifo_alloc - dynamically allocates a new fifo buffer
  322  * @fifo: pointer to the fifo
  323  * @size: the number of elements in the fifo, this must be a power of 2
  324  * @gfp_mask: get_free_pages mask, passed to kmalloc()
  325  *
  326  * This macro dynamically allocates a new fifo buffer.
  327  *
  328  * The number of elements will be rounded up to a power of 2.
  329  * The fifo will be released with kfifo_free().
  330  * Return 0 if no error, otherwise an error code.
  331  */
  332 #define kfifo_alloc(fifo, size, gfp_mask) \
  333 __kfifo_int_must_check_helper( \
  334 ({ \
  335 	typeof((fifo) + 1) __tmp = (fifo); \
  336 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  337 	__is_kfifo_ptr(__tmp) ? \
  338 	__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
  339 	-EINVAL; \
  340 }) \
  341 )
  342 
  343 /**
  344  * kfifo_free - frees the fifo
  345  * @fifo: the fifo to be freed
  346  */
  347 #define kfifo_free(fifo) \
  348 ({ \
  349 	typeof((fifo) + 1) __tmp = (fifo); \
  350 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  351 	if (__is_kfifo_ptr(__tmp)) \
  352 		__kfifo_free(__kfifo); \
  353 })
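/*
 * A short sketch of a dynamically allocated fifo (illustrative names):
 * kfifo_alloc() only works on the pointer flavour ("struct kfifo" or
 * DECLARE_KFIFO_PTR), and the buffer must later be released with
 * kfifo_free().
 */
static struct kfifo foo_fifo;

static int foo_fifo_setup(void)
{
	/* room for 256 unsigned char elements, rounded up to a power of 2 */
	return kfifo_alloc(&foo_fifo, 256, GFP_KERNEL);
}

static void foo_fifo_teardown(void)
{
	kfifo_free(&foo_fifo);
}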
  354 
  355 /**
  356  * kfifo_init - initialize a fifo using a preallocated buffer
  357  * @fifo: the fifo to assign the buffer
  358  * @buffer: the preallocated buffer to be used
  359  * @size: the size of the internal buffer, this has to be a power of 2
  360  *
  361  * This macro initializes a fifo using a preallocated buffer.
  362  *
  363  * The number of elements will be rounded up to a power of 2.
  364  * Return 0 if no error, otherwise an error code.
  365  */
  366 #define kfifo_init(fifo, buffer, size) \
  367 ({ \
  368 	typeof((fifo) + 1) __tmp = (fifo); \
  369 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  370 	__is_kfifo_ptr(__tmp) ? \
  371 	__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
  372 	-EINVAL; \
  373 })
  374 
  375 /**
  376  * kfifo_put - put data into the fifo
  377  * @fifo: address of the fifo to be used
  378  * @val: the data to be added
  379  *
  380  * This macro copies the given value into the fifo.
  381  * It returns 0 if the fifo was full. Otherwise it returns the number of
  382  * processed elements.
  383  *
  384  * Note that with only one concurrent reader and one concurrent
  385  * writer, you don't need extra locking to use this macro.
  386  */
  387 #define	kfifo_put(fifo, val) \
  388 ({ \
  389 	typeof((fifo) + 1) __tmp = (fifo); \
  390 	typeof(*__tmp->const_type) __val = (val); \
  391 	unsigned int __ret; \
  392 	size_t __recsize = sizeof(*__tmp->rectype); \
  393 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  394 	if (__recsize) \
  395 		__ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \
  396 			__recsize); \
  397 	else { \
  398 		__ret = !kfifo_is_full(__tmp); \
  399 		if (__ret) { \
  400 			(__is_kfifo_ptr(__tmp) ? \
  401 			((typeof(__tmp->type))__kfifo->data) : \
  402 			(__tmp->buf) \
  403 			)[__kfifo->in & __tmp->kfifo.mask] = \
  404 				(typeof(*__tmp->type))__val; \
  405 			smp_wmb(); \
  406 			__kfifo->in++; \
  407 		} \
  408 	} \
  409 	__ret; \
  410 })
  411 
  412 /**
  413  * kfifo_get - get data from the fifo
  414  * @fifo: address of the fifo to be used
  415  * @val: address where to store the data
  416  *
  417  * This macro reads the data from the fifo.
  418  * It returns 0 if the fifo was empty. Otherwise it returns the number of
  419  * processed elements.
  420  *
  421  * Note that with only one concurrent reader and one concurrent
  422  * writer, you don't need extra locking to use this macro.
  423  */
  424 #define	kfifo_get(fifo, val) \
  425 __kfifo_uint_must_check_helper( \
  426 ({ \
  427 	typeof((fifo) + 1) __tmp = (fifo); \
  428 	typeof(__tmp->ptr) __val = (val); \
  429 	unsigned int __ret; \
  430 	const size_t __recsize = sizeof(*__tmp->rectype); \
  431 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  432 	if (__recsize) \
  433 		__ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \
  434 			__recsize); \
  435 	else { \
  436 		__ret = !kfifo_is_empty(__tmp); \
  437 		if (__ret) { \
  438 			*(typeof(__tmp->type))__val = \
  439 				(__is_kfifo_ptr(__tmp) ? \
  440 				((typeof(__tmp->type))__kfifo->data) : \
  441 				(__tmp->buf) \
  442 				)[__kfifo->out & __tmp->kfifo.mask]; \
  443 			smp_wmb(); \
  444 			__kfifo->out++; \
  445 		} \
  446 	} \
  447 	__ret; \
  448 }) \
  449 )
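/*
 * A minimal single-producer/single-consumer sketch (illustrative names):
 * with exactly one writer and one reader no locking is needed around
 * kfifo_put()/kfifo_get().
 */
static DEFINE_KFIFO(foo_samples, int, 64);

static void foo_push_sample(int sample)
{
	/* returns 0 and drops the sample if the fifo is full */
	kfifo_put(&foo_samples, sample);
}

static bool foo_pop_sample(int *sample)
{
	/* returns 0 if the fifo was empty, 1 if *sample was filled in */
	return kfifo_get(&foo_samples, sample);
}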
  450 
  451 /**
  452  * kfifo_peek - get data from the fifo without removing
  453  * @fifo: address of the fifo to be used
  454  * @val: address where to store the data
  455  *
  456  * This reads the data from the fifo without removing it from the fifo.
  457  * It returns 0 if the fifo was empty. Otherwise it returns the number of
  458  * processed elements.
  459  *
  460  * Note that with only one concurrent reader and one concurrent
  461  * writer, you don't need extra locking to use this macro.
  462  */
  463 #define	kfifo_peek(fifo, val) \
  464 __kfifo_uint_must_check_helper( \
  465 ({ \
  466 	typeof((fifo) + 1) __tmp = (fifo); \
  467 	typeof(__tmp->ptr) __val = (val); \
  468 	unsigned int __ret; \
  469 	const size_t __recsize = sizeof(*__tmp->rectype); \
  470 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  471 	if (__recsize) \
  472 		__ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \
  473 			__recsize); \
  474 	else { \
  475 		__ret = !kfifo_is_empty(__tmp); \
  476 		if (__ret) { \
  477 			*(typeof(__tmp->type))__val = \
  478 				(__is_kfifo_ptr(__tmp) ? \
  479 				((typeof(__tmp->type))__kfifo->data) : \
  480 				(__tmp->buf) \
  481 				)[__kfifo->out & __tmp->kfifo.mask]; \
  482 			smp_wmb(); \
  483 		} \
  484 	} \
  485 	__ret; \
  486 }) \
  487 )
  488 
  489 /**
  490  * kfifo_in - put data into the fifo
  491  * @fifo: address of the fifo to be used
  492  * @buf: the data to be added
  493  * @n: number of elements to be added
  494  *
  495  * This macro copies the given buffer into the fifo and returns the
  496  * number of copied elements.
  497  *
  498  * Note that with only one concurrent reader and one concurrent
  499  * writer, you don't need extra locking to use this macro.
  500  */
  501 #define	kfifo_in(fifo, buf, n) \
  502 ({ \
  503 	typeof((fifo) + 1) __tmp = (fifo); \
  504 	typeof(__tmp->ptr_const) __buf = (buf); \
  505 	unsigned long __n = (n); \
  506 	const size_t __recsize = sizeof(*__tmp->rectype); \
  507 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  508 	(__recsize) ?\
  509 	__kfifo_in_r(__kfifo, __buf, __n, __recsize) : \
  510 	__kfifo_in(__kfifo, __buf, __n); \
  511 })
  512 
  513 /**
  514  * kfifo_in_spinlocked - put data into the fifo using a spinlock for locking
  515  * @fifo: address of the fifo to be used
  516  * @buf: the data to be added
  517  * @n: number of elements to be added
  518  * @lock: pointer to the spinlock to use for locking
  519  *
  520  * This macro copies the given buffer into the fifo and returns the
  521  * number of copied elements.
  522  */
  523 #define	kfifo_in_spinlocked(fifo, buf, n, lock) \
  524 ({ \
  525 	unsigned long __flags; \
  526 	unsigned int __ret; \
  527 	spin_lock_irqsave(lock, __flags); \
  528 	__ret = kfifo_in(fifo, buf, n); \
  529 	spin_unlock_irqrestore(lock, __flags); \
  530 	__ret; \
  531 })
  532 
  533 /* alias for kfifo_in_spinlocked, will be removed in a future release */
  534 #define kfifo_in_locked(fifo, buf, n, lock) \
  535 		kfifo_in_spinlocked(fifo, buf, n, lock)
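/*
 * A sketch of the spinlocked variant (illustrative names): with several
 * writers, a spinlock passed to kfifo_in_spinlocked() serializes the
 * producers; a single reader may still call kfifo_out() without the lock.
 */
static DEFINE_KFIFO(foo_log, unsigned char, 1024);
static DEFINE_SPINLOCK(foo_log_lock);

static unsigned int foo_log_write(const unsigned char *buf, unsigned int len)
{
	return kfifo_in_spinlocked(&foo_log, buf, len, &foo_log_lock);
}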
  536 
  537 /**
  538  * kfifo_out - get data from the fifo
  539  * @fifo: address of the fifo to be used
  540  * @buf: pointer to the storage buffer
  541  * @n: max. number of elements to get
  542  *
  543  * This macro gets some data from the fifo and returns the number of elements
  544  * copied.
  545  *
  546  * Note that with only one concurrent reader and one concurrent
  547  * writer, you don't need extra locking to use this macro.
  548  */
  549 #define	kfifo_out(fifo, buf, n) \
  550 __kfifo_uint_must_check_helper( \
  551 ({ \
  552 	typeof((fifo) + 1) __tmp = (fifo); \
  553 	typeof(__tmp->ptr) __buf = (buf); \
  554 	unsigned long __n = (n); \
  555 	const size_t __recsize = sizeof(*__tmp->rectype); \
  556 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  557 	(__recsize) ?\
  558 	__kfifo_out_r(__kfifo, __buf, __n, __recsize) : \
  559 	__kfifo_out(__kfifo, __buf, __n); \
  560 }) \
  561 )
  562 
  563 /**
  564  * kfifo_out_spinlocked - get data from the fifo using a spinlock for locking
  565  * @fifo: address of the fifo to be used
  566  * @buf: pointer to the storage buffer
  567  * @n: max. number of elements to get
  568  * @lock: pointer to the spinlock to use for locking
  569  *
  570  * This macro gets the data from the fifo and returns the number of elements
  571  * copied.
  572  */
  573 #define	kfifo_out_spinlocked(fifo, buf, n, lock) \
  574 __kfifo_uint_must_check_helper( \
  575 ({ \
  576 	unsigned long __flags; \
  577 	unsigned int __ret; \
  578 	spin_lock_irqsave(lock, __flags); \
  579 	__ret = kfifo_out(fifo, buf, n); \
  580 	spin_unlock_irqrestore(lock, __flags); \
  581 	__ret; \
  582 }) \
  583 )
  584 
  585 /* alias for kfifo_out_spinlocked, will be removed in a future release */
  586 #define kfifo_out_locked(fifo, buf, n, lock) \
  587 		kfifo_out_spinlocked(fifo, buf, n, lock)
  588 
  589 /**
  590  * kfifo_from_user - puts some data from user space into the fifo
  591  * @fifo: address of the fifo to be used
  592  * @from: pointer to the data to be added
  593  * @len: the length of the data to be added
  594  * @copied: pointer to output variable to store the number of copied bytes
  595  *
  596  * This macro copies at most @len bytes from @from into the
  597  * fifo, depending on the available space, and returns -EFAULT/0.
  598  *
  599  * Note that with only one concurrent reader and one concurrent
  600  * writer, you don't need extra locking to use this macro.
  601  */
  602 #define	kfifo_from_user(fifo, from, len, copied) \
  603 __kfifo_uint_must_check_helper( \
  604 ({ \
  605 	typeof((fifo) + 1) __tmp = (fifo); \
  606 	const void __user *__from = (from); \
  607 	unsigned int __len = (len); \
  608 	unsigned int *__copied = (copied); \
  609 	const size_t __recsize = sizeof(*__tmp->rectype); \
  610 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  611 	(__recsize) ? \
  612 	__kfifo_from_user_r(__kfifo, __from, __len,  __copied, __recsize) : \
  613 	__kfifo_from_user(__kfifo, __from, __len, __copied); \
  614 }) \
  615 )
  616 
  617 /**
  618  * kfifo_to_user - copies data from the fifo into user space
  619  * @fifo: address of the fifo to be used
  620  * @to: where the data must be copied
  621  * @len: the size of the destination buffer
  622  * @copied: pointer to output variable to store the number of copied bytes
  623  *
  624  * This macro copies at most @len bytes from the fifo into the
  625  * @to buffer and returns -EFAULT/0.
  626  *
  627  * Note that with only one concurrent reader and one concurrent
  628  * writer, you don't need extra locking to use this macro.
  629  */
  630 #define	kfifo_to_user(fifo, to, len, copied) \
  631 __kfifo_uint_must_check_helper( \
  632 ({ \
  633 	typeof((fifo) + 1) __tmp = (fifo); \
  634 	void __user *__to = (to); \
  635 	unsigned int __len = (len); \
  636 	unsigned int *__copied = (copied); \
  637 	const size_t __recsize = sizeof(*__tmp->rectype); \
  638 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  639 	(__recsize) ? \
  640 	__kfifo_to_user_r(__kfifo, __to, __len, __copied, __recsize) : \
  641 	__kfifo_to_user(__kfifo, __to, __len, __copied); \
  642 }) \
  643 )
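/*
 * A sketch of the user-space copy helpers in character-device read/write
 * methods (hypothetical fifo and fops context, assumes <linux/fs.h>): both
 * macros return 0 or -EFAULT and report the byte count through @copied.
 */
static DEFINE_KFIFO(foo_cdev_fifo, unsigned char, 4096);

static ssize_t foo_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	ret = kfifo_from_user(&foo_cdev_fifo, buf, count, &copied);
	return ret ? ret : copied;
}

static ssize_t foo_cdev_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	ret = kfifo_to_user(&foo_cdev_fifo, buf, count, &copied);
	return ret ? ret : copied;
}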
  644 
  645 /**
  646  * kfifo_dma_in_prepare - setup a scatterlist for DMA input
  647  * @fifo: address of the fifo to be used
  648  * @sgl: pointer to the scatterlist array
  649  * @nents: number of entries in the scatterlist array
  650  * @len: number of elements to transfer
  651  *
  652  * This macro fills a scatterlist for DMA input.
  653  * It returns the number of entries in the scatterlist array.
  654  *
  655  * Note that with only one concurrent reader and one concurrent
  656  * writer, you don't need extra locking to use these macros.
  657  */
  658 #define	kfifo_dma_in_prepare(fifo, sgl, nents, len) \
  659 ({ \
  660 	typeof((fifo) + 1) __tmp = (fifo); \
  661 	struct scatterlist *__sgl = (sgl); \
  662 	int __nents = (nents); \
  663 	unsigned int __len = (len); \
  664 	const size_t __recsize = sizeof(*__tmp->rectype); \
  665 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  666 	(__recsize) ? \
  667 	__kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
  668 	__kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \
  669 })
  670 
  671 /**
  672  * kfifo_dma_in_finish - finish a DMA IN operation
  673  * @fifo: address of the fifo to be used
  674  * @len: number of bytes received
  675  *
  676  * This macro finishes a DMA IN operation. The in counter will be updated by
  677  * the len parameter. No error checking will be done.
  678  *
  679  * Note that with only one concurrent reader and one concurrent
  680  * writer, you don't need extra locking to use these macros.
  681  */
  682 #define kfifo_dma_in_finish(fifo, len) \
  683 (void)({ \
  684 	typeof((fifo) + 1) __tmp = (fifo); \
  685 	unsigned int __len = (len); \
  686 	const size_t __recsize = sizeof(*__tmp->rectype); \
  687 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  688 	if (__recsize) \
  689 		__kfifo_dma_in_finish_r(__kfifo, __len, __recsize); \
  690 	else \
  691 		__kfifo->in += __len / sizeof(*__tmp->type); \
  692 })
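/*
 * A rough DMA-in sketch (illustrative names; mapping and error handling
 * omitted): fill a scatterlist from the fifo's free space, run the transfer,
 * then account the bytes actually written by the hardware with
 * kfifo_dma_in_finish().
 */
static DEFINE_KFIFO(foo_dma_fifo, unsigned char, 4096);

static void foo_dma_receive(unsigned int len)
{
	struct scatterlist sgl[4];
	unsigned int nents;

	sg_init_table(sgl, ARRAY_SIZE(sgl));
	nents = kfifo_dma_in_prepare(&foo_dma_fifo, sgl, ARRAY_SIZE(sgl), len);
	if (!nents)
		return;		/* nothing to map */

	/* ... map sgl, program the DMA engine, wait for completion ... */

	kfifo_dma_in_finish(&foo_dma_fifo, len);
}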
  693 
  694 /**
  695  * kfifo_dma_out_prepare - setup a scatterlist for DMA output
  696  * @fifo: address of the fifo to be used
  697  * @sgl: pointer to the scatterlist array
  698  * @nents: number of entries in the scatterlist array
  699  * @len: number of elements to transfer
  700  *
  701  * This macro fills a scatterlist for DMA output of at most @len bytes
  702  * to transfer.
  703  * It returns the number of entries in the scatterlist array.
  704  * A zero means there is no space available and the scatterlist is not filled.
  705  *
  706  * Note that with only one concurrent reader and one concurrent
  707  * writer, you don't need extra locking to use these macros.
  708  */
  709 #define	kfifo_dma_out_prepare(fifo, sgl, nents, len) \
  710 ({ \
  711 	typeof((fifo) + 1) __tmp = (fifo);  \
  712 	struct scatterlist *__sgl = (sgl); \
  713 	int __nents = (nents); \
  714 	unsigned int __len = (len); \
  715 	const size_t __recsize = sizeof(*__tmp->rectype); \
  716 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  717 	(__recsize) ? \
  718 	__kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
  719 	__kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \
  720 })
  721 
  722 /**
  723  * kfifo_dma_out_finish - finish a DMA OUT operation
  724  * @fifo: address of the fifo to be used
  725  * @len: number of bytes transferred
  726  *
  727  * This macro finishes a DMA OUT operation. The out counter will be updated by
  728  * the len parameter. No error checking will be done.
  729  *
  730  * Note that with only one concurrent reader and one concurrent
  731  * writer, you don't need extra locking to use these macros.
  732  */
  733 #define kfifo_dma_out_finish(fifo, len) \
  734 (void)({ \
  735 	typeof((fifo) + 1) __tmp = (fifo); \
  736 	unsigned int __len = (len); \
  737 	const size_t __recsize = sizeof(*__tmp->rectype); \
  738 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  739 	if (__recsize) \
  740 		__kfifo_dma_out_finish_r(__kfifo, __recsize); \
  741 	else \
  742 		__kfifo->out += __len / sizeof(*__tmp->type); \
  743 })
  744 
  745 /**
  746  * kfifo_out_peek - gets some data from the fifo
  747  * @fifo: address of the fifo to be used
  748  * @buf: pointer to the storage buffer
  749  * @n: max. number of elements to get
  750  *
  751  * This macro gets the data from the fifo and returns the number of elements
  752  * copied. The data is not removed from the fifo.
  753  *
  754  * Note that with only one concurrent reader and one concurrent
  755  * writer, you don't need extra locking to use this macro.
  756  */
  757 #define	kfifo_out_peek(fifo, buf, n) \
  758 __kfifo_uint_must_check_helper( \
  759 ({ \
  760 	typeof((fifo) + 1) __tmp = (fifo); \
  761 	typeof(__tmp->ptr) __buf = (buf); \
  762 	unsigned long __n = (n); \
  763 	const size_t __recsize = sizeof(*__tmp->rectype); \
  764 	struct __kfifo *__kfifo = &__tmp->kfifo; \
  765 	(__recsize) ? \
  766 	__kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \
  767 	__kfifo_out_peek(__kfifo, __buf, __n); \
  768 }) \
  769 )
  770 
  771 extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
  772 	size_t esize, gfp_t gfp_mask);
  773 
  774 extern void __kfifo_free(struct __kfifo *fifo);
  775 
  776 extern int __kfifo_init(struct __kfifo *fifo, void *buffer,
  777 	unsigned int size, size_t esize);
  778 
  779 extern unsigned int __kfifo_in(struct __kfifo *fifo,
  780 	const void *buf, unsigned int len);
  781 
  782 extern unsigned int __kfifo_out(struct __kfifo *fifo,
  783 	void *buf, unsigned int len);
  784 
  785 extern int __kfifo_from_user(struct __kfifo *fifo,
  786 	const void __user *from, unsigned long len, unsigned int *copied);
  787 
  788 extern int __kfifo_to_user(struct __kfifo *fifo,
  789 	void __user *to, unsigned long len, unsigned int *copied);
  790 
  791 extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
  792 	struct scatterlist *sgl, int nents, unsigned int len);
  793 
  794 extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
  795 	struct scatterlist *sgl, int nents, unsigned int len);
  796 
  797 extern unsigned int __kfifo_out_peek(struct __kfifo *fifo,
  798 	void *buf, unsigned int len);
  799 
  800 extern unsigned int __kfifo_in_r(struct __kfifo *fifo,
  801 	const void *buf, unsigned int len, size_t recsize);
  802 
  803 extern unsigned int __kfifo_out_r(struct __kfifo *fifo,
  804 	void *buf, unsigned int len, size_t recsize);
  805 
  806 extern int __kfifo_from_user_r(struct __kfifo *fifo,
  807 	const void __user *from, unsigned long len, unsigned int *copied,
  808 	size_t recsize);
  809 
  810 extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
  811 	unsigned long len, unsigned int *copied, size_t recsize);
  812 
  813 extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
  814 	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
  815 
  816 extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
  817 	unsigned int len, size_t recsize);
  818 
  819 extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
  820 	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
  821 
  822 extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);
  823 
  824 extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
  825 
  826 extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
  827 
  828 extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
  829 	void *buf, unsigned int len, size_t recsize);
  830 
  831 extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize);
  832 
  833 #endif
    1 /*
    2  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
    3  *
    4  * (C) SGI 2006, Christoph Lameter
    5  * 	Cleaned up and restructured to ease the addition of alternative
    6  * 	implementations of SLAB allocators.
    7  * (C) Linux Foundation 2008-2013
    8  *      Unified interface for all slab allocators
    9  */
   10 
   11 #ifndef _LINUX_SLAB_H
   12 #define	_LINUX_SLAB_H
   13 
   14 #include <linux/gfp.h>
   15 #include <linux/types.h>
   16 #include <linux/workqueue.h>
   17 
   18 
   19 /*
   20  * Flags to pass to kmem_cache_create().
   21  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
   22  */
   23 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
   24 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
   25 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
   26 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
   27 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
   28 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
   29 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
   30 /*
   31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
   32  *
   33  * This delays freeing the SLAB page by a grace period, it does _NOT_
   34  * delay object freeing. This means that if you do kmem_cache_free()
   35  * that memory location is free to be reused at any time. Thus it may
   36  * be possible to see another object there in the same RCU grace period.
   37  *
   38  * This feature only ensures the memory location backing the object
   39  * stays valid, the trick to using this is relying on an independent
   40  * object validation pass. Something like:
   41  *
   42  *  rcu_read_lock()
   43  * again:
   44  *  obj = lockless_lookup(key);
   45  *  if (obj) {
   46  *    if (!try_get_ref(obj)) // might fail for free objects
   47  *      goto again;
   48  *
   49  *    if (obj->key != key) { // not the object we expected
   50  *      put_ref(obj);
   51  *      goto again;
   52  *    }
   53  *  }
   54  *  rcu_read_unlock();
   55  *
   56  * This is useful if we need to approach a kernel structure obliquely,
   57  * from its address obtained without the usual locking. We can lock
   58  * the structure to stabilize it and check it's still at the given address,
   59  * only if we can be sure that the memory has not been meanwhile reused
   60  * for some other kind of object (which our subsystem's lock might corrupt).
   61  *
   62  * rcu_read_lock before reading the address, then rcu_read_unlock after
   63  * taking the spinlock within the structure expected at that address.
   64  */
   65 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
   66 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
   67 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
   68 
   69 /* Flag to prevent checks on free */
   70 #ifdef CONFIG_DEBUG_OBJECTS
   71 # define SLAB_DEBUG_OBJECTS	0x00400000UL
   72 #else
   73 # define SLAB_DEBUG_OBJECTS	0x00000000UL
   74 #endif
   75 
   76 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
   77 
   78 /* Don't track use of uninitialized memory */
   79 #ifdef CONFIG_KMEMCHECK
   80 # define SLAB_NOTRACK		0x01000000UL
   81 #else
   82 # define SLAB_NOTRACK		0x00000000UL
   83 #endif
   84 #ifdef CONFIG_FAILSLAB
   85 # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
   86 #else
   87 # define SLAB_FAILSLAB		0x00000000UL
   88 #endif
   89 
   90 /* The following flags affect the page allocator grouping pages by mobility */
   91 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
   92 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
   93 /*
   94  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
   95  *
   96  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
   97  *
   98  * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can.
   99  * Both make kfree a no-op.
  100  */
  101 #define ZERO_SIZE_PTR ((void *)16)
  102 
  103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  104 				(unsigned long)ZERO_SIZE_PTR)
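
/*
 * Illustrative sketch (not part of this header), using kmalloc()/kfree()
 * declared further down: a zero-byte request yields ZERO_SIZE_PTR rather
 * than NULL, so checks that must also reject the zero-size case use
 * ZERO_OR_NULL_PTR().  The example_* names are invented.
 */
static int example_alloc(size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (ZERO_OR_NULL_PTR(p))	/* NULL or the poison pointer at address 16 */
		return -ENOMEM;
	/* ... use the buffer ... */
	kfree(p);			/* kfree() is a no-op for NULL and ZERO_SIZE_PTR */
	return 0;
}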
  105 
  106 #include <linux/kmemleak.h>
  107 #include <linux/kasan.h>
  108 
  109 struct mem_cgroup;
  110 /*
  111  * struct kmem_cache related prototypes
  112  */
  113 void __init kmem_cache_init(void);
  114 int slab_is_available(void);
  115 
  116 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  117 			unsigned long,
  118 			void (*)(void *));
  119 void kmem_cache_destroy(struct kmem_cache *);
  120 int kmem_cache_shrink(struct kmem_cache *);
  121 
  122 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
  123 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
  124 void memcg_destroy_kmem_caches(struct mem_cgroup *);
  125 
  126 /*
  127  * Please use this macro to create slab caches. Simply specify the
  128  * name of the structure and maybe some flags that are listed above.
  129  *
  130  * The alignment of the struct determines object alignment. If you
  131  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  132  * then the objects will be properly aligned in SMP configurations.
  133  */
  134 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  135 		sizeof(struct __struct), __alignof__(struct __struct),\
  136 		(__flags), NULL)
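
/*
 * Illustrative usage sketch (hypothetical struct and cache, not part of this
 * header): KMEM_CACHE() names the cache "example_object" and takes size and
 * alignment from the struct declaration itself.
 */
struct example_object {
	struct list_head list;
	int value;
} ____cacheline_aligned_in_smp;

static struct kmem_cache *example_object_cache;

static int __init example_cache_init(void)
{
	example_object_cache = KMEM_CACHE(example_object,
					  SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;	/* SLAB_PANIC means a failed create never returns here */
}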
  137 
  138 /*
  139  * Common kmalloc functions provided by all allocators
  140  */
  141 void * __must_check __krealloc(const void *, size_t, gfp_t);
  142 void * __must_check krealloc(const void *, size_t, gfp_t);
  143 void kfree(const void *);
  144 void kzfree(const void *);
  145 size_t ksize(const void *);
  146 
  147 /*
  148  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  149  * alignment larger than the alignment of a 64-bit integer.
  150  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  151  */
  152 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  153 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  154 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  155 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  156 #else
  157 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  158 #endif
  159 
  160 /*
  161  * Kmalloc array related definitions
  162  */
  163 
  164 #ifdef CONFIG_SLAB
  165 /*
  166  * The largest kmalloc size supported by the SLAB allocators is
  167  * 32 megabytes (2^25) or the maximum allocatable page order if that is
  168  * less than 32 MB.
  169  *
  170  * WARNING: It's not easy to increase this value since the allocators have
  171  * to do various tricks to work around compiler limitations in order to
  172  * ensure proper constant folding.
  173  */
  174 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  175 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  176 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  177 #ifndef KMALLOC_SHIFT_LOW
  178 #define KMALLOC_SHIFT_LOW	5
  179 #endif
  180 #endif
  181 
  182 #ifdef CONFIG_SLUB
  183 /*
  184  * SLUB directly allocates requests fitting into an order-1 page
  185  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  186  */
  187 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  188 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
  189 #ifndef KMALLOC_SHIFT_LOW
  190 #define KMALLOC_SHIFT_LOW	3
  191 #endif
  192 #endif
  193 
  194 #ifdef CONFIG_SLOB
  195 /*
  196  * SLOB passes all requests larger than one page to the page allocator.
  197  * No kmalloc array is necessary since objects of different sizes can
  198  * be allocated from the same page.
  199  */
  200 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  201 #define KMALLOC_SHIFT_MAX	30
  202 #ifndef KMALLOC_SHIFT_LOW
  203 #define KMALLOC_SHIFT_LOW	3
  204 #endif
  205 #endif
  206 
  207 /* Maximum allocatable size */
  208 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  209 /* Maximum size for which we actually use a slab cache */
  210 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
  211 /* Maximum order allocatable via the slab allocator */
  212 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
  213 
  214 /*
  215  * Kmalloc subsystem.
  216  */
  217 #ifndef KMALLOC_MIN_SIZE
  218 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  219 #endif
  220 
  221 /*
  222  * This restriction comes from the byte-sized index implementation.
  223  * Page size is normally 2^12 bytes and, in this case, if we want to use a
  224  * byte-sized index which can represent 2^8 entries, the size of the object
  225  * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
  226  * If the minimum kmalloc size is less than 16, we use it as the minimum
  227  * object size and give up on using the byte-sized index.
  228  */
  229 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
  230                                (KMALLOC_MIN_SIZE) : 16)
  231 
  232 #ifndef CONFIG_SLOB
  233 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  234 #ifdef CONFIG_ZONE_DMA
  235 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  236 #endif
  237 
  238 /*
  239  * Figure out which kmalloc slab an allocation of a certain size
  240  * belongs to.
  241  * 0 = zero alloc
  242  * 1 =  65 .. 96 bytes
  243  * 2 = 129 .. 192 bytes
  244  * n = 2^(n-1)+1 .. 2^n
  245  */
  246 static __always_inline int kmalloc_index(size_t size)
  247 {
  248 	if (!size)
  249 		return 0;
  250 
  251 	if (size <= KMALLOC_MIN_SIZE)
  252 		return KMALLOC_SHIFT_LOW;
  253 
  254 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
  255 		return 1;
  256 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
  257 		return 2;
  258 	if (size <=          8) return 3;
  259 	if (size <=         16) return 4;
  260 	if (size <=         32) return 5;
  261 	if (size <=         64) return 6;
  262 	if (size <=        128) return 7;
  263 	if (size <=        256) return 8;
  264 	if (size <=        512) return 9;
  265 	if (size <=       1024) return 10;
  266 	if (size <=   2 * 1024) return 11;
  267 	if (size <=   4 * 1024) return 12;
  268 	if (size <=   8 * 1024) return 13;
  269 	if (size <=  16 * 1024) return 14;
  270 	if (size <=  32 * 1024) return 15;
  271 	if (size <=  64 * 1024) return 16;
  272 	if (size <= 128 * 1024) return 17;
  273 	if (size <= 256 * 1024) return 18;
  274 	if (size <= 512 * 1024) return 19;
  275 	if (size <= 1024 * 1024) return 20;
  276 	if (size <=  2 * 1024 * 1024) return 21;
  277 	if (size <=  4 * 1024 * 1024) return 22;
  278 	if (size <=  8 * 1024 * 1024) return 23;
  279 	if (size <=  16 * 1024 * 1024) return 24;
  280 	if (size <=  32 * 1024 * 1024) return 25;
  281 	if (size <=  64 * 1024 * 1024) return 26;
  282 	BUG();
  283 
  284 	/* Will never be reached. Needed because the compiler may complain */
  285 	return -1;
  286 }
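
/*
 * Illustrative compile-time checks (hypothetical helper, not part of this
 * header), assuming the default KMALLOC_MIN_SIZE of 8: they spell out the
 * mapping documented above.
 */
static __always_inline void kmalloc_index_example(void)
{
	BUILD_BUG_ON(kmalloc_index(100) != 7);		/* falls into the 128-byte cache */
	BUILD_BUG_ON(kmalloc_index(150) != 2);		/* special 192-byte cache */
	BUILD_BUG_ON(kmalloc_index(4096) != 12);	/* one 4 KiB page */
}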
  287 #endif /* !CONFIG_SLOB */
  288 
  289 void *__kmalloc(size_t size, gfp_t flags);
  290 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
  291 void kmem_cache_free(struct kmem_cache *, void *);
  292 
  293 /*
  294  * Bulk allocation and freeing operations. These are accelerated in an
  295  * allocator specific way to avoid taking locks repeatedly or building
  296  * metadata structures unnecessarily.
  297  *
  298  * Note that interrupts must be enabled when calling these functions.
  299  */
  300 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  301 bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
  302 
  303 #ifdef CONFIG_NUMA
  304 void *__kmalloc_node(size_t size, gfp_t flags, int node);
  305 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
  306 #else
  307 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  308 {
  309 	return __kmalloc(size, flags);
  310 }
  311 
  312 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
  313 {
  314 	return kmem_cache_alloc(s, flags);
  315 }
  316 #endif
  317 
  318 #ifdef CONFIG_TRACING
  319 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
  320 
  321 #ifdef CONFIG_NUMA
  322 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  323 					   gfp_t gfpflags,
  324 					   int node, size_t size);
  325 #else
  326 static __always_inline void *
  327 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  328 			      gfp_t gfpflags,
  329 			      int node, size_t size)
  330 {
  331 	return kmem_cache_alloc_trace(s, gfpflags, size);
  332 }
  333 #endif /* CONFIG_NUMA */
  334 
  335 #else /* CONFIG_TRACING */
  336 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  337 		gfp_t flags, size_t size)
  338 {
  339 	void *ret = kmem_cache_alloc(s, flags);
  340 
  341 	kasan_kmalloc(s, ret, size);
  342 	return ret;
  343 }
  344 
  345 static __always_inline void *
  346 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  347 			      gfp_t gfpflags,
  348 			      int node, size_t size)
  349 {
  350 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
  351 
  352 	kasan_kmalloc(s, ret, size);
  353 	return ret;
  354 }
  355 #endif /* CONFIG_TRACING */
  356 
  357 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
  358 
  359 #ifdef CONFIG_TRACING
  360 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
  361 #else
  362 static __always_inline void *
  363 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  364 {
  365 	return kmalloc_order(size, flags, order);
  366 }
  367 #endif
  368 
  369 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  370 {
  371 	unsigned int order = get_order(size);
  372 	return kmalloc_order_trace(size, flags, order);
  373 }
  374 
  375 /**
  376  * kmalloc - allocate memory
  377  * @size: how many bytes of memory are required.
  378  * @flags: the type of memory to allocate.
  379  *
  380  * kmalloc is the normal method of allocating memory
  381  * for objects smaller than page size in the kernel.
  382  *
  383  * The @flags argument may be one of:
  384  *
  385  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
  386  *
  387  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
  388  *
  389  * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
  390  *   For example, use this inside interrupt handlers.
  391  *
  392  * %GFP_HIGHUSER - Allocate pages from high memory.
  393  *
  394  * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
  395  *
  396  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  397  *
  398  * %GFP_NOWAIT - Allocation will not sleep.
  399  *
  400  * %__GFP_THISNODE - Allocate node-local memory only.
  401  *
  402  * %GFP_DMA - Allocation suitable for DMA.
  403  *   Should only be used for kmalloc() caches. Otherwise, use a
  404  *   slab created with SLAB_CACHE_DMA.
  405  *
  406  * Also it is possible to set different flags by OR'ing
  407  * in one or more of the following additional @flags:
  408  *
  409  * %__GFP_COLD - Request cache-cold pages instead of
  410  *   trying to return cache-warm pages.
  411  *
  412  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  413  *
  414  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  415  *   (think twice before using).
  416  *
  417  * %__GFP_NORETRY - If memory is not immediately available,
  418  *   then give up at once.
  419  *
  420  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  421  *
  422  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  423  *
  424  * There are other flags available as well, but these are not intended
  425  * for general use, and so are not documented here. For a full list of
  426  * potential flags, always refer to linux/gfp.h.
  427  */
  428 static __always_inline void *kmalloc(size_t size, gfp_t flags)
  429 {
  430 	if (__builtin_constant_p(size)) {
  431 		if (size > KMALLOC_MAX_CACHE_SIZE)
  432 			return kmalloc_large(size, flags);
  433 #ifndef CONFIG_SLOB
  434 		if (!(flags & GFP_DMA)) {
  435 			int index = kmalloc_index(size);
  436 
  437 			if (!index)
  438 				return ZERO_SIZE_PTR;
  439 
  440 			return kmem_cache_alloc_trace(kmalloc_caches[index],
  441 					flags, size);
  442 		}
  443 #endif
  444 	}
  445 	return __kmalloc(size, flags);
  446 }
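
/*
 * Illustrative usage sketch (invented helper, not part of this header):
 * GFP_KERNEL is for process context that may sleep, GFP_ATOMIC for contexts
 * that must not sleep (e.g. interrupt handlers).
 */
static void *example_buf_alloc(size_t len, bool in_atomic_path)
{
	void *buf = kmalloc(len, in_atomic_path ? GFP_ATOMIC : GFP_KERNEL);

	if (!buf)
		return NULL;	/* allocation failure must always be handled */
	return buf;
}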
  447 
  448 /*
  449  * Determine size used for the nth kmalloc cache.
  450  * Return the size, or 0 if a kmalloc cache for that
  451  * size does not exist.
  452  */
  453 static __always_inline int kmalloc_size(int n)
  454 {
  455 #ifndef CONFIG_SLOB
  456 	if (n > 2)
  457 		return 1 << n;
  458 
  459 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
  460 		return 96;
  461 
  462 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
  463 		return 192;
  464 #endif
  465 	return 0;
  466 }
  467 
  468 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  469 {
  470 #ifndef CONFIG_SLOB
  471 	if (__builtin_constant_p(size) &&
  472 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
  473 		int i = kmalloc_index(size);
  474 
  475 		if (!i)
  476 			return ZERO_SIZE_PTR;
  477 
  478 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
  479 						flags, node, size);
  480 	}
  481 #endif
  482 	return __kmalloc_node(size, flags, node);
  483 }
  484 
  485 /*
  486  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  487  * Intended for arches that get misalignment faults even for 64 bit integer
  488  * aligned buffers.
  489  */
  490 #ifndef ARCH_SLAB_MINALIGN
  491 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  492 #endif
  493 
  494 struct memcg_cache_array {
  495 	struct rcu_head rcu;
  496 	struct kmem_cache *entries[0];
  497 };
  498 
  499 /*
  500  * This is the main placeholder for memcg-related information in kmem caches.
  501  * Both the root cache and the child caches will have it. For the root cache,
  502  * this will hold a dynamically allocated array large enough to hold
  503  * information about the currently limited memcgs in the system. To allow the
  504  * array to be accessed without taking any locks, on relocation we free the old
  505  * version only after a grace period.
  506  *
  507  * Child caches will hold extra metadata needed for their operation. Fields are:
  508  *
  509  * @memcg: pointer to the memcg this cache belongs to
  510  * @root_cache: pointer to the global, root cache, this cache was derived from
  511  *
  512  * Both root and child caches of the same kind are linked into a list chained
  513  * through @list.
  514  */
  515 struct memcg_cache_params {
  516 	bool is_root_cache;
  517 	struct list_head list;
  518 	union {
  519 		struct memcg_cache_array __rcu *memcg_caches;
  520 		struct {
  521 			struct mem_cgroup *memcg;
  522 			struct kmem_cache *root_cache;
  523 		};
  524 	};
  525 };
  526 
  527 int memcg_update_all_caches(int num_memcgs);
  528 
  529 /**
  530  * kmalloc_array - allocate memory for an array.
  531  * @n: number of elements.
  532  * @size: element size.
  533  * @flags: the type of memory to allocate (see kmalloc).
  534  */
  535 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  536 {
  537 	if (size != 0 && n > SIZE_MAX / size)
  538 		return NULL;
  539 	return __kmalloc(n * size, flags);
  540 }
  541 
  542 /**
  543  * kcalloc - allocate memory for an array. The memory is set to zero.
  544  * @n: number of elements.
  545  * @size: element size.
  546  * @flags: the type of memory to allocate (see kmalloc).
  547  */
  548 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  549 {
  550 	return kmalloc_array(n, size, flags | __GFP_ZERO);
  551 }
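
/*
 * Illustrative usage sketch (invented element type, not part of this header):
 * kmalloc_array() performs the n * size overflow check, and kcalloc() adds
 * __GFP_ZERO on top of it.
 */
struct example_entry {
	u32 id;
	u64 stamp;
};

static struct example_entry *example_alloc_entries(size_t n)
{
	return kcalloc(n, sizeof(struct example_entry), GFP_KERNEL);
}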
  552 
  553 /*
  554  * kmalloc_track_caller is a special version of kmalloc that records the
  555  * calling function of the routine calling it for slab leak tracking instead
  556  * of just the calling function (confusing, eh?).
  557  * It's useful when the call to kmalloc comes from a widely-used standard
  558  * allocator where we care about the real place the memory allocation
  559  * request comes from.
  560  */
  561 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  562 #define kmalloc_track_caller(size, flags) \
  563 	__kmalloc_track_caller(size, flags, _RET_IP_)
  564 
  565 #ifdef CONFIG_NUMA
  566 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  567 #define kmalloc_node_track_caller(size, flags, node) \
  568 	__kmalloc_node_track_caller(size, flags, node, \
  569 			_RET_IP_)
  570 
  571 #else /* CONFIG_NUMA */
  572 
  573 #define kmalloc_node_track_caller(size, flags, node) \
  574 	kmalloc_track_caller(size, flags)
  575 
  576 #endif /* CONFIG_NUMA */
  577 
  578 /*
  579  * Shortcuts
  580  */
  581 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  582 {
  583 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  584 }
  585 
  586 /**
  587  * kzalloc - allocate memory. The memory is set to zero.
  588  * @size: how many bytes of memory are required.
  589  * @flags: the type of memory to allocate (see kmalloc).
  590  */
  591 static inline void *kzalloc(size_t size, gfp_t flags)
  592 {
  593 	return kmalloc(size, flags | __GFP_ZERO);
  594 }
  595 
  596 /**
  597  * kzalloc_node - allocate zeroed memory from a particular memory node.
  598  * @size: how many bytes of memory are required.
  599  * @flags: the type of memory to allocate (see kmalloc).
  600  * @node: memory node from which to allocate
  601  */
  602 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  603 {
  604 	return kmalloc_node(size, flags | __GFP_ZERO, node);
  605 }
  606 
  607 unsigned int kmem_cache_size(struct kmem_cache *s);
  608 void __init kmem_cache_init_late(void);
  609 
  610 #endif	/* _LINUX_SLAB_H */
    1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
  121  * Despite its name it doesn't necessarily have to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with LOADs and STOREs inside this section.
  124  * spin_lock() is the one-way barrier, this LOAD can not escape out
  125  * of the region. So the default implementation simply ensures that
  126  * a STORE can not move into the critical section, smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /**
  134  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  135  * @lock: the spinlock in question.
  136  */
  137 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  138 
  139 #ifdef CONFIG_DEBUG_SPINLOCK
  140  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  142  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  143  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  144 #else
  145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  146 {
  147 	__acquire(lock);
  148 	arch_spin_lock(&lock->raw_lock);
  149 }
  150 
  151 static inline void
  152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  153 {
  154 	__acquire(lock);
  155 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  156 }
  157 
  158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  159 {
  160 	return arch_spin_trylock(&(lock)->raw_lock);
  161 }
  162 
  163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  164 {
  165 	arch_spin_unlock(&lock->raw_lock);
  166 	__release(lock);
  167 }
  168 #endif
  169 
  170 /*
  171  * Define the various spin_lock methods.  Note we define these
  172  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  173  * various methods are defined as nops in the case they are not
  174  * required.
  175  */
  176 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  177 
  178 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  179 
  180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  181 # define raw_spin_lock_nested(lock, subclass) \
  182 	_raw_spin_lock_nested(lock, subclass)
  183 # define raw_spin_lock_bh_nested(lock, subclass) \
  184 	_raw_spin_lock_bh_nested(lock, subclass)
  185 
  186 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  187 	 do {								\
  188 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  189 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  190 	 } while (0)
  191 #else
  192 /*
  193  * Always evaluate the 'subclass' argument to avoid that the compiler
  194  * warns about set-but-not-used variables when building with
  195  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  196  */
  197 # define raw_spin_lock_nested(lock, subclass)		\
  198 	_raw_spin_lock(((void)(subclass), (lock)))
  199 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  200 # define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
  201 #endif
  202 
  203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  204 
  205 #define raw_spin_lock_irqsave(lock, flags)			\
  206 	do {						\
  207 		typecheck(unsigned long, flags);	\
  208 		flags = _raw_spin_lock_irqsave(lock);	\
  209 	} while (0)
  210 
  211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  213 	do {								\
  214 		typecheck(unsigned long, flags);			\
  215 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  216 	} while (0)
  217 #else
  218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  219 	do {								\
  220 		typecheck(unsigned long, flags);			\
  221 		flags = _raw_spin_lock_irqsave(lock);			\
  222 	} while (0)
  223 #endif
  224 
  225 #else
  226 
  227 #define raw_spin_lock_irqsave(lock, flags)		\
  228 	do {						\
  229 		typecheck(unsigned long, flags);	\
  230 		_raw_spin_lock_irqsave(lock, flags);	\
  231 	} while (0)
  232 
  233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  234 	raw_spin_lock_irqsave(lock, flags)
  235 
  236 #endif
  237 
  238 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  239 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  240 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  241 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  242 
  243 #define raw_spin_unlock_irqrestore(lock, flags)		\
  244 	do {							\
  245 		typecheck(unsigned long, flags);		\
  246 		_raw_spin_unlock_irqrestore(lock, flags);	\
  247 	} while (0)
  248 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  249 
  250 #define raw_spin_trylock_bh(lock) \
  251 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  252 
  253 #define raw_spin_trylock_irq(lock) \
  254 ({ \
  255 	local_irq_disable(); \
  256 	raw_spin_trylock(lock) ? \
  257 	1 : ({ local_irq_enable(); 0;  }); \
  258 })
  259 
  260 #define raw_spin_trylock_irqsave(lock, flags) \
  261 ({ \
  262 	local_irq_save(flags); \
  263 	raw_spin_trylock(lock) ? \
  264 	1 : ({ local_irq_restore(flags); 0; }); \
  265 })
  266 
  267 /**
  268  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  269  * @lock: the spinlock in question.
  270  */
  271 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  272 
  273 /* Include rwlock functions */
  274 #include <linux/rwlock.h>
  275 
  276 /*
  277  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  278  */
  279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  280 # include <linux/spinlock_api_smp.h>
  281 #else
  282 # include <linux/spinlock_api_up.h>
  283 #endif
  284 
  285 /*
  286  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  287  */
  288 
  289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  290 {
  291 	return &lock->rlock;
  292 }
  293 
  294 #define spin_lock_init(_lock)				\
  295 do {							\
  296 	spinlock_check(_lock);				\
  297 	raw_spin_lock_init(&(_lock)->rlock);		\
  298 } while (0)
  299 
  300 static __always_inline void spin_lock(spinlock_t *lock)
  301 {
  302 	raw_spin_lock(&lock->rlock);
  303 }
  304 
  305 static __always_inline void spin_lock_bh(spinlock_t *lock)
  306 {
  307 	raw_spin_lock_bh(&lock->rlock);
  308 }
  309 
  310 static __always_inline int spin_trylock(spinlock_t *lock)
  311 {
  312 	return raw_spin_trylock(&lock->rlock);
  313 }
  314 
  315 #define spin_lock_nested(lock, subclass)			\
  316 do {								\
  317 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  318 } while (0)
  319 
  320 #define spin_lock_bh_nested(lock, subclass)			\
  321 do {								\
  322 	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
  323 } while (0)
  324 
  325 #define spin_lock_nest_lock(lock, nest_lock)				\
  326 do {									\
  327 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  328 } while (0)
  329 
  330 static __always_inline void spin_lock_irq(spinlock_t *lock)
  331 {
  332 	raw_spin_lock_irq(&lock->rlock);
  333 }
  334 
  335 #define spin_lock_irqsave(lock, flags)				\
  336 do {								\
  337 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  338 } while (0)
  339 
  340 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  341 do {									\
  342 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  343 } while (0)
  344 
  345 static __always_inline void spin_unlock(spinlock_t *lock)
  346 {
  347 	raw_spin_unlock(&lock->rlock);
  348 }
  349 
  350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
  351 {
  352 	raw_spin_unlock_bh(&lock->rlock);
  353 }
  354 
  355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
  356 {
  357 	raw_spin_unlock_irq(&lock->rlock);
  358 }
  359 
  360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  361 {
  362 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  363 }
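
/*
 * Illustrative usage sketch (invented lock and counter, not part of this
 * header): the irqsave/irqrestore pair is the safe default when the lock can
 * also be taken from interrupt context.
 */
static DEFINE_SPINLOCK(example_lock);
static unsigned long example_count;

static void example_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_count++;	/* critical section runs with local IRQs disabled */
	spin_unlock_irqrestore(&example_lock, flags);
}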
  364 
  365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
  366 {
  367 	return raw_spin_trylock_bh(&lock->rlock);
  368 }
  369 
  370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
  371 {
  372 	return raw_spin_trylock_irq(&lock->rlock);
  373 }
  374 
  375 #define spin_trylock_irqsave(lock, flags)			\
  376 ({								\
  377 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  378 })
  379 
  380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
  381 {
  382 	raw_spin_unlock_wait(&lock->rlock);
  383 }
  384 
  385 static __always_inline int spin_is_locked(spinlock_t *lock)
  386 {
  387 	return raw_spin_is_locked(&lock->rlock);
  388 }
  389 
  390 static __always_inline int spin_is_contended(spinlock_t *lock)
  391 {
  392 	return raw_spin_is_contended(&lock->rlock);
  393 }
  394 
  395 static __always_inline int spin_can_lock(spinlock_t *lock)
  396 {
  397 	return raw_spin_can_lock(&lock->rlock);
  398 }
  399 
  400 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  401 
  402 /*
  403  * Pull the atomic_t declaration:
  404  * (asm-mips/atomic.h needs above definitions)
  405  */
  406 #include <linux/atomic.h>
  407 /**
  408  * atomic_dec_and_lock - lock on reaching reference count zero
  409  * @atomic: the atomic counter
  410  * @lock: the spinlock in question
  411  *
  412  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  413  * @lock.  Returns false for all other cases.
  414  */
  415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  416 #define atomic_dec_and_lock(atomic, lock) \
  417 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
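
/*
 * Illustrative usage sketch (invented refcounted object, not part of this
 * header): drop a reference and take the list lock only when the count hits
 * zero, so unlinking and freeing happen atomically with respect to lookups.
 */
struct example_obj {
	atomic_t refcnt;
	struct list_head node;
};

static DEFINE_SPINLOCK(example_list_lock);

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_lock(&obj->refcnt, &example_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&example_list_lock);
		kfree(obj);
	}
}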
  418 
  419 #endif /* __LINUX_SPINLOCK_H */
    1 #ifndef __LINUX_USB_H
    2 #define __LINUX_USB_H
    3 
    4 #include <linux/mod_devicetable.h>
    5 #include <linux/usb/ch9.h>
    6 
    7 #define USB_MAJOR			180
    8 #define USB_DEVICE_MAJOR		189
    9 
   10 
   11 #ifdef __KERNEL__
   12 
   13 #include <linux/errno.h>        /* for -ENODEV */
   14 #include <linux/delay.h>	/* for mdelay() */
   15 #include <linux/interrupt.h>	/* for in_interrupt() */
   16 #include <linux/list.h>		/* for struct list_head */
   17 #include <linux/kref.h>		/* for struct kref */
   18 #include <linux/device.h>	/* for struct device */
   19 #include <linux/fs.h>		/* for struct file_operations */
   20 #include <linux/completion.h>	/* for struct completion */
   21 #include <linux/sched.h>	/* for current && schedule_timeout */
   22 #include <linux/mutex.h>	/* for struct mutex */
   23 #include <linux/pm_runtime.h>	/* for runtime PM */
   24 
   25 struct usb_device;
   26 struct usb_driver;
   27 struct wusb_dev;
   28 
   29 /*-------------------------------------------------------------------------*/
   30 
   31 /*
   32  * Host-side wrappers for standard USB descriptors ... these are parsed
   33  * from the data provided by devices.  Parsing turns them from a flat
   34  * sequence of descriptors into a hierarchy:
   35  *
   36  *  - devices have one (usually) or more configs;
   37  *  - configs have one (often) or more interfaces;
   38  *  - interfaces have one (usually) or more settings;
   39  *  - each interface setting has zero or (usually) more endpoints.
   40  *  - a SuperSpeed endpoint has a companion descriptor
   41  *
   42  * And there might be other descriptors mixed in with those.
   43  *
   44  * Devices may also have class-specific or vendor-specific descriptors.
   45  */
   46 
   47 struct ep_device;
   48 
   49 /**
   50  * struct usb_host_endpoint - host-side endpoint descriptor and queue
   51  * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
   52  * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
   53  * @urb_list: urbs queued to this endpoint; maintained by usbcore
   54  * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
   55  *	with one or more transfer descriptors (TDs) per urb
   56  * @ep_dev: ep_device for sysfs info
   57  * @extra: descriptors following this endpoint in the configuration
   58  * @extralen: how many bytes of "extra" are valid
   59  * @enabled: URBs may be submitted to this endpoint
   60  * @streams: number of USB-3 streams allocated on the endpoint
   61  *
   62  * USB requests are always queued to a given endpoint, identified by a
   63  * descriptor within an active interface in a given USB configuration.
   64  */
   65 struct usb_host_endpoint {
   66 	struct usb_endpoint_descriptor		desc;
   67 	struct usb_ss_ep_comp_descriptor	ss_ep_comp;
   68 	struct list_head		urb_list;
   69 	void				*hcpriv;
   70 	struct ep_device		*ep_dev;	/* For sysfs info */
   71 
   72 	unsigned char *extra;   /* Extra descriptors */
   73 	int extralen;
   74 	int enabled;
   75 	int streams;
   76 };
   77 
   78 /* host-side wrapper for one interface setting's parsed descriptors */
   79 struct usb_host_interface {
   80 	struct usb_interface_descriptor	desc;
   81 
   82 	int extralen;
   83 	unsigned char *extra;   /* Extra descriptors */
   84 
   85 	/* array of desc.bNumEndpoints endpoints associated with this
   86 	 * interface setting.  these will be in no particular order.
   87 	 */
   88 	struct usb_host_endpoint *endpoint;
   89 
   90 	char *string;		/* iInterface string, if present */
   91 };
   92 
   93 enum usb_interface_condition {
   94 	USB_INTERFACE_UNBOUND = 0,
   95 	USB_INTERFACE_BINDING,
   96 	USB_INTERFACE_BOUND,
   97 	USB_INTERFACE_UNBINDING,
   98 };
   99 
  100 /**
  101  * struct usb_interface - what usb device drivers talk to
  102  * @altsetting: array of interface structures, one for each alternate
  103  *	setting that may be selected.  Each one includes a set of
  104  *	endpoint configurations.  They will be in no particular order.
  105  * @cur_altsetting: the current altsetting.
  106  * @num_altsetting: number of altsettings defined.
  107  * @intf_assoc: interface association descriptor
  108  * @minor: the minor number assigned to this interface, if this
  109  *	interface is bound to a driver that uses the USB major number.
  110  *	If this interface does not use the USB major, this field should
  111  *	be unused.  The driver should set this value in the probe()
  112  *	function of the driver, after it has been assigned a minor
  113  *	number from the USB core by calling usb_register_dev().
  114  * @condition: binding state of the interface: not bound, binding
  115  *	(in probe()), bound to a driver, or unbinding (in disconnect())
  116  * @sysfs_files_created: sysfs attributes exist
  117  * @ep_devs_created: endpoint child pseudo-devices exist
  118  * @unregistering: flag set when the interface is being unregistered
  119  * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
  120  *	capability during autosuspend.
  121  * @needs_altsetting0: flag set when a set-interface request for altsetting 0
  122  *	has been deferred.
  123  * @needs_binding: flag set when the driver should be re-probed or unbound
  124  *	following a reset or suspend operation it doesn't support.
  125  * @dev: driver model's view of this device
  126  * @usb_dev: if an interface is bound to the USB major, this will point
  127  *	to the sysfs representation for that device.
  128  * @pm_usage_cnt: PM usage counter for this interface
  129  * @reset_ws: Used for scheduling resets from atomic context.
  130  * @resetting_device: USB core reset the device, so use alt setting 0 as
  131  *	current; needs bandwidth alloc after reset.
  132  *
  133  * USB device drivers attach to interfaces on a physical device.  Each
  134  * interface encapsulates a single high level function, such as feeding
  135  * an audio stream to a speaker or reporting a change in a volume control.
  136  * Many USB devices only have one interface.  The protocol used to talk to
  137  * an interface's endpoints can be defined in a usb "class" specification,
  138  * or by a product's vendor.  The (default) control endpoint is part of
  139  * every interface, but is never listed among the interface's descriptors.
  140  *
  141  * The driver that is bound to the interface can use standard driver model
  142  * calls such as dev_get_drvdata() on the dev member of this structure.
  143  *
  144  * Each interface may have alternate settings.  The initial configuration
  145  * of a device sets altsetting 0, but the device driver can change
  146  * that setting using usb_set_interface().  Alternate settings are often
  147  * used to control the use of periodic endpoints, such as by having
  148  * different endpoints use different amounts of reserved USB bandwidth.
  149  * All standards-conformant USB devices that use isochronous endpoints
  150  * will use them in non-default settings.
  151  *
  152  * The USB specification says that alternate setting numbers must run from
  153  * 0 to one less than the total number of alternate settings.  But some
  154  * devices manage to mess this up, and the structures aren't necessarily
  155  * stored in numerical order anyhow.  Use usb_altnum_to_altsetting() to
  156  * look up an alternate setting in the altsetting array based on its number.
  157  */
  158 struct usb_interface {
  159 	/* array of alternate settings for this interface,
  160 	 * stored in no particular order */
  161 	struct usb_host_interface *altsetting;
  162 
  163 	struct usb_host_interface *cur_altsetting;	/* the currently
  164 					 * active alternate setting */
  165 	unsigned num_altsetting;	/* number of alternate settings */
  166 
  167 	/* If there is an interface association descriptor then it will list
  168 	 * the associated interfaces */
  169 	struct usb_interface_assoc_descriptor *intf_assoc;
  170 
  171 	int minor;			/* minor number this interface is
  172 					 * bound to */
  173 	enum usb_interface_condition condition;		/* state of binding */
  174 	unsigned sysfs_files_created:1;	/* the sysfs attributes exist */
  175 	unsigned ep_devs_created:1;	/* endpoint "devices" exist */
  176 	unsigned unregistering:1;	/* unregistration is in progress */
  177 	unsigned needs_remote_wakeup:1;	/* driver requires remote wakeup */
  178 	unsigned needs_altsetting0:1;	/* switch to altsetting 0 is pending */
  179 	unsigned needs_binding:1;	/* needs delayed unbind/rebind */
  180 	unsigned resetting_device:1;	/* true: bandwidth alloc after reset */
  181 
  182 	struct device dev;		/* interface specific device info */
  183 	struct device *usb_dev;
  184 	atomic_t pm_usage_cnt;		/* usage counter for autosuspend */
  185 	struct work_struct reset_ws;	/* for resets in atomic context */
  186 };
  187 #define	to_usb_interface(d) container_of(d, struct usb_interface, dev)
  188 
  189 static inline void *usb_get_intfdata(struct usb_interface *intf)
  190 {
  191 	return dev_get_drvdata(&intf->dev);
  192 }
  193 
  194 static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
  195 {
  196 	dev_set_drvdata(&intf->dev, data);
  197 }
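
/*
 * Illustrative usage sketch (invented driver-private state, not part of this
 * header): the usual probe()/disconnect() pairing around the interface
 * drvdata pointer.
 */
struct example_usb_priv {
	struct usb_device *udev;
};

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct example_usb_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	priv->udev = interface_to_usbdev(intf);	/* declared later in this header */
	usb_set_intfdata(intf, priv);
	return 0;
}

static void example_disconnect(struct usb_interface *intf)
{
	struct example_usb_priv *priv = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	kfree(priv);
}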
  198 
  199 struct usb_interface *usb_get_intf(struct usb_interface *intf);
  200 void usb_put_intf(struct usb_interface *intf);
  201 
  202 /* Hard limit */
  203 #define USB_MAXENDPOINTS	30
  204 /* this maximum is arbitrary */
  205 #define USB_MAXINTERFACES	32
  206 #define USB_MAXIADS		(USB_MAXINTERFACES/2)
  207 
  208 /*
  209  * USB Resume Timer: Every Host controller driver should drive the resume
  210  * signalling on the bus for the amount of time defined by this macro.
  211  *
  212  * That way we will have a 'stable' behavior among all HCDs supported by Linux.
  213  *
  214  * Note that the USB Specification states we should drive resume for *at least*
  215  * 20 ms, but it doesn't give an upper bound. This creates two possible
  216  * situations which we want to avoid:
  217  *
  218  * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
  219  * us to fail USB Electrical Tests, thus failing Certification
  220  *
  221  * (b) Some (many) devices actually need more than 20 ms of resume signalling,
  222  * and while we can argue that's against the USB Specification, we don't have
  223  * control over which devices a certification laboratory will be using for
  224  * certification. If CertLab uses a device which was tested against Windows and
  225  * that happens to have relaxed resume signalling rules, we might fall into
  226  * situations where we fail interoperability and electrical tests.
  227  *
  228  * In order to avoid both conditions, we're using a 40 ms resume timeout, which
  229  * should cope with both LPJ calibration errors and devices not following every
  230  * detail of the USB Specification.
  231  */
  232 #define USB_RESUME_TIMEOUT	40 /* ms */
  233 
  234 /**
  235  * struct usb_interface_cache - long-term representation of a device interface
  236  * @num_altsetting: number of altsettings defined.
  237  * @ref: reference counter.
  238  * @altsetting: variable-length array of interface structures, one for
  239  *	each alternate setting that may be selected.  Each one includes a
  240  *	set of endpoint configurations.  They will be in no particular order.
  241  *
  242  * These structures persist for the lifetime of a usb_device, unlike
  243  * struct usb_interface (which persists only as long as its configuration
  244  * is installed).  The altsetting arrays can be accessed through these
  245  * structures at any time, permitting comparison of configurations and
  246  * providing support for the /proc/bus/usb/devices pseudo-file.
  247  */
  248 struct usb_interface_cache {
  249 	unsigned num_altsetting;	/* number of alternate settings */
  250 	struct kref ref;		/* reference counter */
  251 
  252 	/* variable-length array of alternate settings for this interface,
  253 	 * stored in no particular order */
  254 	struct usb_host_interface altsetting[0];
  255 };
  256 #define	ref_to_usb_interface_cache(r) \
  257 		container_of(r, struct usb_interface_cache, ref)
  258 #define	altsetting_to_usb_interface_cache(a) \
  259 		container_of(a, struct usb_interface_cache, altsetting[0])
  260 
  261 /**
  262  * struct usb_host_config - representation of a device's configuration
  263  * @desc: the device's configuration descriptor.
  264  * @string: pointer to the cached version of the iConfiguration string, if
  265  *	present for this configuration.
  266  * @intf_assoc: list of any interface association descriptors in this config
  267  * @interface: array of pointers to usb_interface structures, one for each
  268  *	interface in the configuration.  The number of interfaces is stored
  269  *	in desc.bNumInterfaces.  These pointers are valid only while the
   270  *	configuration is active.
  271  * @intf_cache: array of pointers to usb_interface_cache structures, one
  272  *	for each interface in the configuration.  These structures exist
  273  *	for the entire life of the device.
  274  * @extra: pointer to buffer containing all extra descriptors associated
  275  *	with this configuration (those preceding the first interface
  276  *	descriptor).
  277  * @extralen: length of the extra descriptors buffer.
  278  *
  279  * USB devices may have multiple configurations, but only one can be active
  280  * at any time.  Each encapsulates a different operational environment;
  281  * for example, a dual-speed device would have separate configurations for
  282  * full-speed and high-speed operation.  The number of configurations
  283  * available is stored in the device descriptor as bNumConfigurations.
  284  *
  285  * A configuration can contain multiple interfaces.  Each corresponds to
  286  * a different function of the USB device, and all are available whenever
  287  * the configuration is active.  The USB standard says that interfaces
  288  * are supposed to be numbered from 0 to desc.bNumInterfaces-1, but a lot
  289  * of devices get this wrong.  In addition, the interface array is not
  290  * guaranteed to be sorted in numerical order.  Use usb_ifnum_to_if() to
  291  * look up an interface entry based on its number.
  292  *
  293  * Device drivers should not attempt to activate configurations.  The choice
  294  * of which configuration to install is a policy decision based on such
  295  * considerations as available power, functionality provided, and the user's
  296  * desires (expressed through userspace tools).  However, drivers can call
  297  * usb_reset_configuration() to reinitialize the current configuration and
  298  * all its interfaces.
  299  */
  300 struct usb_host_config {
  301 	struct usb_config_descriptor	desc;
  302 
  303 	char *string;		/* iConfiguration string, if present */
  304 
  305 	/* List of any Interface Association Descriptors in this
  306 	 * configuration. */
  307 	struct usb_interface_assoc_descriptor *intf_assoc[USB_MAXIADS];
  308 
  309 	/* the interfaces associated with this configuration,
  310 	 * stored in no particular order */
  311 	struct usb_interface *interface[USB_MAXINTERFACES];
  312 
  313 	/* Interface information available even when this is not the
  314 	 * active configuration */
  315 	struct usb_interface_cache *intf_cache[USB_MAXINTERFACES];
  316 
  317 	unsigned char *extra;   /* Extra descriptors */
  318 	int extralen;
  319 };
  320 
  321 /* USB2.0 and USB3.0 device BOS descriptor set */
  322 struct usb_host_bos {
  323 	struct usb_bos_descriptor	*desc;
  324 
  325 	/* wireless cap descriptor is handled by wusb */
  326 	struct usb_ext_cap_descriptor	*ext_cap;
  327 	struct usb_ss_cap_descriptor	*ss_cap;
  328 	struct usb_ss_container_id_descriptor	*ss_id;
  329 };
  330 
  331 int __usb_get_extra_descriptor(char *buffer, unsigned size,
  332 	unsigned char type, void **ptr);
  333 #define usb_get_extra_descriptor(ifpoint, type, ptr) \
  334 				__usb_get_extra_descriptor((ifpoint)->extra, \
  335 				(ifpoint)->extralen, \
  336 				type, (void **)ptr)
  337 
  338 /* ----------------------------------------------------------------------- */
  339 
  340 /* USB device number allocation bitmap */
  341 struct usb_devmap {
  342 	unsigned long devicemap[128 / (8*sizeof(unsigned long))];
  343 };
  344 
  345 /*
  346  * Allocated per bus (tree of devices) we have:
  347  */
  348 struct usb_bus {
  349 	struct device *controller;	/* host/master side hardware */
  350 	int busnum;			/* Bus number (in order of reg) */
  351 	const char *bus_name;		/* stable id (PCI slot_name etc) */
  352 	u8 uses_dma;			/* Does the host controller use DMA? */
  353 	u8 uses_pio_for_control;	/*
  354 					 * Does the host controller use PIO
  355 					 * for control transfers?
  356 					 */
  357 	u8 otg_port;			/* 0, or number of OTG/HNP port */
  358 	unsigned is_b_host:1;		/* true during some HNP roleswitches */
  359 	unsigned b_hnp_enable:1;	/* OTG: did A-Host enable HNP? */
  360 	unsigned no_stop_on_short:1;    /*
  361 					 * Quirk: some controllers don't stop
  362 					 * the ep queue on a short transfer
  363 					 * with the URB_SHORT_NOT_OK flag set.
  364 					 */
  365 	unsigned no_sg_constraint:1;	/* no sg constraint */
  366 	unsigned sg_tablesize;		/* 0 or largest number of sg list entries */
  367 
  368 	int devnum_next;		/* Next open device number in
  369 					 * round-robin allocation */
  370 
  371 	struct usb_devmap devmap;	/* device address allocation map */
  372 	struct usb_device *root_hub;	/* Root hub */
  373 	struct usb_bus *hs_companion;	/* Companion EHCI bus, if any */
  374 	struct list_head bus_list;	/* list of busses */
  375 
  376 	struct mutex usb_address0_mutex; /* unaddressed device mutex */
  377 
  378 	int bandwidth_allocated;	/* on this bus: how much of the time
  379 					 * reserved for periodic (intr/iso)
  380 					 * requests is used, on average?
  381 					 * Units: microseconds/frame.
  382 					 * Limits: Full/low speed reserve 90%,
  383 					 * while high speed reserves 80%.
  384 					 */
  385 	int bandwidth_int_reqs;		/* number of Interrupt requests */
  386 	int bandwidth_isoc_reqs;	/* number of Isoc. requests */
  387 
  388 	unsigned resuming_ports;	/* bit array: resuming root-hub ports */
  389 
  390 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
  391 	struct mon_bus *mon_bus;	/* non-null when associated */
  392 	int monitored;			/* non-zero when monitored */
  393 #endif
  394 };
  395 
  396 struct usb_dev_state;
  397 
  398 /* ----------------------------------------------------------------------- */
  399 
  400 struct usb_tt;
  401 
  402 enum usb_device_removable {
  403 	USB_DEVICE_REMOVABLE_UNKNOWN = 0,
  404 	USB_DEVICE_REMOVABLE,
  405 	USB_DEVICE_FIXED,
  406 };
  407 
  408 enum usb_port_connect_type {
  409 	USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
  410 	USB_PORT_CONNECT_TYPE_HOT_PLUG,
  411 	USB_PORT_CONNECT_TYPE_HARD_WIRED,
  412 	USB_PORT_NOT_USED,
  413 };
  414 
  415 /*
  416  * USB 2.0 Link Power Management (LPM) parameters.
  417  */
  418 struct usb2_lpm_parameters {
  419 	/* Best effort service latency indicates how long the host will drive
  420 	 * resume on an exit from L1.
  421 	 */
  422 	unsigned int besl;
  423 
  424 	/* Timeout value in microseconds for the L1 inactivity (LPM) timer.
  425 	 * When the timer counts to zero, the parent hub will initiate a LPM
  426 	 * transition to L1.
  427 	 */
  428 	int timeout;
  429 };
  430 
  431 /*
  432  * USB 3.0 Link Power Management (LPM) parameters.
  433  *
  434  * PEL and SEL are USB 3.0 Link PM latencies for device-initiated LPM exit.
  435  * MEL is the USB 3.0 Link PM latency for host-initiated LPM exit.
  436  * All three are stored in nanoseconds.
  437  */
  438 struct usb3_lpm_parameters {
  439 	/*
  440 	 * Maximum exit latency (MEL) for the host to send a packet to the
  441 	 * device (either a Ping for isoc endpoints, or a data packet for
  442 	 * interrupt endpoints), the hubs to decode the packet, and for all hubs
  443 	 * in the path to transition the links to U0.
  444 	 */
  445 	unsigned int mel;
  446 	/*
  447 	 * Maximum exit latency for a device-initiated LPM transition to bring
  448 	 * all links into U0.  Abbreviated as "PEL" in section 9.4.12 of the USB
  449 	 * 3.0 spec, with no explanation of what "P" stands for.  "Path"?
  450 	 */
  451 	unsigned int pel;
  452 
  453 	/*
  454 	 * The System Exit Latency (SEL) includes PEL, and three other
  455 	 * latencies.  After a device initiates a U0 transition, it will take
  456 	 * some time from when the device sends the ERDY to when it will finally
   457 	 * receive the data packet.  Basically, SEL should be the worst-case
  458 	 * latency from when a device starts initiating a U0 transition to when
  459 	 * it will get data.
  460 	 */
  461 	unsigned int sel;
  462 	/*
  463 	 * The idle timeout value that is currently programmed into the parent
  464 	 * hub for this device.  When the timer counts to zero, the parent hub
  465 	 * will initiate an LPM transition to either U1 or U2.
  466 	 */
  467 	int timeout;
  468 };
  469 
  470 /**
  471  * struct usb_device - kernel's representation of a USB device
  472  * @devnum: device number; address on a USB bus
  473  * @devpath: device ID string for use in messages (e.g., /port/...)
  474  * @route: tree topology hex string for use with xHCI
  475  * @state: device state: configured, not attached, etc.
  476  * @speed: device speed: high/full/low (or error)
  477  * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
  478  * @ttport: device port on that tt hub
   479  * @toggle: one bit for each endpoint ([0] = IN endpoints, [1] = OUT endpoints)
  480  * @parent: our hub, unless we're the root
  481  * @bus: bus we're part of
  482  * @ep0: endpoint 0 data (default control pipe)
  483  * @dev: generic device interface
  484  * @descriptor: USB device descriptor
  485  * @bos: USB device BOS descriptor set
  486  * @config: all of the device's configs
  487  * @actconfig: the active configuration
  488  * @ep_in: array of IN endpoints
  489  * @ep_out: array of OUT endpoints
  490  * @rawdescriptors: raw descriptors for each config
  491  * @bus_mA: Current available from the bus
  492  * @portnum: parent port number (origin 1)
  493  * @level: number of USB hub ancestors
  494  * @can_submit: URBs may be submitted
  495  * @persist_enabled:  USB_PERSIST enabled for this device
  496  * @have_langid: whether string_langid is valid
  497  * @authorized: policy has said we can use it;
  498  *	(user space) policy determines if we authorize this device to be
  499  *	used or not. By default, wired USB devices are authorized.
  500  *	WUSB devices are not, until we authorize them from user space.
  501  *	FIXME -- complete doc
  502  * @authenticated: Crypto authentication passed
  503  * @wusb: device is Wireless USB
  504  * @lpm_capable: device supports LPM
  505  * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
  506  * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
  507  * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
  508  * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
  509  * @usb3_lpm_enabled: USB3 hardware LPM enabled
  510  * @string_langid: language ID for strings
  511  * @product: iProduct string, if present (static)
  512  * @manufacturer: iManufacturer string, if present (static)
  513  * @serial: iSerialNumber string, if present (static)
  514  * @filelist: usbfs files that are open to this device
  515  * @maxchild: number of ports if hub
  516  * @quirks: quirks of the whole device
  517  * @urbnum: number of URBs submitted for the whole device
  518  * @active_duration: total time device is not suspended
  519  * @connect_time: time device was first connected
  520  * @do_remote_wakeup:  remote wakeup should be enabled
  521  * @reset_resume: needs reset instead of resume
  522  * @port_is_suspended: the upstream port is suspended (L2 or U3)
  523  * @wusb_dev: if this is a Wireless USB device, link to the WUSB
  524  *	specific data for the device.
  525  * @slot_id: Slot ID assigned by xHCI
  526  * @removable: Device can be physically removed from this port
   527  * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
  528  * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
  529  * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
  530  * @lpm_disable_count: Ref count used by usb_disable_lpm() and usb_enable_lpm()
  531  *	to keep track of the number of functions that require USB 3.0 Link Power
  532  *	Management to be disabled for this usb_device.  This count should only
   533  *	be manipulated by those functions while the bandwidth_mutex is held.
  534  *
  535  * Notes:
  536  * Usbcore drivers should not set usbdev->state directly.  Instead use
  537  * usb_set_device_state().
  538  */
  539 struct usb_device {
  540 	int		devnum;
  541 	char		devpath[16];
  542 	u32		route;
  543 	enum usb_device_state	state;
  544 	enum usb_device_speed	speed;
  545 
  546 	struct usb_tt	*tt;
  547 	int		ttport;
  548 
  549 	unsigned int toggle[2];
  550 
  551 	struct usb_device *parent;
  552 	struct usb_bus *bus;
  553 	struct usb_host_endpoint ep0;
  554 
  555 	struct device dev;
  556 
  557 	struct usb_device_descriptor descriptor;
  558 	struct usb_host_bos *bos;
  559 	struct usb_host_config *config;
  560 
  561 	struct usb_host_config *actconfig;
  562 	struct usb_host_endpoint *ep_in[16];
  563 	struct usb_host_endpoint *ep_out[16];
  564 
  565 	char **rawdescriptors;
  566 
  567 	unsigned short bus_mA;
  568 	u8 portnum;
  569 	u8 level;
  570 
  571 	unsigned can_submit:1;
  572 	unsigned persist_enabled:1;
  573 	unsigned have_langid:1;
  574 	unsigned authorized:1;
  575 	unsigned authenticated:1;
  576 	unsigned wusb:1;
  577 	unsigned lpm_capable:1;
  578 	unsigned usb2_hw_lpm_capable:1;
  579 	unsigned usb2_hw_lpm_besl_capable:1;
  580 	unsigned usb2_hw_lpm_enabled:1;
  581 	unsigned usb2_hw_lpm_allowed:1;
  582 	unsigned usb3_lpm_enabled:1;
  583 	int string_langid;
  584 
  585 	/* static strings from the device */
  586 	char *product;
  587 	char *manufacturer;
  588 	char *serial;
  589 
  590 	struct list_head filelist;
  591 
  592 	int maxchild;
  593 
  594 	u32 quirks;
  595 	atomic_t urbnum;
  596 
  597 	unsigned long active_duration;
  598 
  599 #ifdef CONFIG_PM
  600 	unsigned long connect_time;
  601 
  602 	unsigned do_remote_wakeup:1;
  603 	unsigned reset_resume:1;
  604 	unsigned port_is_suspended:1;
  605 #endif
  606 	struct wusb_dev *wusb_dev;
  607 	int slot_id;
  608 	enum usb_device_removable removable;
  609 	struct usb2_lpm_parameters l1_params;
  610 	struct usb3_lpm_parameters u1_params;
  611 	struct usb3_lpm_parameters u2_params;
  612 	unsigned lpm_disable_count;
  613 };
  614 #define	to_usb_device(d) container_of(d, struct usb_device, dev)
  615 
  616 static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf)
  617 {
  618 	return to_usb_device(intf->dev.parent);
  619 }
  620 
  621 extern struct usb_device *usb_get_dev(struct usb_device *dev);
  622 extern void usb_put_dev(struct usb_device *dev);
  623 extern struct usb_device *usb_hub_find_child(struct usb_device *hdev,
  624 	int port1);
  625 
  626 /**
  627  * usb_hub_for_each_child - iterate over all child devices on the hub
  628  * @hdev:  USB device belonging to the usb hub
  629  * @port1: portnum associated with child device
  630  * @child: child device pointer
  631  */
  632 #define usb_hub_for_each_child(hdev, port1, child) \
  633 	for (port1 = 1,	child =	usb_hub_find_child(hdev, port1); \
  634 			port1 <= hdev->maxchild; \
  635 			child = usb_hub_find_child(hdev, ++port1)) \
  636 		if (!child) continue; else
  637 
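As a usage sketch (not part of this header), the macro above can drive a simple walk over a hub's ports; the helper name and the counting logic are hypothetical:

/* Hypothetical helper: count the devices currently attached to a hub. */
static int count_hub_children(struct usb_device *hdev)
{
	struct usb_device *child;
	int port1, count = 0;

	usb_hub_for_each_child(hdev, port1, child)
		count++;	/* ports without a child are skipped by the macro */

	return count;
}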
  638 /* USB device locking */
  639 #define usb_lock_device(udev)		device_lock(&(udev)->dev)
  640 #define usb_unlock_device(udev)		device_unlock(&(udev)->dev)
  641 #define usb_trylock_device(udev)	device_trylock(&(udev)->dev)
  642 extern int usb_lock_device_for_reset(struct usb_device *udev,
  643 				     const struct usb_interface *iface);
  644 
  645 /* USB port reset for device reinitialization */
  646 extern int usb_reset_device(struct usb_device *dev);
  647 extern void usb_queue_reset_device(struct usb_interface *dev);
  648 
  649 #ifdef CONFIG_ACPI
  650 extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
  651 	bool enable);
  652 extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
  653 #else
  654 static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
  655 	bool enable) { return 0; }
  656 static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
  657 	{ return true; }
  658 #endif
  659 
  660 /* USB autosuspend and autoresume */
  661 #ifdef CONFIG_PM
  662 extern void usb_enable_autosuspend(struct usb_device *udev);
  663 extern void usb_disable_autosuspend(struct usb_device *udev);
  664 
  665 extern int usb_autopm_get_interface(struct usb_interface *intf);
  666 extern void usb_autopm_put_interface(struct usb_interface *intf);
  667 extern int usb_autopm_get_interface_async(struct usb_interface *intf);
  668 extern void usb_autopm_put_interface_async(struct usb_interface *intf);
  669 extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
  670 extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
  671 
  672 static inline void usb_mark_last_busy(struct usb_device *udev)
  673 {
  674 	pm_runtime_mark_last_busy(&udev->dev);
  675 }
  676 
  677 #else
  678 
  679 static inline int usb_enable_autosuspend(struct usb_device *udev)
  680 { return 0; }
  681 static inline int usb_disable_autosuspend(struct usb_device *udev)
  682 { return 0; }
  683 
  684 static inline int usb_autopm_get_interface(struct usb_interface *intf)
  685 { return 0; }
  686 static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
  687 { return 0; }
  688 
  689 static inline void usb_autopm_put_interface(struct usb_interface *intf)
  690 { }
  691 static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
  692 { }
  693 static inline void usb_autopm_get_interface_no_resume(
  694 		struct usb_interface *intf)
  695 { }
  696 static inline void usb_autopm_put_interface_no_suspend(
  697 		struct usb_interface *intf)
  698 { }
  699 static inline void usb_mark_last_busy(struct usb_device *udev)
  700 { }
  701 #endif
  702 
  703 extern int usb_disable_lpm(struct usb_device *udev);
  704 extern void usb_enable_lpm(struct usb_device *udev);
  705 /* Same as above, but these functions lock/unlock the bandwidth_mutex. */
  706 extern int usb_unlocked_disable_lpm(struct usb_device *udev);
  707 extern void usb_unlocked_enable_lpm(struct usb_device *udev);
  708 
  709 extern int usb_disable_ltm(struct usb_device *udev);
  710 extern void usb_enable_ltm(struct usb_device *udev);
  711 
  712 static inline bool usb_device_supports_ltm(struct usb_device *udev)
  713 {
  714 	if (udev->speed != USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap)
  715 		return false;
  716 	return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT;
  717 }
  718 
  719 static inline bool usb_device_no_sg_constraint(struct usb_device *udev)
  720 {
  721 	return udev && udev->bus && udev->bus->no_sg_constraint;
  722 }
  723 
  724 
  725 /*-------------------------------------------------------------------------*/
  726 
  727 /* for drivers using iso endpoints */
  728 extern int usb_get_current_frame_number(struct usb_device *usb_dev);
  729 
  730 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
  731 extern int usb_alloc_streams(struct usb_interface *interface,
  732 		struct usb_host_endpoint **eps, unsigned int num_eps,
  733 		unsigned int num_streams, gfp_t mem_flags);
  734 
  735 /* Reverts a group of bulk endpoints back to not using stream IDs. */
  736 extern int usb_free_streams(struct usb_interface *interface,
  737 		struct usb_host_endpoint **eps, unsigned int num_eps,
  738 		gfp_t mem_flags);
  739 
   740 /* use these for multi-interface device registration */
  741 extern int usb_driver_claim_interface(struct usb_driver *driver,
  742 			struct usb_interface *iface, void *priv);
  743 
  744 /**
  745  * usb_interface_claimed - returns true iff an interface is claimed
  746  * @iface: the interface being checked
  747  *
  748  * Return: %true (nonzero) iff the interface is claimed, else %false
  749  * (zero).
  750  *
  751  * Note:
  752  * Callers must own the driver model's usb bus readlock.  So driver
  753  * probe() entries don't need extra locking, but other call contexts
  754  * may need to explicitly claim that lock.
  755  *
  756  */
  757 static inline int usb_interface_claimed(struct usb_interface *iface)
  758 {
  759 	return (iface->dev.driver != NULL);
  760 }
  761 
  762 extern void usb_driver_release_interface(struct usb_driver *driver,
  763 			struct usb_interface *iface);
  764 const struct usb_device_id *usb_match_id(struct usb_interface *interface,
  765 					 const struct usb_device_id *id);
  766 extern int usb_match_one_id(struct usb_interface *interface,
  767 			    const struct usb_device_id *id);
  768 
  769 extern int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *));
  770 extern struct usb_interface *usb_find_interface(struct usb_driver *drv,
  771 		int minor);
  772 extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
  773 		unsigned ifnum);
  774 extern struct usb_host_interface *usb_altnum_to_altsetting(
  775 		const struct usb_interface *intf, unsigned int altnum);
  776 extern struct usb_host_interface *usb_find_alt_setting(
  777 		struct usb_host_config *config,
  778 		unsigned int iface_num,
  779 		unsigned int alt_num);
  780 
  781 /* port claiming functions */
  782 int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
  783 		struct usb_dev_state *owner);
  784 int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
  785 		struct usb_dev_state *owner);
  786 
  787 /**
  788  * usb_make_path - returns stable device path in the usb tree
  789  * @dev: the device whose path is being constructed
  790  * @buf: where to put the string
  791  * @size: how big is "buf"?
  792  *
  793  * Return: Length of the string (> 0) or negative if size was too small.
  794  *
  795  * Note:
  796  * This identifier is intended to be "stable", reflecting physical paths in
  797  * hardware such as physical bus addresses for host controllers or ports on
  798  * USB hubs.  That makes it stay the same until systems are physically
  799  * reconfigured, by re-cabling a tree of USB devices or by moving USB host
  800  * controllers.  Adding and removing devices, including virtual root hubs
  801  * in host controller driver modules, does not change these path identifiers;
  802  * neither does rebooting or re-enumerating.  These are more useful identifiers
  803  * than changeable ("unstable") ones like bus numbers or device addresses.
  804  *
  805  * With a partial exception for devices connected to USB 2.0 root hubs, these
  806  * identifiers are also predictable.  So long as the device tree isn't changed,
  807  * plugging any USB device into a given hub port always gives it the same path.
  808  * Because of the use of "companion" controllers, devices connected to ports on
  809  * USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are
  810  * high speed, and a different one if they are full or low speed.
  811  */
  812 static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
  813 {
  814 	int actual;
  815 	actual = snprintf(buf, size, "usb-%s-%s", dev->bus->bus_name,
  816 			  dev->devpath);
  817 	return (actual >= (int)size) ? -1 : actual;
  818 }
  819 
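For illustration only, a caller might use this helper as follows; the buffer size and the dev_info() message are arbitrary choices, not values taken from this header:

/* Sketch: log the stable topology path of a device. */
static void log_usb_path(struct usb_device *udev)
{
	char path[64];

	if (usb_make_path(udev, path, sizeof(path)) >= 0)
		dev_info(&udev->dev, "stable path: %s\n", path);
}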
  820 /*-------------------------------------------------------------------------*/
  821 
  822 #define USB_DEVICE_ID_MATCH_DEVICE \
  823 		(USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
  824 #define USB_DEVICE_ID_MATCH_DEV_RANGE \
  825 		(USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI)
  826 #define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
  827 		(USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE)
  828 #define USB_DEVICE_ID_MATCH_DEV_INFO \
  829 		(USB_DEVICE_ID_MATCH_DEV_CLASS | \
  830 		USB_DEVICE_ID_MATCH_DEV_SUBCLASS | \
  831 		USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
  832 #define USB_DEVICE_ID_MATCH_INT_INFO \
  833 		(USB_DEVICE_ID_MATCH_INT_CLASS | \
  834 		USB_DEVICE_ID_MATCH_INT_SUBCLASS | \
  835 		USB_DEVICE_ID_MATCH_INT_PROTOCOL)
  836 
  837 /**
  838  * USB_DEVICE - macro used to describe a specific usb device
  839  * @vend: the 16 bit USB Vendor ID
  840  * @prod: the 16 bit USB Product ID
  841  *
  842  * This macro is used to create a struct usb_device_id that matches a
  843  * specific device.
  844  */
  845 #define USB_DEVICE(vend, prod) \
  846 	.match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
  847 	.idVendor = (vend), \
  848 	.idProduct = (prod)
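A hedged usage sketch: a driver's match table built with this macro typically looks like the following (the vendor and product IDs are made up, and skel_id_table is a hypothetical name):

static const struct usb_device_id skel_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* illustrative VID/PID */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_id_table);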
  849 /**
  850  * USB_DEVICE_VER - describe a specific usb device with a version range
  851  * @vend: the 16 bit USB Vendor ID
  852  * @prod: the 16 bit USB Product ID
  853  * @lo: the bcdDevice_lo value
  854  * @hi: the bcdDevice_hi value
  855  *
  856  * This macro is used to create a struct usb_device_id that matches a
  857  * specific device, with a version range.
  858  */
  859 #define USB_DEVICE_VER(vend, prod, lo, hi) \
  860 	.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, \
  861 	.idVendor = (vend), \
  862 	.idProduct = (prod), \
  863 	.bcdDevice_lo = (lo), \
  864 	.bcdDevice_hi = (hi)
  865 
  866 /**
  867  * USB_DEVICE_INTERFACE_CLASS - describe a usb device with a specific interface class
  868  * @vend: the 16 bit USB Vendor ID
  869  * @prod: the 16 bit USB Product ID
  870  * @cl: bInterfaceClass value
  871  *
  872  * This macro is used to create a struct usb_device_id that matches a
  873  * specific interface class of devices.
  874  */
  875 #define USB_DEVICE_INTERFACE_CLASS(vend, prod, cl) \
  876 	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
  877 		       USB_DEVICE_ID_MATCH_INT_CLASS, \
  878 	.idVendor = (vend), \
  879 	.idProduct = (prod), \
  880 	.bInterfaceClass = (cl)
  881 
  882 /**
  883  * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol
  884  * @vend: the 16 bit USB Vendor ID
  885  * @prod: the 16 bit USB Product ID
  886  * @pr: bInterfaceProtocol value
  887  *
  888  * This macro is used to create a struct usb_device_id that matches a
  889  * specific interface protocol of devices.
  890  */
  891 #define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \
  892 	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
  893 		       USB_DEVICE_ID_MATCH_INT_PROTOCOL, \
  894 	.idVendor = (vend), \
  895 	.idProduct = (prod), \
  896 	.bInterfaceProtocol = (pr)
  897 
  898 /**
  899  * USB_DEVICE_INTERFACE_NUMBER - describe a usb device with a specific interface number
  900  * @vend: the 16 bit USB Vendor ID
  901  * @prod: the 16 bit USB Product ID
  902  * @num: bInterfaceNumber value
  903  *
  904  * This macro is used to create a struct usb_device_id that matches a
  905  * specific interface number of devices.
  906  */
  907 #define USB_DEVICE_INTERFACE_NUMBER(vend, prod, num) \
  908 	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
  909 		       USB_DEVICE_ID_MATCH_INT_NUMBER, \
  910 	.idVendor = (vend), \
  911 	.idProduct = (prod), \
  912 	.bInterfaceNumber = (num)
  913 
  914 /**
  915  * USB_DEVICE_INFO - macro used to describe a class of usb devices
  916  * @cl: bDeviceClass value
  917  * @sc: bDeviceSubClass value
  918  * @pr: bDeviceProtocol value
  919  *
  920  * This macro is used to create a struct usb_device_id that matches a
  921  * specific class of devices.
  922  */
  923 #define USB_DEVICE_INFO(cl, sc, pr) \
  924 	.match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, \
  925 	.bDeviceClass = (cl), \
  926 	.bDeviceSubClass = (sc), \
  927 	.bDeviceProtocol = (pr)
  928 
  929 /**
  930  * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces
  931  * @cl: bInterfaceClass value
  932  * @sc: bInterfaceSubClass value
  933  * @pr: bInterfaceProtocol value
  934  *
  935  * This macro is used to create a struct usb_device_id that matches a
  936  * specific class of interfaces.
  937  */
  938 #define USB_INTERFACE_INFO(cl, sc, pr) \
  939 	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO, \
  940 	.bInterfaceClass = (cl), \
  941 	.bInterfaceSubClass = (sc), \
  942 	.bInterfaceProtocol = (pr)
  943 
  944 /**
  945  * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces
  946  * @vend: the 16 bit USB Vendor ID
  947  * @prod: the 16 bit USB Product ID
  948  * @cl: bInterfaceClass value
  949  * @sc: bInterfaceSubClass value
  950  * @pr: bInterfaceProtocol value
  951  *
  952  * This macro is used to create a struct usb_device_id that matches a
  953  * specific device with a specific class of interfaces.
  954  *
  955  * This is especially useful when explicitly matching devices that have
  956  * vendor specific bDeviceClass values, but standards-compliant interfaces.
  957  */
  958 #define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
  959 	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
  960 		| USB_DEVICE_ID_MATCH_DEVICE, \
  961 	.idVendor = (vend), \
  962 	.idProduct = (prod), \
  963 	.bInterfaceClass = (cl), \
  964 	.bInterfaceSubClass = (sc), \
  965 	.bInterfaceProtocol = (pr)
  966 
  967 /**
  968  * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
  969  * @vend: the 16 bit USB Vendor ID
  970  * @cl: bInterfaceClass value
  971  * @sc: bInterfaceSubClass value
  972  * @pr: bInterfaceProtocol value
  973  *
  974  * This macro is used to create a struct usb_device_id that matches a
  975  * specific vendor with a specific class of interfaces.
  976  *
  977  * This is especially useful when explicitly matching devices that have
  978  * vendor specific bDeviceClass values, but standards-compliant interfaces.
  979  */
  980 #define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
  981 	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
  982 		| USB_DEVICE_ID_MATCH_VENDOR, \
  983 	.idVendor = (vend), \
  984 	.bInterfaceClass = (cl), \
  985 	.bInterfaceSubClass = (sc), \
  986 	.bInterfaceProtocol = (pr)
  987 
  988 /* ----------------------------------------------------------------------- */
  989 
  990 /* Stuff for dynamic usb ids */
  991 struct usb_dynids {
  992 	spinlock_t lock;
  993 	struct list_head list;
  994 };
  995 
  996 struct usb_dynid {
  997 	struct list_head node;
  998 	struct usb_device_id id;
  999 };
 1000 
 1001 extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
 1002 				const struct usb_device_id *id_table,
 1003 				struct device_driver *driver,
 1004 				const char *buf, size_t count);
 1005 
 1006 extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
 1007 
 1008 /**
 1009  * struct usbdrv_wrap - wrapper for driver-model structure
 1010  * @driver: The driver-model core driver structure.
 1011  * @for_devices: Non-zero for device drivers, 0 for interface drivers.
 1012  */
 1013 struct usbdrv_wrap {
 1014 	struct device_driver driver;
 1015 	int for_devices;
 1016 };
 1017 
 1018 /**
 1019  * struct usb_driver - identifies USB interface driver to usbcore
 1020  * @name: The driver name should be unique among USB drivers,
 1021  *	and should normally be the same as the module name.
 1022  * @probe: Called to see if the driver is willing to manage a particular
 1023  *	interface on a device.  If it is, probe returns zero and uses
 1024  *	usb_set_intfdata() to associate driver-specific data with the
 1025  *	interface.  It may also use usb_set_interface() to specify the
 1026  *	appropriate altsetting.  If unwilling to manage the interface,
  1027  *	return -ENODEV; if genuine IO errors occurred, return an
  1028  *	appropriate negative errno value.
 1029  * @disconnect: Called when the interface is no longer accessible, usually
 1030  *	because its device has been (or is being) disconnected or the
 1031  *	driver module is being unloaded.
 1032  * @unlocked_ioctl: Used for drivers that want to talk to userspace through
 1033  *	the "usbfs" filesystem.  This lets devices provide ways to
 1034  *	expose information to user space regardless of where they
 1035  *	do (or don't) show up otherwise in the filesystem.
 1036  * @suspend: Called when the device is going to be suspended by the
 1037  *	system either from system sleep or runtime suspend context. The
 1038  *	return value will be ignored in system sleep context, so do NOT
 1039  *	try to continue using the device if suspend fails in this case.
 1040  *	Instead, let the resume or reset-resume routine recover from
 1041  *	the failure.
 1042  * @resume: Called when the device is being resumed by the system.
 1043  * @reset_resume: Called when the suspended device has been reset instead
 1044  *	of being resumed.
 1045  * @pre_reset: Called by usb_reset_device() when the device is about to be
 1046  *	reset.  This routine must not return until the driver has no active
 1047  *	URBs for the device, and no more URBs may be submitted until the
 1048  *	post_reset method is called.
 1049  * @post_reset: Called by usb_reset_device() after the device
 1050  *	has been reset
 1051  * @id_table: USB drivers use ID table to support hotplugging.
 1052  *	Export this with MODULE_DEVICE_TABLE(usb,...).  This must be set
 1053  *	or your driver's probe function will never get called.
 1054  * @dynids: used internally to hold the list of dynamically added device
 1055  *	ids for this driver.
 1056  * @drvwrap: Driver-model core structure wrapper.
 1057  * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
 1058  *	added to this driver by preventing the sysfs file from being created.
 1059  * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
 1060  *	for interfaces bound to this driver.
 1061  * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
 1062  *	endpoints before calling the driver's disconnect method.
 1063  * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
 1064  *	to initiate lower power link state transitions when an idle timeout
 1065  *	occurs.  Device-initiated USB 3.0 link PM will still be allowed.
 1066  *
 1067  * USB interface drivers must provide a name, probe() and disconnect()
 1068  * methods, and an id_table.  Other driver fields are optional.
 1069  *
 1070  * The id_table is used in hotplugging.  It holds a set of descriptors,
 1071  * and specialized data may be associated with each entry.  That table
 1072  * is used by both user and kernel mode hotplugging support.
 1073  *
 1074  * The probe() and disconnect() methods are called in a context where
 1075  * they can sleep, but they should avoid abusing the privilege.  Most
 1076  * work to connect to a device should be done when the device is opened,
 1077  * and undone at the last close.  The disconnect code needs to address
 1078  * concurrency issues with respect to open() and close() methods, as
 1079  * well as forcing all pending I/O requests to complete (by unlinking
 1080  * them as necessary, and blocking until the unlinks complete).
 1081  */
 1082 struct usb_driver {
 1083 	const char *name;
 1084 
 1085 	int (*probe) (struct usb_interface *intf,
 1086 		      const struct usb_device_id *id);
 1087 
 1088 	void (*disconnect) (struct usb_interface *intf);
 1089 
 1090 	int (*unlocked_ioctl) (struct usb_interface *intf, unsigned int code,
 1091 			void *buf);
 1092 
 1093 	int (*suspend) (struct usb_interface *intf, pm_message_t message);
 1094 	int (*resume) (struct usb_interface *intf);
 1095 	int (*reset_resume)(struct usb_interface *intf);
 1096 
 1097 	int (*pre_reset)(struct usb_interface *intf);
 1098 	int (*post_reset)(struct usb_interface *intf);
 1099 
 1100 	const struct usb_device_id *id_table;
 1101 
 1102 	struct usb_dynids dynids;
 1103 	struct usbdrv_wrap drvwrap;
 1104 	unsigned int no_dynamic_id:1;
 1105 	unsigned int supports_autosuspend:1;
 1106 	unsigned int disable_hub_initiated_lpm:1;
 1107 	unsigned int soft_unbind:1;
 1108 };
 1109 #define	to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
 1110 
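As a sketch of how the fields above are typically filled in, a minimal interface-driver declaration might look like this; skel_probe(), skel_disconnect() and skel_id_table are assumed to be defined elsewhere, and the name is illustrative:

static struct usb_driver skel_driver = {
	.name		= "usb-skeleton",	/* illustrative driver name */
	.probe		= skel_probe,		/* assumed probe() callback */
	.disconnect	= skel_disconnect,	/* assumed disconnect() callback */
	.id_table	= skel_id_table,	/* assumed match table */
};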
 1111 /**
 1112  * struct usb_device_driver - identifies USB device driver to usbcore
 1113  * @name: The driver name should be unique among USB drivers,
 1114  *	and should normally be the same as the module name.
 1115  * @probe: Called to see if the driver is willing to manage a particular
 1116  *	device.  If it is, probe returns zero and uses dev_set_drvdata()
 1117  *	to associate driver-specific data with the device.  If unwilling
 1118  *	to manage the device, return a negative errno value.
 1119  * @disconnect: Called when the device is no longer accessible, usually
 1120  *	because it has been (or is being) disconnected or the driver's
 1121  *	module is being unloaded.
 1122  * @suspend: Called when the device is going to be suspended by the system.
 1123  * @resume: Called when the device is being resumed by the system.
 1124  * @drvwrap: Driver-model core structure wrapper.
 1125  * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
 1126  *	for devices bound to this driver.
 1127  *
 1128  * USB drivers must provide all the fields listed above except drvwrap.
 1129  */
 1130 struct usb_device_driver {
 1131 	const char *name;
 1132 
 1133 	int (*probe) (struct usb_device *udev);
 1134 	void (*disconnect) (struct usb_device *udev);
 1135 
 1136 	int (*suspend) (struct usb_device *udev, pm_message_t message);
 1137 	int (*resume) (struct usb_device *udev, pm_message_t message);
 1138 	struct usbdrv_wrap drvwrap;
 1139 	unsigned int supports_autosuspend:1;
 1140 };
 1141 #define	to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
 1142 		drvwrap.driver)
 1143 
 1144 extern struct bus_type usb_bus_type;
 1145 
 1146 /**
 1147  * struct usb_class_driver - identifies a USB driver that wants to use the USB major number
 1148  * @name: the usb class device name for this driver.  Will show up in sysfs.
 1149  * @devnode: Callback to provide a naming hint for a possible
 1150  *	device node to create.
 1151  * @fops: pointer to the struct file_operations of this driver.
 1152  * @minor_base: the start of the minor range for this driver.
 1153  *
 1154  * This structure is used for the usb_register_dev() and
 1155  * usb_unregister_dev() functions, to consolidate a number of the
 1156  * parameters used for them.
 1157  */
 1158 struct usb_class_driver {
 1159 	char *name;
 1160 	char *(*devnode)(struct device *dev, umode_t *mode);
 1161 	const struct file_operations *fops;
 1162 	int minor_base;
 1163 };
 1164 
 1165 /*
 1166  * use these in module_init()/module_exit()
 1167  * and don't forget MODULE_DEVICE_TABLE(usb, ...)
 1168  */
 1169 extern int usb_register_driver(struct usb_driver *, struct module *,
 1170 			       const char *);
 1171 
 1172 /* use a define to avoid include chaining to get THIS_MODULE & friends */
 1173 #define usb_register(driver) \
 1174 	usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
 1175 
 1176 extern void usb_deregister(struct usb_driver *);
 1177 
 1178 /**
 1179  * module_usb_driver() - Helper macro for registering a USB driver
 1180  * @__usb_driver: usb_driver struct
 1181  *
 1182  * Helper macro for USB drivers which do not do anything special in module
 1183  * init/exit. This eliminates a lot of boilerplate. Each module may only
 1184  * use this macro once, and calling it replaces module_init() and module_exit()
 1185  */
 1186 #define module_usb_driver(__usb_driver) \
 1187 	module_driver(__usb_driver, usb_register, \
 1188 		       usb_deregister)
 1189 
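For a driver such as the hypothetical skel_driver sketched earlier, the whole module init/exit boilerplate then reduces to one line:

/* Sketch: registers skel_driver on module load, deregisters it on unload. */
module_usb_driver(skel_driver);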
 1190 extern int usb_register_device_driver(struct usb_device_driver *,
 1191 			struct module *);
 1192 extern void usb_deregister_device_driver(struct usb_device_driver *);
 1193 
 1194 extern int usb_register_dev(struct usb_interface *intf,
 1195 			    struct usb_class_driver *class_driver);
 1196 extern void usb_deregister_dev(struct usb_interface *intf,
 1197 			       struct usb_class_driver *class_driver);
 1198 
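A usage sketch tying these together: the class driver is usually a file-scope object handed to usb_register_dev() from probe(); skel_fops and the minor base below are assumptions, not values defined in this header:

static struct usb_class_driver skel_class = {
	.name		= "skel%d",		/* devnode name pattern */
	.fops		= &skel_fops,		/* assumed struct file_operations */
	.minor_base	= 192,			/* illustrative minor range start */
};

/* typically: usb_register_dev(intf, &skel_class) in probe(),
 * and usb_deregister_dev(intf, &skel_class) in disconnect(). */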
 1199 extern int usb_disabled(void);
 1200 
 1201 /* ----------------------------------------------------------------------- */
 1202 
 1203 /*
 1204  * URB support, for asynchronous request completions
 1205  */
 1206 
 1207 /*
 1208  * urb->transfer_flags:
 1209  *
 1210  * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb().
 1211  */
 1212 #define URB_SHORT_NOT_OK	0x0001	/* report short reads as errors */
 1213 #define URB_ISO_ASAP		0x0002	/* iso-only; use the first unexpired
 1214 					 * slot in the schedule */
 1215 #define URB_NO_TRANSFER_DMA_MAP	0x0004	/* urb->transfer_dma valid on submit */
 1216 #define URB_NO_FSBR		0x0020	/* UHCI-specific */
 1217 #define URB_ZERO_PACKET		0x0040	/* Finish bulk OUT with short packet */
 1218 #define URB_NO_INTERRUPT	0x0080	/* HINT: no non-error interrupt
 1219 					 * needed */
 1220 #define URB_FREE_BUFFER		0x0100	/* Free transfer buffer with the URB */
 1221 
 1222 /* The following flags are used internally by usbcore and HCDs */
 1223 #define URB_DIR_IN		0x0200	/* Transfer from device to host */
 1224 #define URB_DIR_OUT		0
 1225 #define URB_DIR_MASK		URB_DIR_IN
 1226 
 1227 #define URB_DMA_MAP_SINGLE	0x00010000	/* Non-scatter-gather mapping */
 1228 #define URB_DMA_MAP_PAGE	0x00020000	/* HCD-unsupported S-G */
 1229 #define URB_DMA_MAP_SG		0x00040000	/* HCD-supported S-G */
 1230 #define URB_MAP_LOCAL		0x00080000	/* HCD-local-memory mapping */
 1231 #define URB_SETUP_MAP_SINGLE	0x00100000	/* Setup packet DMA mapped */
 1232 #define URB_SETUP_MAP_LOCAL	0x00200000	/* HCD-local setup packet */
 1233 #define URB_DMA_SG_COMBINED	0x00400000	/* S-G entries were combined */
 1234 #define URB_ALIGNED_TEMP_BUFFER	0x00800000	/* Temp buffer was alloc'd */
 1235 
 1236 struct usb_iso_packet_descriptor {
 1237 	unsigned int offset;
 1238 	unsigned int length;		/* expected length */
 1239 	unsigned int actual_length;
 1240 	int status;
 1241 };
 1242 
 1243 struct urb;
 1244 
 1245 struct usb_anchor {
 1246 	struct list_head urb_list;
 1247 	wait_queue_head_t wait;
 1248 	spinlock_t lock;
 1249 	atomic_t suspend_wakeups;
 1250 	unsigned int poisoned:1;
 1251 };
 1252 
 1253 static inline void init_usb_anchor(struct usb_anchor *anchor)
 1254 {
 1255 	memset(anchor, 0, sizeof(*anchor));
 1256 	INIT_LIST_HEAD(&anchor->urb_list);
 1257 	init_waitqueue_head(&anchor->wait);
 1258 	spin_lock_init(&anchor->lock);
 1259 }
 1260 
 1261 typedef void (*usb_complete_t)(struct urb *);
 1262 
 1263 /**
 1264  * struct urb - USB Request Block
 1265  * @urb_list: For use by current owner of the URB.
 1266  * @anchor_list: membership in the list of an anchor
 1267  * @anchor: to anchor URBs to a common mooring
 1268  * @ep: Points to the endpoint's data structure.  Will eventually
 1269  *	replace @pipe.
 1270  * @pipe: Holds endpoint number, direction, type, and more.
 1271  *	Create these values with the eight macros available;
 1272  *	usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is "ctrl"
 1273  *	(control), "bulk", "int" (interrupt), or "iso" (isochronous).
 1274  *	For example usb_sndbulkpipe() or usb_rcvintpipe().  Endpoint
 1275  *	numbers range from zero to fifteen.  Note that "in" endpoint two
 1276  *	is a different endpoint (and pipe) from "out" endpoint two.
 1277  *	The current configuration controls the existence, type, and
 1278  *	maximum packet size of any given endpoint.
 1279  * @stream_id: the endpoint's stream ID for bulk streams
 1280  * @dev: Identifies the USB device to perform the request.
 1281  * @status: This is read in non-iso completion functions to get the
 1282  *	status of the particular request.  ISO requests only use it
 1283  *	to tell whether the URB was unlinked; detailed status for
 1284  *	each frame is in the fields of the iso_frame-desc.
 1285  * @transfer_flags: A variety of flags may be used to affect how URB
 1286  *	submission, unlinking, or operation are handled.  Different
 1287  *	kinds of URB can use different flags.
 1288  * @transfer_buffer:  This identifies the buffer to (or from) which the I/O
 1289  *	request will be performed unless URB_NO_TRANSFER_DMA_MAP is set
 1290  *	(however, do not leave garbage in transfer_buffer even then).
 1291  *	This buffer must be suitable for DMA; allocate it with
 1292  *	kmalloc() or equivalent.  For transfers to "in" endpoints, contents
 1293  *	of this buffer will be modified.  This buffer is used for the data
 1294  *	stage of control transfers.
 1295  * @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP,
 1296  *	the device driver is saying that it provided this DMA address,
 1297  *	which the host controller driver should use in preference to the
 1298  *	transfer_buffer.
 1299  * @sg: scatter gather buffer list, the buffer size of each element in
 1300  * 	the list (except the last) must be divisible by the endpoint's
 1301  * 	max packet size if no_sg_constraint isn't set in 'struct usb_bus'
 1302  * @num_mapped_sgs: (internal) number of mapped sg entries
 1303  * @num_sgs: number of entries in the sg list
 1304  * @transfer_buffer_length: How big is transfer_buffer.  The transfer may
 1305  *	be broken up into chunks according to the current maximum packet
 1306  *	size for the endpoint, which is a function of the configuration
 1307  *	and is encoded in the pipe.  When the length is zero, neither
 1308  *	transfer_buffer nor transfer_dma is used.
 1309  * @actual_length: This is read in non-iso completion functions, and
 1310  *	it tells how many bytes (out of transfer_buffer_length) were
 1311  *	transferred.  It will normally be the same as requested, unless
 1312  *	either an error was reported or a short read was performed.
 1313  *	The URB_SHORT_NOT_OK transfer flag may be used to make such
 1314  *	short reads be reported as errors.
 1315  * @setup_packet: Only used for control transfers, this points to eight bytes
 1316  *	of setup data.  Control transfers always start by sending this data
 1317  *	to the device.  Then transfer_buffer is read or written, if needed.
 1318  * @setup_dma: DMA pointer for the setup packet.  The caller must not use
 1319  *	this field; setup_packet must point to a valid buffer.
 1320  * @start_frame: Returns the initial frame for isochronous transfers.
 1321  * @number_of_packets: Lists the number of ISO transfer buffers.
 1322  * @interval: Specifies the polling interval for interrupt or isochronous
 1323  *	transfers.  The units are frames (milliseconds) for full and low
 1324  *	speed devices, and microframes (1/8 millisecond) for highspeed
 1325  *	and SuperSpeed devices.
 1326  * @error_count: Returns the number of ISO transfers that reported errors.
 1327  * @context: For use in completion functions.  This normally points to
 1328  *	request-specific driver context.
 1329  * @complete: Completion handler. This URB is passed as the parameter to the
 1330  *	completion function.  The completion function may then do what
 1331  *	it likes with the URB, including resubmitting or freeing it.
 1332  * @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to
 1333  *	collect the transfer status for each buffer.
 1334  *
 1335  * This structure identifies USB transfer requests.  URBs must be allocated by
 1336  * calling usb_alloc_urb() and freed with a call to usb_free_urb().
 1337  * Initialization may be done using various usb_fill_*_urb() functions.  URBs
 1338  * are submitted using usb_submit_urb(), and pending requests may be canceled
 1339  * using usb_unlink_urb() or usb_kill_urb().
 1340  *
 1341  * Data Transfer Buffers:
 1342  *
 1343  * Normally drivers provide I/O buffers allocated with kmalloc() or otherwise
 1344  * taken from the general page pool.  That is provided by transfer_buffer
 1345  * (control requests also use setup_packet), and host controller drivers
 1346  * perform a dma mapping (and unmapping) for each buffer transferred.  Those
 1347  * mapping operations can be expensive on some platforms (perhaps using a dma
 1348  * bounce buffer or talking to an IOMMU),
 1349  * although they're cheap on commodity x86 and ppc hardware.
 1350  *
 1351  * Alternatively, drivers may pass the URB_NO_TRANSFER_DMA_MAP transfer flag,
 1352  * which tells the host controller driver that no such mapping is needed for
 1353  * the transfer_buffer since
 1354  * the device driver is DMA-aware.  For example, a device driver might
 1355  * allocate a DMA buffer with usb_alloc_coherent() or call usb_buffer_map().
 1356  * When this transfer flag is provided, host controller drivers will
 1357  * attempt to use the dma address found in the transfer_dma
 1358  * field rather than determining a dma address themselves.
 1359  *
 1360  * Note that transfer_buffer must still be set if the controller
 1361  * does not support DMA (as indicated by bus.uses_dma) and when talking
  1362  * to the root hub. If you have to transfer between a highmem zone and the
  1363  * device on such a controller, create a bounce buffer or bail out with an error.
 1364  * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
 1365  * capable, assign NULL to it, so that usbmon knows not to use the value.
 1366  * The setup_packet must always be set, so it cannot be located in highmem.
 1367  *
 1368  * Initialization:
 1369  *
 1370  * All URBs submitted must initialize the dev, pipe, transfer_flags (may be
 1371  * zero), and complete fields.  All URBs must also initialize
 1372  * transfer_buffer and transfer_buffer_length.  They may provide the
 1373  * URB_SHORT_NOT_OK transfer flag, indicating that short reads are
 1374  * to be treated as errors; that flag is invalid for write requests.
 1375  *
 1376  * Bulk URBs may
 1377  * use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers
 1378  * should always terminate with a short packet, even if it means adding an
 1379  * extra zero length packet.
 1380  *
 1381  * Control URBs must provide a valid pointer in the setup_packet field.
 1382  * Unlike the transfer_buffer, the setup_packet may not be mapped for DMA
 1383  * beforehand.
 1384  *
 1385  * Interrupt URBs must provide an interval, saying how often (in milliseconds
 1386  * or, for highspeed devices, 125 microsecond units)
 1387  * to poll for transfers.  After the URB has been submitted, the interval
 1388  * field reflects how the transfer was actually scheduled.
 1389  * The polling interval may be more frequent than requested.
 1390  * For example, some controllers have a maximum interval of 32 milliseconds,
 1391  * while others support intervals of up to 1024 milliseconds.
 1392  * Isochronous URBs also have transfer intervals.  (Note that for isochronous
 1393  * endpoints, as well as high speed interrupt endpoints, the encoding of
 1394  * the transfer interval in the endpoint descriptor is logarithmic.
 1395  * Device drivers must convert that value to linear units themselves.)
 1396  *
 1397  * If an isochronous endpoint queue isn't already running, the host
 1398  * controller will schedule a new URB to start as soon as bandwidth
 1399  * utilization allows.  If the queue is running then a new URB will be
 1400  * scheduled to start in the first transfer slot following the end of the
 1401  * preceding URB, if that slot has not already expired.  If the slot has
 1402  * expired (which can happen when IRQ delivery is delayed for a long time),
 1403  * the scheduling behavior depends on the URB_ISO_ASAP flag.  If the flag
 1404  * is clear then the URB will be scheduled to start in the expired slot,
 1405  * implying that some of its packets will not be transferred; if the flag
 1406  * is set then the URB will be scheduled in the first unexpired slot,
 1407  * breaking the queue's synchronization.  Upon URB completion, the
 1408  * start_frame field will be set to the (micro)frame number in which the
 1409  * transfer was scheduled.  Ranges for frame counter values are HC-specific
 1410  * and can go from as low as 256 to as high as 65536 frames.
 1411  *
 1412  * Isochronous URBs have a different data transfer model, in part because
 1413  * the quality of service is only "best effort".  Callers provide specially
 1414  * allocated URBs, with number_of_packets worth of iso_frame_desc structures
 1415  * at the end.  Each such packet is an individual ISO transfer.  Isochronous
 1416  * URBs are normally queued, submitted by drivers to arrange that
 1417  * transfers are at least double buffered, and then explicitly resubmitted
 1418  * in completion handlers, so
 1419  * that data (such as audio or video) streams at as constant a rate as the
 1420  * host controller scheduler can support.
 1421  *
 1422  * Completion Callbacks:
 1423  *
 1424  * The completion callback is made in_interrupt(), and one of the first
 1425  * things that a completion handler should do is check the status field.
 1426  * The status field is provided for all URBs.  It is used to report
 1427  * unlinked URBs, and status for all non-ISO transfers.  It should not
 1428  * be examined before the URB is returned to the completion handler.
 1429  *
 1430  * The context field is normally used to link URBs back to the relevant
 1431  * driver or request state.
 1432  *
 1433  * When the completion callback is invoked for non-isochronous URBs, the
 1434  * actual_length field tells how many bytes were transferred.  This field
 1435  * is updated even when the URB terminated with an error or was unlinked.
 1436  *
 1437  * ISO transfer status is reported in the status and actual_length fields
 1438  * of the iso_frame_desc array, and the number of errors is reported in
 1439  * error_count.  Completion callbacks for ISO transfers will normally
 1440  * (re)submit URBs to ensure a constant transfer rate.
 1441  *
 1442  * Note that even fields marked "public" should not be touched by the driver
  1443  * when the urb is owned by the hcd, that is, from the call to
  1444  * usb_submit_urb() until the entry into the completion routine.
 1445  */
 1446 struct urb {
 1447 	/* private: usb core and host controller only fields in the urb */
 1448 	struct kref kref;		/* reference count of the URB */
 1449 	void *hcpriv;			/* private data for host controller */
 1450 	atomic_t use_count;		/* concurrent submissions counter */
 1451 	atomic_t reject;		/* submissions will fail */
 1452 	int unlinked;			/* unlink error code */
 1453 
 1454 	/* public: documented fields in the urb that can be used by drivers */
 1455 	struct list_head urb_list;	/* list head for use by the urb's
 1456 					 * current owner */
 1457 	struct list_head anchor_list;	/* the URB may be anchored */
 1458 	struct usb_anchor *anchor;
 1459 	struct usb_device *dev;		/* (in) pointer to associated device */
 1460 	struct usb_host_endpoint *ep;	/* (internal) pointer to endpoint */
 1461 	unsigned int pipe;		/* (in) pipe information */
 1462 	unsigned int stream_id;		/* (in) stream ID */
 1463 	int status;			/* (return) non-ISO status */
 1464 	unsigned int transfer_flags;	/* (in) URB_SHORT_NOT_OK | ...*/
 1465 	void *transfer_buffer;		/* (in) associated data buffer */
 1466 	dma_addr_t transfer_dma;	/* (in) dma addr for transfer_buffer */
 1467 	struct scatterlist *sg;		/* (in) scatter gather buffer list */
 1468 	int num_mapped_sgs;		/* (internal) mapped sg entries */
 1469 	int num_sgs;			/* (in) number of entries in the sg list */
 1470 	u32 transfer_buffer_length;	/* (in) data buffer length */
 1471 	u32 actual_length;		/* (return) actual transfer length */
 1472 	unsigned char *setup_packet;	/* (in) setup packet (control only) */
 1473 	dma_addr_t setup_dma;		/* (in) dma addr for setup_packet */
 1474 	int start_frame;		/* (modify) start frame (ISO) */
 1475 	int number_of_packets;		/* (in) number of ISO packets */
 1476 	int interval;			/* (modify) transfer interval
 1477 					 * (INT/ISO) */
 1478 	int error_count;		/* (return) number of ISO errors */
 1479 	void *context;			/* (in) context for completion */
 1480 	usb_complete_t complete;	/* (in) completion routine */
 1481 	struct usb_iso_packet_descriptor iso_frame_desc[0];
 1482 					/* (in) ISO ONLY */
 1483 };
 1484 
 1485 /* ----------------------------------------------------------------------- */
 1486 
 1487 /**
 1488  * usb_fill_control_urb - initializes a control urb
 1489  * @urb: pointer to the urb to initialize.
 1490  * @dev: pointer to the struct usb_device for this urb.
 1491  * @pipe: the endpoint pipe
 1492  * @setup_packet: pointer to the setup_packet buffer
 1493  * @transfer_buffer: pointer to the transfer buffer
 1494  * @buffer_length: length of the transfer buffer
 1495  * @complete_fn: pointer to the usb_complete_t function
 1496  * @context: what to set the urb context to.
 1497  *
 1498  * Initializes a control urb with the proper information needed to submit
 1499  * it to a device.
 1500  */
 1501 static inline void usb_fill_control_urb(struct urb *urb,
 1502 					struct usb_device *dev,
 1503 					unsigned int pipe,
 1504 					unsigned char *setup_packet,
 1505 					void *transfer_buffer,
 1506 					int buffer_length,
 1507 					usb_complete_t complete_fn,
 1508 					void *context)
 1509 {
 1510 	urb->dev = dev;
 1511 	urb->pipe = pipe;
 1512 	urb->setup_packet = setup_packet;
 1513 	urb->transfer_buffer = transfer_buffer;
 1514 	urb->transfer_buffer_length = buffer_length;
 1515 	urb->complete = complete_fn;
 1516 	urb->context = context;
 1517 }
 1518 
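As a hedged sketch, a driver might pair this helper with usb_alloc_urb() and usb_submit_urb() roughly as follows; the wrapper name is hypothetical, setup and buf are assumed to be kmalloc()'d (DMA-able) buffers, and the completion handler is expected to call usb_free_urb():

static int skel_submit_ctrl(struct usb_device *dev,
			    struct usb_ctrlrequest *setup, void *buf, int len,
			    usb_complete_t done, void *ctx)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	usb_fill_control_urb(urb, dev, usb_sndctrlpipe(dev, 0),
			     (unsigned char *)setup, buf, len, done, ctx);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_free_urb(urb);	/* drop our reference on submit failure */
	return ret;
}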
 1519 /**
  1520  * usb_fill_bulk_urb - helper to initialize a bulk urb
 1521  * @urb: pointer to the urb to initialize.
 1522  * @dev: pointer to the struct usb_device for this urb.
 1523  * @pipe: the endpoint pipe
 1524  * @transfer_buffer: pointer to the transfer buffer
 1525  * @buffer_length: length of the transfer buffer
 1526  * @complete_fn: pointer to the usb_complete_t function
 1527  * @context: what to set the urb context to.
 1528  *
 1529  * Initializes a bulk urb with the proper information needed to submit it
 1530  * to a device.
 1531  */
 1532 static inline void usb_fill_bulk_urb(struct urb *urb,
 1533 				     struct usb_device *dev,
 1534 				     unsigned int pipe,
 1535 				     void *transfer_buffer,
 1536 				     int buffer_length,
 1537 				     usb_complete_t complete_fn,
 1538 				     void *context)
 1539 {
 1540 	urb->dev = dev;
 1541 	urb->pipe = pipe;
 1542 	urb->transfer_buffer = transfer_buffer;
 1543 	urb->transfer_buffer_length = buffer_length;
 1544 	urb->complete = complete_fn;
 1545 	urb->context = context;
 1546 }
 1547 
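For illustration, filling and submitting a bulk-OUT URB might look like the sketch below; the endpoint number 0x02 and the wrapper name are assumptions, not something defined in this header:

static int skel_send_bulk(struct usb_device *dev, struct urb *urb,
			  void *buf, int len, usb_complete_t done, void *ctx)
{
	usb_fill_bulk_urb(urb, dev, usb_sndbulkpipe(dev, 0x02),
			  buf, len, done, ctx);
	return usb_submit_urb(urb, GFP_KERNEL);
}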
 1548 /**
  1549  * usb_fill_int_urb - helper to initialize an interrupt urb
 1550  * @urb: pointer to the urb to initialize.
 1551  * @dev: pointer to the struct usb_device for this urb.
 1552  * @pipe: the endpoint pipe
 1553  * @transfer_buffer: pointer to the transfer buffer
 1554  * @buffer_length: length of the transfer buffer
 1555  * @complete_fn: pointer to the usb_complete_t function
 1556  * @context: what to set the urb context to.
 1557  * @interval: what to set the urb interval to, encoded like
 1558  *	the endpoint descriptor's bInterval value.
 1559  *
  1560  * Initializes an interrupt urb with the proper information needed to submit
 1561  * it to a device.
 1562  *
 1563  * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic
 1564  * encoding of the endpoint interval, and express polling intervals in
 1565  * microframes (eight per millisecond) rather than in frames (one per
 1566  * millisecond).
 1567  *
 1568  * Wireless USB also uses the logarithmic encoding, but specifies it in units of
 1569  * 128us instead of 125us.  For Wireless USB devices, the interval is passed
 1570  * through to the host controller, rather than being translated into microframe
 1571  * units.
 1572  */
 1573 static inline void usb_fill_int_urb(struct urb *urb,
 1574 				    struct usb_device *dev,
 1575 				    unsigned int pipe,
 1576 				    void *transfer_buffer,
 1577 				    int buffer_length,
 1578 				    usb_complete_t complete_fn,
 1579 				    void *context,
 1580 				    int interval)
 1581 {
 1582 	urb->dev = dev;
 1583 	urb->pipe = pipe;
 1584 	urb->transfer_buffer = transfer_buffer;
 1585 	urb->transfer_buffer_length = buffer_length;
 1586 	urb->complete = complete_fn;
 1587 	urb->context = context;
 1588 
 1589 	if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
 1590 		/* make sure interval is within allowed range */
 1591 		interval = clamp(interval, 1, 16);
 1592 
 1593 		urb->interval = 1 << (interval - 1);
 1594 	} else {
 1595 		urb->interval = interval;
 1596 	}
 1597 
 1598 	urb->start_frame = -1;
 1599 }
 1600 
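A usage sketch: drivers normally take the pipe and bInterval straight from the endpoint descriptor, letting the helper above translate the interval for high-speed and SuperSpeed devices; the wrapper name below is hypothetical:

static void skel_fill_int_in(struct urb *urb, struct usb_device *dev,
			     const struct usb_endpoint_descriptor *ep,
			     void *buf, int len, usb_complete_t done, void *ctx)
{
	usb_fill_int_urb(urb, dev,
			 usb_rcvintpipe(dev, ep->bEndpointAddress),
			 buf, len, done, ctx, ep->bInterval);
}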
 1601 extern void usb_init_urb(struct urb *urb);
 1602 extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags);
 1603 extern void usb_free_urb(struct urb *urb);
 1604 #define usb_put_urb usb_free_urb
 1605 extern struct urb *usb_get_urb(struct urb *urb);
 1606 extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
 1607 extern int usb_unlink_urb(struct urb *urb);
 1608 extern void usb_kill_urb(struct urb *urb);
 1609 extern void usb_poison_urb(struct urb *urb);
 1610 extern void usb_unpoison_urb(struct urb *urb);
 1611 extern void usb_block_urb(struct urb *urb);
 1612 extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
 1613 extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
 1614 extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
 1615 extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
 1616 extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor);
 1617 extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor);
 1618 extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
 1619 extern void usb_unanchor_urb(struct urb *urb);
 1620 extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
 1621 					 unsigned int timeout);
 1622 extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor);
 1623 extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor);
 1624 extern int usb_anchor_empty(struct usb_anchor *anchor);
 1625 
 1626 #define usb_unblock_urb	usb_unpoison_urb
 1627 
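A sketch of the intended anchor usage, assuming an anchor set up earlier with init_usb_anchor(); the submit wrapper and its name are hypothetical:

static struct usb_anchor skel_anchor;	/* init_usb_anchor(&skel_anchor) at probe time */

static int skel_submit_anchored(struct urb *urb, gfp_t flags)
{
	int ret;

	usb_anchor_urb(urb, &skel_anchor);
	ret = usb_submit_urb(urb, flags);
	if (ret)
		usb_unanchor_urb(urb);	/* undo the anchoring on submit failure */
	return ret;
}

/* on disconnect, all outstanding URBs can then be cancelled as a group:
 *	usb_kill_anchored_urbs(&skel_anchor);
 */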
 1628 /**
 1629  * usb_urb_dir_in - check if an URB describes an IN transfer
 1630  * @urb: URB to be checked
 1631  *
 1632  * Return: 1 if @urb describes an IN transfer (device-to-host),
 1633  * otherwise 0.
 1634  */
 1635 static inline int usb_urb_dir_in(struct urb *urb)
 1636 {
 1637 	return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN;
 1638 }
 1639 
 1640 /**
 1641  * usb_urb_dir_out - check if an URB describes an OUT transfer
 1642  * @urb: URB to be checked
 1643  *
 1644  * Return: 1 if @urb describes an OUT transfer (host-to-device),
 1645  * otherwise 0.
 1646  */
 1647 static inline int usb_urb_dir_out(struct urb *urb)
 1648 {
 1649 	return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
 1650 }
 1651 
 1652 void *usb_alloc_coherent(struct usb_device *dev, size_t size,
 1653 	gfp_t mem_flags, dma_addr_t *dma);
 1654 void usb_free_coherent(struct usb_device *dev, size_t size,
 1655 	void *addr, dma_addr_t dma);
 1656 
 1657 #if 0
 1658 struct urb *usb_buffer_map(struct urb *urb);
 1659 void usb_buffer_dmasync(struct urb *urb);
 1660 void usb_buffer_unmap(struct urb *urb);
 1661 #endif
 1662 
 1663 struct scatterlist;
 1664 int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
 1665 		      struct scatterlist *sg, int nents);
 1666 #if 0
 1667 void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
 1668 			   struct scatterlist *sg, int n_hw_ents);
 1669 #endif
 1670 void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
 1671 			 struct scatterlist *sg, int n_hw_ents);
 1672 
 1673 /*-------------------------------------------------------------------*
 1674  *                         SYNCHRONOUS CALL SUPPORT                  *
 1675  *-------------------------------------------------------------------*/
 1676 
 1677 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
 1678 	__u8 request, __u8 requesttype, __u16 value, __u16 index,
 1679 	void *data, __u16 size, int timeout);
 1680 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
 1681 	void *data, int len, int *actual_length, int timeout);
 1682 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
 1683 	void *data, int len, int *actual_length,
 1684 	int timeout);
 1685 
 1686 /* wrappers around usb_control_msg() for the most common standard requests */
 1687 extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
 1688 	unsigned char descindex, void *buf, int size);
 1689 extern int usb_get_status(struct usb_device *dev,
 1690 	int type, int target, void *data);
 1691 extern int usb_string(struct usb_device *dev, int index,
 1692 	char *buf, size_t size);
 1693 
 1694 /* wrappers that also update important state inside usbcore */
 1695 extern int usb_clear_halt(struct usb_device *dev, int pipe);
 1696 extern int usb_reset_configuration(struct usb_device *dev);
 1697 extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
 1698 extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);
 1699 
 1700 /* this request isn't really synchronous, but it belongs with the others */
 1701 extern int usb_driver_set_configuration(struct usb_device *udev, int config);
 1702 
 1703 /* choose and set configuration for device */
 1704 extern int usb_choose_configuration(struct usb_device *udev);
 1705 extern int usb_set_configuration(struct usb_device *dev, int configuration);
 1706 
 1707 /*
 1708  * timeouts, in milliseconds, used for sending/receiving control messages
 1709  * they typically complete within a few frames (msec) after they're issued
 1710  * USB identifies 5 second timeouts, maybe more in a few cases, and a few
 1711  * slow devices (like some MGE Ellipse UPSes) actually push that limit.
 1712  */
 1713 #define USB_CTRL_GET_TIMEOUT	5000
 1714 #define USB_CTRL_SET_TIMEOUT	5000
 1715 
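/*
 * An illustrative sketch of a synchronous standard request using
 * usb_control_msg() and the timeout above, as it would appear in a driver
 * (example_get_device_status is a hypothetical name).  The data buffer must
 * be kmalloc()ed, never stack memory, since it is used for DMA.
 */
static int example_get_device_status(struct usb_device *udev, u16 *status)
{
	__le16 *buf;
	int ret;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      USB_REQ_GET_STATUS,
			      USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
			      0, 0, buf, sizeof(*buf), USB_CTRL_GET_TIMEOUT);
	if (ret == sizeof(*buf)) {
		*status = le16_to_cpu(*buf);
		ret = 0;
	} else if (ret >= 0) {
		ret = -EIO;	/* short transfer */
	}
	kfree(buf);
	return ret;
}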
 1716 
 1717 /**
 1718  * struct usb_sg_request - support for scatter/gather I/O
 1719  * @status: zero indicates success, else negative errno
 1720  * @bytes: counts bytes transferred.
 1721  *
 1722  * These requests are initialized using usb_sg_init(), and then are used
 1723  * as request handles passed to usb_sg_wait() or usb_sg_cancel().  Most
 1724  * members of the request object aren't for driver access.
 1725  *
 1726  * The status and bytecount values are valid only after usb_sg_wait()
 1727  * returns.  If the status is zero, then the bytecount matches the total
 1728  * from the request.
 1729  *
 1730  * After an error completion, drivers may need to clear a halt condition
 1731  * on the endpoint.
 1732  */
 1733 struct usb_sg_request {
 1734 	int			status;
 1735 	size_t			bytes;
 1736 
 1737 	/* private:
 1738 	 * members below are private to usbcore,
 1739 	 * and are not provided for driver access!
 1740 	 */
 1741 	spinlock_t		lock;
 1742 
 1743 	struct usb_device	*dev;
 1744 	int			pipe;
 1745 
 1746 	int			entries;
 1747 	struct urb		**urbs;
 1748 
 1749 	int			count;
 1750 	struct completion	complete;
 1751 };
 1752 
 1753 int usb_sg_init(
 1754 	struct usb_sg_request	*io,
 1755 	struct usb_device	*dev,
 1756 	unsigned		pipe,
 1757 	unsigned		period,
 1758 	struct scatterlist	*sg,
 1759 	int			nents,
 1760 	size_t			length,
 1761 	gfp_t			mem_flags
 1762 );
 1763 void usb_sg_cancel(struct usb_sg_request *io);
 1764 void usb_sg_wait(struct usb_sg_request *io);
 1765 
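/*
 * A minimal sketch of the usual synchronous scatter/gather pattern as it
 * would appear in a driver: initialize the request, wait for it, then check
 * io.status and io.bytes.  The sg table and nents are assumed to have been
 * prepared by the caller; the period argument only matters for interrupt
 * pipes, so it is 0 here.
 */
static int example_sg_bulk_out(struct usb_device *udev, unsigned int ep,
			       struct scatterlist *sg, int nents, size_t length)
{
	struct usb_sg_request io;
	int ret;

	ret = usb_sg_init(&io, udev, usb_sndbulkpipe(udev, ep), 0,
			  sg, nents, length, GFP_KERNEL);
	if (ret)
		return ret;

	usb_sg_wait(&io);		/* blocks until all URBs complete */
	if (io.status)
		return io.status;	/* negative errno */

	return 0;			/* io.bytes now holds the byte count */
}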
 1766 
 1767 /* ----------------------------------------------------------------------- */
 1768 
 1769 /*
 1770  * For various legacy reasons, Linux has a small cookie that's paired with
 1771  * a struct usb_device to identify an endpoint queue.  Queue characteristics
 1772  * are defined by the endpoint's descriptor.  This cookie is called a "pipe",
 1773  * an unsigned int encoded as:
 1774  *
 1775  *  - direction:	bit 7		(0 = Host-to-Device [Out],
 1776  *					 1 = Device-to-Host [In] ...
 1777  *					like endpoint bEndpointAddress)
 1778  *  - device address:	bits 8-14       ... bit positions known to uhci-hcd
 1779  *  - endpoint:		bits 15-18      ... bit positions known to uhci-hcd
 1780  *  - pipe type:	bits 30-31	(00 = isochronous, 01 = interrupt,
 1781  *					 10 = control, 11 = bulk)
 1782  *
 1783  * Given the device address and endpoint descriptor, pipes are redundant.
 1784  */
 1785 
 1786 /* NOTE:  these are not the standard USB_ENDPOINT_XFER_* values!! */
 1787 /* (yet ... they're the values used by usbfs) */
 1788 #define PIPE_ISOCHRONOUS		0
 1789 #define PIPE_INTERRUPT			1
 1790 #define PIPE_CONTROL			2
 1791 #define PIPE_BULK			3
 1792 
 1793 #define usb_pipein(pipe)	((pipe) & USB_DIR_IN)
 1794 #define usb_pipeout(pipe)	(!usb_pipein(pipe))
 1795 
 1796 #define usb_pipedevice(pipe)	(((pipe) >> 8) & 0x7f)
 1797 #define usb_pipeendpoint(pipe)	(((pipe) >> 15) & 0xf)
 1798 
 1799 #define usb_pipetype(pipe)	(((pipe) >> 30) & 3)
 1800 #define usb_pipeisoc(pipe)	(usb_pipetype((pipe)) == PIPE_ISOCHRONOUS)
 1801 #define usb_pipeint(pipe)	(usb_pipetype((pipe)) == PIPE_INTERRUPT)
 1802 #define usb_pipecontrol(pipe)	(usb_pipetype((pipe)) == PIPE_CONTROL)
 1803 #define usb_pipebulk(pipe)	(usb_pipetype((pipe)) == PIPE_BULK)
 1804 
 1805 static inline unsigned int __create_pipe(struct usb_device *dev,
 1806 		unsigned int endpoint)
 1807 {
 1808 	return (dev->devnum << 8) | (endpoint << 15);
 1809 }
 1810 
 1811 /* Create various pipes... */
 1812 #define usb_sndctrlpipe(dev, endpoint)	\
 1813 	((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint))
 1814 #define usb_rcvctrlpipe(dev, endpoint)	\
 1815 	((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
 1816 #define usb_sndisocpipe(dev, endpoint)	\
 1817 	((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint))
 1818 #define usb_rcvisocpipe(dev, endpoint)	\
 1819 	((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
 1820 #define usb_sndbulkpipe(dev, endpoint)	\
 1821 	((PIPE_BULK << 30) | __create_pipe(dev, endpoint))
 1822 #define usb_rcvbulkpipe(dev, endpoint)	\
 1823 	((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
 1824 #define usb_sndintpipe(dev, endpoint)	\
 1825 	((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint))
 1826 #define usb_rcvintpipe(dev, endpoint)	\
 1827 	((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
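
/*
 * An illustrative sketch: a pipe built with the macros above can be handed
 * to the message helpers and decoded again with the usb_pipe*() accessors
 * (here usb_pipeendpoint(pipe) == ep and usb_pipetype(pipe) == PIPE_BULK).
 * The function name and the 5000 ms timeout are arbitrary examples.
 */
static int example_bulk_out(struct usb_device *udev, __u8 ep,
			    void *buf, int len)
{
	unsigned int pipe = usb_sndbulkpipe(udev, ep);
	int actual = 0;
	int ret;

	/* buf must be kmalloc()ed (DMA-able) memory */
	ret = usb_bulk_msg(udev, pipe, buf, len, &actual, 5000);

	return ret ? ret : actual;	/* bytes actually transferred */
}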
 1828 
 1829 static inline struct usb_host_endpoint *
 1830 usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
 1831 {
 1832 	struct usb_host_endpoint **eps;
 1833 	eps = usb_pipein(pipe) ? dev->ep_in : dev->ep_out;
 1834 	return eps[usb_pipeendpoint(pipe)];
 1835 }
 1836 
 1837 /*-------------------------------------------------------------------------*/
 1838 
 1839 static inline __u16
 1840 usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
 1841 {
 1842 	struct usb_host_endpoint	*ep;
 1843 	unsigned			epnum = usb_pipeendpoint(pipe);
 1844 
 1845 	if (is_out) {
 1846 		WARN_ON(usb_pipein(pipe));
 1847 		ep = udev->ep_out[epnum];
 1848 	} else {
 1849 		WARN_ON(usb_pipeout(pipe));
 1850 		ep = udev->ep_in[epnum];
 1851 	}
 1852 	if (!ep)
 1853 		return 0;
 1854 
 1855 	/* NOTE:  only 0x07ff bits are for packet size... */
 1856 	return usb_endpoint_maxp(&ep->desc);
 1857 }
 1858 
 1859 /* ----------------------------------------------------------------------- */
 1860 
 1861 /* translate USB error codes to codes user space understands */
 1862 static inline int usb_translate_errors(int error_code)
 1863 {
 1864 	switch (error_code) {
 1865 	case 0:
 1866 	case -ENOMEM:
 1867 	case -ENODEV:
 1868 	case -EOPNOTSUPP:
 1869 		return error_code;
 1870 	default:
 1871 		return -EIO;
 1872 	}
 1873 }
 1874 
 1875 /* Events from the usb core */
 1876 #define USB_DEVICE_ADD		0x0001
 1877 #define USB_DEVICE_REMOVE	0x0002
 1878 #define USB_BUS_ADD		0x0003
 1879 #define USB_BUS_REMOVE		0x0004
 1880 extern void usb_register_notify(struct notifier_block *nb);
 1881 extern void usb_unregister_notify(struct notifier_block *nb);
 1882 
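/*
 * A small sketch of consuming these events, assuming <linux/notifier.h> is
 * available; example_usb_notify and example_nb are hypothetical names.  For
 * USB_DEVICE_ADD/REMOVE the void *data argument is the struct usb_device,
 * for USB_BUS_ADD/REMOVE it is the struct usb_bus.
 */
static int example_usb_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	switch (action) {
	case USB_DEVICE_ADD:
		/* data is a struct usb_device * here */
		break;
	case USB_DEVICE_REMOVE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_usb_notify,
};

/* registered with usb_register_notify(&example_nb) and removed again with
 * usb_unregister_notify(&example_nb)
 */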
 1883 /* debugfs stuff */
 1884 extern struct dentry *usb_debug_root;
 1885 
 1886 /* LED triggers */
 1887 enum usb_led_event {
 1888 	USB_LED_EVENT_HOST = 0,
 1889 	USB_LED_EVENT_GADGET = 1,
 1890 };
 1891 
 1892 #ifdef CONFIG_USB_LED_TRIG
 1893 extern void usb_led_activity(enum usb_led_event ev);
 1894 #else
 1895 static inline void usb_led_activity(enum usb_led_event ev) {}
 1896 #endif
 1897 
 1898 #endif  /* __KERNEL__ */
 1899 
 1900 #endif

     1 /*
    2  * LIRC base driver
    3  *
    4  * by Artur Lipowski <alipowski@interia.pl>
    5  *        This code is licensed under GNU GPL
    6  *
    7  */
    8 
    9 #ifndef _LINUX_LIRC_DEV_H
   10 #define _LINUX_LIRC_DEV_H
   11 
   12 #define MAX_IRCTL_DEVICES 8
   13 #define BUFLEN            16
   14 
   15 #define mod(n, div) ((n) % (div))
   16 
   17 #include <linux/slab.h>
   18 #include <linux/fs.h>
   19 #include <linux/ioctl.h>
   20 #include <linux/poll.h>
   21 #include <linux/kfifo.h>
   22 #include <media/lirc.h>
   23 
   24 struct lirc_buffer {
   25 	wait_queue_head_t wait_poll;
   26 	spinlock_t fifo_lock;
   27 	unsigned int chunk_size;
   28 	unsigned int size; /* in chunks */
   29 	/* Using chunks instead of bytes is intended to simplify boundary checking
   30 	 * and should allow for some performance fine-tuning later */
   31 	struct kfifo fifo;
   32 };
   33 
   34 static inline void lirc_buffer_clear(struct lirc_buffer *buf)
   35 {
   36 	unsigned long flags;
   37 
   38 	if (kfifo_initialized(&buf->fifo)) {
   39 		spin_lock_irqsave(&buf->fifo_lock, flags);
   40 		kfifo_reset(&buf->fifo);
   41 		spin_unlock_irqrestore(&buf->fifo_lock, flags);
   42 	} else
   43 		WARN(1, "calling %s on an uninitialized lirc_buffer\n",
   44 		     __func__);
   45 }
   46 
   47 static inline int lirc_buffer_init(struct lirc_buffer *buf,
   48 				    unsigned int chunk_size,
   49 				    unsigned int size)
   50 {
   51 	int ret;
   52 
   53 	init_waitqueue_head(&buf->wait_poll);
   54 	spin_lock_init(&buf->fifo_lock);
   55 	buf->chunk_size = chunk_size;
   56 	buf->size = size;
   57 	ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL);
   58 
   59 	return ret;
   60 }
   61 
   62 static inline void lirc_buffer_free(struct lirc_buffer *buf)
   63 {
   64 	if (kfifo_initialized(&buf->fifo)) {
   65 		kfifo_free(&buf->fifo);
   66 	} else
   67 		WARN(1, "calling %s on an uninitialized lirc_buffer\n",
   68 		     __func__);
   69 }
   70 
   71 static inline int lirc_buffer_len(struct lirc_buffer *buf)
   72 {
   73 	int len;
   74 	unsigned long flags;
   75 
   76 	spin_lock_irqsave(&buf->fifo_lock, flags);
   77 	len = kfifo_len(&buf->fifo);
   78 	spin_unlock_irqrestore(&buf->fifo_lock, flags);
   79 
   80 	return len;
   81 }
   82 
   83 static inline int lirc_buffer_full(struct lirc_buffer *buf)
   84 {
   85 	return lirc_buffer_len(buf) == buf->size * buf->chunk_size;
   86 }
   87 
   88 static inline int lirc_buffer_empty(struct lirc_buffer *buf)
   89 {
   90 	return !lirc_buffer_len(buf);
   91 }
   92 
   93 static inline int lirc_buffer_available(struct lirc_buffer *buf)
   94 {
   95 	return buf->size - (lirc_buffer_len(buf) / buf->chunk_size);
   96 }
   97 
   98 static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf,
   99 					    unsigned char *dest)
  100 {
  101 	unsigned int ret = 0;
  102 
  103 	if (lirc_buffer_len(buf) >= buf->chunk_size)
  104 		ret = kfifo_out_locked(&buf->fifo, dest, buf->chunk_size,
  105 				       &buf->fifo_lock);
  106 	return ret;
  107 
  108 }
  109 
  110 static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
  111 					     unsigned char *orig)
  112 {
  113 	unsigned int ret;
  114 
  115 	ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size,
  116 			      &buf->fifo_lock);
  117 
  118 	return ret;
  119 }
  120 
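/*
 * An illustrative sketch of the lirc_buffer life cycle, assuming the caller
 * provides one code of chunk_size bytes: allocate the FIFO, push fixed-size
 * chunks from the receive path, wake any readers, and free it on teardown.
 */
static int example_buffer_usage(struct lirc_buffer *buf, unsigned char *code)
{
	int ret;

	ret = lirc_buffer_init(buf, BUFLEN, 4);	/* 4 chunks of BUFLEN bytes */
	if (ret)
		return ret;

	if (!lirc_buffer_full(buf))
		lirc_buffer_write(buf, code);	/* copies one chunk_size chunk */

	wake_up(&buf->wait_poll);		/* let poll()/read() know */

	lirc_buffer_free(buf);
	return 0;
}
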
  121 struct lirc_driver {
  122 	char name[40];
  123 	int minor;
  124 	__u32 code_length;
  125 	unsigned int buffer_size; /* in chunks holding one code each */
  126 	int sample_rate;
  127 	__u32 features;
  128 
  129 	unsigned int chunk_size;
  130 
  131 	void *data;
  132 	int min_timeout;
  133 	int max_timeout;
  134 	int (*add_to_buf) (void *data, struct lirc_buffer *buf);
  135 	struct lirc_buffer *rbuf;
  136 	int (*set_use_inc) (void *data);
  137 	void (*set_use_dec) (void *data);
  138 	struct rc_dev *rdev;
  139 	const struct file_operations *fops;
  140 	struct device *dev;
  141 	struct module *owner;
  142 };
  143 
  144 /* name:
  145  * this string will be used for logs
  146  *
  147  * minor:
  148  * indicates minor device (/dev/lirc) number for registered driver
  149  * if caller fills it with negative value, then the first free minor
  150  * number will be used (if available)
  151  *
  152  * code_length:
  153  * length of the remote control key code expressed in bits
  154  *
  155  * sample_rate:
  156  *
  157  * data:
  158  * it may point to any driver data and this pointer will be passed to
  159  * all callback functions
  160  *
  161  * add_to_buf:
  162  * add_to_buf will be called after a specified period of time or be
  163  * triggered by an external event; this behavior depends on the value of
  164  * sample_rate.  This function is called in user context.  The
  165  * routine should return 0 if data was added to the buffer and
  166  * -ENODATA if none was available.  It should add a number of bits
  167  * evenly divisible by code_length to the buffer
  168  *
  169  * rbuf:
  170  * if not NULL, it will be used as a read buffer, you will have to
  171  * write to the buffer by other means, like irq's (see also
  172  * lirc_serial.c).
  173  *
  174  * set_use_inc:
  175  * set_use_inc will be called after device is opened
  176  *
  177  * set_use_dec:
  178  * set_use_dec will be called after device is closed
  179  *
  180  * fops:
  181  * file_operations for drivers which don't fit the current driver model.
  182  *
  183  * Some ioctl's can be directly handled by lirc_dev if the driver's
  184  * ioctl function is NULL or if it returns -ENOIOCTLCMD (see also
  185  * lirc_serial.c).
  186  *
  187  * owner:
  188  * the module owning this struct
  189  *
  190  */
  191 
  192 
  193 /* the following functions can be called ONLY from user context
  194  *
  195  * returns a negative value on error or the minor number
  196  * of the registered device on success
  197  * the contents of the structure pointed to by d are copied
  198  */
  199 extern int lirc_register_driver(struct lirc_driver *d);
  200 
  201 /* returns a negative value on error or 0 on success
  202  */
  203 extern int lirc_unregister_driver(int minor);
  204 
  205 /* Returns the private data stored in the lirc_driver
  206  * associated with the given device file pointer.
  207  */
  208 void *lirc_get_pdata(struct file *file);
  209 
  210 /* default file operations
  211  * used by drivers if they override only some operations
  212  */
  213 int lirc_dev_fop_open(struct inode *inode, struct file *file);
  214 int lirc_dev_fop_close(struct inode *inode, struct file *file);
  215 unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait);
  216 long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
  217 ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length,
  218 			  loff_t *ppos);
  219 ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer,
  220 			   size_t length, loff_t *ppos);
  221 
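/*
 * A minimal registration sketch, roughly the pattern drivers such as
 * lirc_imon follow: fill in a struct lirc_driver and register it.  The stub
 * callbacks, the rbuf and example_register() itself are hypothetical.
 */
static int example_set_use_inc(void *data) { return 0; }
static void example_set_use_dec(void *data) { }

static int example_register(struct lirc_buffer *rbuf, void *priv,
			    struct device *parent, struct module *owner)
{
	struct lirc_driver driver = {
		.name		= "lirc_example",
		.minor		= -1,	/* let lirc_dev pick a free minor */
		.code_length	= 32,
		.buffer_size	= 4,
		.sample_rate	= 0,	/* no polling; rbuf is fed elsewhere */
		.data		= priv,
		.rbuf		= rbuf,
		.set_use_inc	= example_set_use_inc,
		.set_use_dec	= example_set_use_dec,
		.fops		= NULL,	/* use the default lirc_dev fops */
		.dev		= parent,
		.owner		= owner,
	};
	int minor = lirc_register_driver(&driver);	/* contents are copied */

	return minor < 0 ? minor : 0;
}
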
  222 #endif

    1 /*
    2  * This file holds USB constants and structures that are needed for
    3  * USB device APIs.  These are used by the USB device model, which is
    4  * defined in chapter 9 of the USB 2.0 specification and in the
    5  * Wireless USB 1.0 (spread around).  Linux has several APIs in C that
    6  * need these:
    7  *
    8  * - the master/host side Linux-USB kernel driver API;
    9  * - the "usbfs" user space API; and
   10  * - the Linux "gadget" slave/device/peripheral side driver API.
   11  *
   12  * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
   13  * act either as a USB master/host or as a USB slave/device.  That means
   14  * the master and slave side APIs benefit from working well together.
   15  *
   16  * There's also "Wireless USB", using low power short range radios for
   17  * peripheral interconnection but otherwise building on the USB framework.
   18  *
   19  * Note all descriptors are declared '__attribute__((packed))' so that:
   20  *
   21  * [a] they never get padded, either internally (USB spec writers
   22  *     probably handled that) or externally;
   23  *
   24  * [b] so that accessing bigger-than-a-bytes fields will never
   25  *     generate bus errors on any platform, even when the location of
   26  *     its descriptor inside a bundle isn't "naturally aligned", and
   27  *
   28  * [c] for consistency, removing all doubt even when it appears to
   29  *     someone that the two other points are non-issues for that
   30  *     particular descriptor type.
   31  */
   32 
   33 #ifndef _UAPI__LINUX_USB_CH9_H
   34 #define _UAPI__LINUX_USB_CH9_H
   35 
   36 #include <linux/types.h>	/* __u8 etc */
   37 #include <asm/byteorder.h>	/* le16_to_cpu */
   38 
   39 /*-------------------------------------------------------------------------*/
   40 
   41 /* CONTROL REQUEST SUPPORT */
   42 
   43 /*
   44  * USB directions
   45  *
   46  * This bit flag is used in endpoint descriptors' bEndpointAddress field.
   47  * It's also one of three fields in control requests bRequestType.
   48  */
   49 #define USB_DIR_OUT			0		/* to device */
   50 #define USB_DIR_IN			0x80		/* to host */
   51 
   52 /*
   53  * USB types, the second of three bRequestType fields
   54  */
   55 #define USB_TYPE_MASK			(0x03 << 5)
   56 #define USB_TYPE_STANDARD		(0x00 << 5)
   57 #define USB_TYPE_CLASS			(0x01 << 5)
   58 #define USB_TYPE_VENDOR			(0x02 << 5)
   59 #define USB_TYPE_RESERVED		(0x03 << 5)
   60 
   61 /*
   62  * USB recipients, the third of three bRequestType fields
   63  */
   64 #define USB_RECIP_MASK			0x1f
   65 #define USB_RECIP_DEVICE		0x00
   66 #define USB_RECIP_INTERFACE		0x01
   67 #define USB_RECIP_ENDPOINT		0x02
   68 #define USB_RECIP_OTHER			0x03
   69 /* From Wireless USB 1.0 */
   70 #define USB_RECIP_PORT			0x04
   71 #define USB_RECIP_RPIPE		0x05
   72 
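/*
 * An illustrative helper (hypothetical name): bRequestType is simply the OR
 * of one value from each of the three groups above, so a class-specific,
 * device-to-host request addressed to an interface is 0x80 | 0x20 | 0x01.
 */
static inline __u8 example_class_in_request_type(void)
{
	return USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;	/* 0xA1 */
}
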
   73 /*
   74  * Standard requests, for the bRequest field of a SETUP packet.
   75  *
   76  * These are qualified by the bRequestType field, so that for example
   77  * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved
   78  * by a GET_STATUS request.
   79  */
   80 #define USB_REQ_GET_STATUS		0x00
   81 #define USB_REQ_CLEAR_FEATURE		0x01
   82 #define USB_REQ_SET_FEATURE		0x03
   83 #define USB_REQ_SET_ADDRESS		0x05
   84 #define USB_REQ_GET_DESCRIPTOR		0x06
   85 #define USB_REQ_SET_DESCRIPTOR		0x07
   86 #define USB_REQ_GET_CONFIGURATION	0x08
   87 #define USB_REQ_SET_CONFIGURATION	0x09
   88 #define USB_REQ_GET_INTERFACE		0x0A
   89 #define USB_REQ_SET_INTERFACE		0x0B
   90 #define USB_REQ_SYNCH_FRAME		0x0C
   91 #define USB_REQ_SET_SEL			0x30
   92 #define USB_REQ_SET_ISOCH_DELAY		0x31
   93 
   94 #define USB_REQ_SET_ENCRYPTION		0x0D	/* Wireless USB */
   95 #define USB_REQ_GET_ENCRYPTION		0x0E
   96 #define USB_REQ_RPIPE_ABORT		0x0E
   97 #define USB_REQ_SET_HANDSHAKE		0x0F
   98 #define USB_REQ_RPIPE_RESET		0x0F
   99 #define USB_REQ_GET_HANDSHAKE		0x10
  100 #define USB_REQ_SET_CONNECTION		0x11
  101 #define USB_REQ_SET_SECURITY_DATA	0x12
  102 #define USB_REQ_GET_SECURITY_DATA	0x13
  103 #define USB_REQ_SET_WUSB_DATA		0x14
  104 #define USB_REQ_LOOPBACK_DATA_WRITE	0x15
  105 #define USB_REQ_LOOPBACK_DATA_READ	0x16
  106 #define USB_REQ_SET_INTERFACE_DS	0x17
  107 
  108 /* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command,
  109  * used by hubs to put ports into a new L1 suspend state, except that it
  110  * forgot to define its number ...
  111  */
  112 
  113 /*
  114  * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and
  115  * are read as a bit array returned by USB_REQ_GET_STATUS.  (So there
  116  * are at most sixteen features of each type.)  Hubs may also support a
  117  * new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend.
  118  */
  119 #define USB_DEVICE_SELF_POWERED		0	/* (read only) */
  120 #define USB_DEVICE_REMOTE_WAKEUP	1	/* dev may initiate wakeup */
  121 #define USB_DEVICE_TEST_MODE		2	/* (wired high speed only) */
  122 #define USB_DEVICE_BATTERY		2	/* (wireless) */
  123 #define USB_DEVICE_B_HNP_ENABLE		3	/* (otg) dev may initiate HNP */
  124 #define USB_DEVICE_WUSB_DEVICE		3	/* (wireless)*/
  125 #define USB_DEVICE_A_HNP_SUPPORT	4	/* (otg) RH port supports HNP */
  126 #define USB_DEVICE_A_ALT_HNP_SUPPORT	5	/* (otg) other RH port does */
  127 #define USB_DEVICE_DEBUG_MODE		6	/* (special devices only) */
  128 
  129 /*
  130  * Test Mode Selectors
  131  * See USB 2.0 spec Table 9-7
  132  */
  133 #define	TEST_J		1
  134 #define	TEST_K		2
  135 #define	TEST_SE0_NAK	3
  136 #define	TEST_PACKET	4
  137 #define	TEST_FORCE_EN	5
  138 
  139 /*
  140  * New Feature Selectors as added by USB 3.0
  141  * See USB 3.0 spec Table 9-7
  142  */
  143 #define USB_DEVICE_U1_ENABLE	48	/* dev may initiate U1 transition */
  144 #define USB_DEVICE_U2_ENABLE	49	/* dev may initiate U2 transition */
  145 #define USB_DEVICE_LTM_ENABLE	50	/* dev may send LTM */
  146 #define USB_INTRF_FUNC_SUSPEND	0	/* function suspend */
  147 
  148 #define USB_INTR_FUNC_SUSPEND_OPT_MASK	0xFF00
  149 /*
  150  * Suspend Options, Table 9-8 USB 3.0 spec
  151  */
  152 #define USB_INTRF_FUNC_SUSPEND_LP	(1 << (8 + 0))
  153 #define USB_INTRF_FUNC_SUSPEND_RW	(1 << (8 + 1))
  154 
  155 /*
  156  * Interface status, Figure 9-5 USB 3.0 spec
  157  */
  158 #define USB_INTRF_STAT_FUNC_RW_CAP     1
  159 #define USB_INTRF_STAT_FUNC_RW         2
  160 
  161 #define USB_ENDPOINT_HALT		0	/* IN/OUT will STALL */
  162 
  163 /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
  164 #define USB_DEV_STAT_U1_ENABLED		2	/* transition into U1 state */
  165 #define USB_DEV_STAT_U2_ENABLED		3	/* transition into U2 state */
  166 #define USB_DEV_STAT_LTM_ENABLED	4	/* Latency tolerance messages */
  167 
  168 /**
  169  * struct usb_ctrlrequest - SETUP data for a USB device control request
  170  * @bRequestType: matches the USB bmRequestType field
  171  * @bRequest: matches the USB bRequest field
  172  * @wValue: matches the USB wValue field (le16 byte order)
  173  * @wIndex: matches the USB wIndex field (le16 byte order)
  174  * @wLength: matches the USB wLength field (le16 byte order)
  175  *
  176  * This structure is used to send control requests to a USB device.  It matches
  177  * the different fields of the USB 2.0 Spec section 9.3, table 9-2.  See the
  178  * USB spec for a fuller description of the different fields, and what they are
  179  * used for.
  180  *
  181  * Note that the driver for any interface can issue control requests.
  182  * For most devices, interfaces don't coordinate with each other, so
  183  * such requests may be made at any time.
  184  */
  185 struct usb_ctrlrequest {
  186 	__u8 bRequestType;
  187 	__u8 bRequest;
  188 	__le16 wValue;
  189 	__le16 wIndex;
  190 	__le16 wLength;
  191 } __attribute__ ((packed));
  192 
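/*
 * A small sketch of filling a SETUP packet by hand (gadget and host
 * controller code does this; ordinary host drivers use usb_control_msg()
 * instead).  The helper name is hypothetical; note the explicit
 * cpu_to_le16() conversions for the multi-byte fields.
 */
static inline void example_fill_get_descriptor(struct usb_ctrlrequest *setup,
					       __u8 desc_type, __u8 desc_index,
					       __u16 length)
{
	setup->bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
	setup->bRequest = USB_REQ_GET_DESCRIPTOR;
	setup->wValue = cpu_to_le16((desc_type << 8) | desc_index);
	setup->wIndex = cpu_to_le16(0);
	setup->wLength = cpu_to_le16(length);
}
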
  193 /*-------------------------------------------------------------------------*/
  194 
  195 /*
  196  * STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or
  197  * (rarely) accepted by SET_DESCRIPTOR.
  198  *
  199  * Note that all multi-byte values here are encoded in little endian
  200  * byte order "on the wire".  Within the kernel and when exposed
  201  * through the Linux-USB APIs, they are not converted to cpu byte
  202  * order; it is the responsibility of the client code to do this.
  203  * The single exception is when device and configuration descriptors (but
  204  * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
  205  * in this case the fields are converted to host endianness by the kernel.
  206  */
  207 
  208 /*
  209  * Descriptor types ... USB 2.0 spec table 9.5
  210  */
  211 #define USB_DT_DEVICE			0x01
  212 #define USB_DT_CONFIG			0x02
  213 #define USB_DT_STRING			0x03
  214 #define USB_DT_INTERFACE		0x04
  215 #define USB_DT_ENDPOINT			0x05
  216 #define USB_DT_DEVICE_QUALIFIER		0x06
  217 #define USB_DT_OTHER_SPEED_CONFIG	0x07
  218 #define USB_DT_INTERFACE_POWER		0x08
  219 /* these are from a minor usb 2.0 revision (ECN) */
  220 #define USB_DT_OTG			0x09
  221 #define USB_DT_DEBUG			0x0a
  222 #define USB_DT_INTERFACE_ASSOCIATION	0x0b
  223 /* these are from the Wireless USB spec */
  224 #define USB_DT_SECURITY			0x0c
  225 #define USB_DT_KEY			0x0d
  226 #define USB_DT_ENCRYPTION_TYPE		0x0e
  227 #define USB_DT_BOS			0x0f
  228 #define USB_DT_DEVICE_CAPABILITY	0x10
  229 #define USB_DT_WIRELESS_ENDPOINT_COMP	0x11
  230 #define USB_DT_WIRE_ADAPTER		0x21
  231 #define USB_DT_RPIPE			0x22
  232 #define USB_DT_CS_RADIO_CONTROL		0x23
  233 /* From the T10 UAS specification */
  234 #define USB_DT_PIPE_USAGE		0x24
  235 /* From the USB 3.0 spec */
  236 #define	USB_DT_SS_ENDPOINT_COMP		0x30
  237 
  238 /* Conventional codes for class-specific descriptors.  The convention is
  239  * defined in the USB "Common Class" Spec (3.11).  Individual class specs
  240  * are authoritative for their usage, not the "common class" writeup.
  241  */
  242 #define USB_DT_CS_DEVICE		(USB_TYPE_CLASS | USB_DT_DEVICE)
  243 #define USB_DT_CS_CONFIG		(USB_TYPE_CLASS | USB_DT_CONFIG)
  244 #define USB_DT_CS_STRING		(USB_TYPE_CLASS | USB_DT_STRING)
  245 #define USB_DT_CS_INTERFACE		(USB_TYPE_CLASS | USB_DT_INTERFACE)
  246 #define USB_DT_CS_ENDPOINT		(USB_TYPE_CLASS | USB_DT_ENDPOINT)
  247 
  248 /* All standard descriptors have these 2 fields at the beginning */
  249 struct usb_descriptor_header {
  250 	__u8  bLength;
  251 	__u8  bDescriptorType;
  252 } __attribute__ ((packed));
  253 
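/*
 * An illustrative sketch (hypothetical helper): because every standard
 * descriptor starts with these two fields, a raw configuration blob can be
 * walked generically by advancing bLength bytes at a time and dispatching
 * on bDescriptorType.
 */
static inline int example_count_descriptors(const void *buf, unsigned int len,
					    __u8 type)
{
	const __u8 *p = buf;
	int count = 0;

	while (len >= sizeof(struct usb_descriptor_header)) {
		const struct usb_descriptor_header *h = (const void *)p;

		if (h->bLength < sizeof(*h) || h->bLength > len)
			break;			/* malformed descriptor */
		if (h->bDescriptorType == type)
			count++;
		p += h->bLength;
		len -= h->bLength;
	}
	return count;
}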
  254 
  255 /*-------------------------------------------------------------------------*/
  256 
  257 /* USB_DT_DEVICE: Device descriptor */
  258 struct usb_device_descriptor {
  259 	__u8  bLength;
  260 	__u8  bDescriptorType;
  261 
  262 	__le16 bcdUSB;
  263 	__u8  bDeviceClass;
  264 	__u8  bDeviceSubClass;
  265 	__u8  bDeviceProtocol;
  266 	__u8  bMaxPacketSize0;
  267 	__le16 idVendor;
  268 	__le16 idProduct;
  269 	__le16 bcdDevice;
  270 	__u8  iManufacturer;
  271 	__u8  iProduct;
  272 	__u8  iSerialNumber;
  273 	__u8  bNumConfigurations;
  274 } __attribute__ ((packed));
  275 
  276 #define USB_DT_DEVICE_SIZE		18
  277 
  278 
  279 /*
  280  * Device and/or Interface Class codes
  281  * as found in bDeviceClass or bInterfaceClass
  282  * and defined by www.usb.org documents
  283  */
  284 #define USB_CLASS_PER_INTERFACE		0	/* for DeviceClass */
  285 #define USB_CLASS_AUDIO			1
  286 #define USB_CLASS_COMM			2
  287 #define USB_CLASS_HID			3
  288 #define USB_CLASS_PHYSICAL		5
  289 #define USB_CLASS_STILL_IMAGE		6
  290 #define USB_CLASS_PRINTER		7
  291 #define USB_CLASS_MASS_STORAGE		8
  292 #define USB_CLASS_HUB			9
  293 #define USB_CLASS_CDC_DATA		0x0a
  294 #define USB_CLASS_CSCID			0x0b	/* chip+ smart card */
  295 #define USB_CLASS_CONTENT_SEC		0x0d	/* content security */
  296 #define USB_CLASS_VIDEO			0x0e
  297 #define USB_CLASS_WIRELESS_CONTROLLER	0xe0
  298 #define USB_CLASS_MISC			0xef
  299 #define USB_CLASS_APP_SPEC		0xfe
  300 #define USB_CLASS_VENDOR_SPEC		0xff
  301 
  302 #define USB_SUBCLASS_VENDOR_SPEC	0xff
  303 
  304 /*-------------------------------------------------------------------------*/
  305 
  306 /* USB_DT_CONFIG: Configuration descriptor information.
  307  *
  308  * USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the
  309  * descriptor type is different.  Highspeed-capable devices can look
  310  * different depending on what speed they're currently running.  Only
  311  * devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG
  312  * descriptors.
  313  */
  314 struct usb_config_descriptor {
  315 	__u8  bLength;
  316 	__u8  bDescriptorType;
  317 
  318 	__le16 wTotalLength;
  319 	__u8  bNumInterfaces;
  320 	__u8  bConfigurationValue;
  321 	__u8  iConfiguration;
  322 	__u8  bmAttributes;
  323 	__u8  bMaxPower;
  324 } __attribute__ ((packed));
  325 
  326 #define USB_DT_CONFIG_SIZE		9
  327 
  328 /* from config descriptor bmAttributes */
  329 #define USB_CONFIG_ATT_ONE		(1 << 7)	/* must be set */
  330 #define USB_CONFIG_ATT_SELFPOWER	(1 << 6)	/* self powered */
  331 #define USB_CONFIG_ATT_WAKEUP		(1 << 5)	/* can wakeup */
  332 #define USB_CONFIG_ATT_BATTERY		(1 << 4)	/* battery powered */
  333 
  334 /*-------------------------------------------------------------------------*/
  335 
  336 /* USB_DT_STRING: String descriptor */
  337 struct usb_string_descriptor {
  338 	__u8  bLength;
  339 	__u8  bDescriptorType;
  340 
  341 	__le16 wData[1];		/* UTF-16LE encoded */
  342 } __attribute__ ((packed));
  343 
  344 /* note that "string" zero is special, it holds language codes that
  345  * the device supports, not Unicode characters.
  346  */
  347 
  348 /*-------------------------------------------------------------------------*/
  349 
  350 /* USB_DT_INTERFACE: Interface descriptor */
  351 struct usb_interface_descriptor {
  352 	__u8  bLength;
  353 	__u8  bDescriptorType;
  354 
  355 	__u8  bInterfaceNumber;
  356 	__u8  bAlternateSetting;
  357 	__u8  bNumEndpoints;
  358 	__u8  bInterfaceClass;
  359 	__u8  bInterfaceSubClass;
  360 	__u8  bInterfaceProtocol;
  361 	__u8  iInterface;
  362 } __attribute__ ((packed));
  363 
  364 #define USB_DT_INTERFACE_SIZE		9
  365 
  366 /*-------------------------------------------------------------------------*/
  367 
  368 /* USB_DT_ENDPOINT: Endpoint descriptor */
  369 struct usb_endpoint_descriptor {
  370 	__u8  bLength;
  371 	__u8  bDescriptorType;
  372 
  373 	__u8  bEndpointAddress;
  374 	__u8  bmAttributes;
  375 	__le16 wMaxPacketSize;
  376 	__u8  bInterval;
  377 
  378 	/* NOTE:  these two are _only_ in audio endpoints. */
  379 	/* use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof. */
  380 	__u8  bRefresh;
  381 	__u8  bSynchAddress;
  382 } __attribute__ ((packed));
  383 
  384 #define USB_DT_ENDPOINT_SIZE		7
  385 #define USB_DT_ENDPOINT_AUDIO_SIZE	9	/* Audio extension */
  386 
  387 
  388 /*
  389  * Endpoints
  390  */
  391 #define USB_ENDPOINT_NUMBER_MASK	0x0f	/* in bEndpointAddress */
  392 #define USB_ENDPOINT_DIR_MASK		0x80
  393 
  394 #define USB_ENDPOINT_XFERTYPE_MASK	0x03	/* in bmAttributes */
  395 #define USB_ENDPOINT_XFER_CONTROL	0
  396 #define USB_ENDPOINT_XFER_ISOC		1
  397 #define USB_ENDPOINT_XFER_BULK		2
  398 #define USB_ENDPOINT_XFER_INT		3
  399 #define USB_ENDPOINT_MAX_ADJUSTABLE	0x80
  400 
  401 /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
  402 #define USB_ENDPOINT_INTRTYPE		0x30
  403 #define USB_ENDPOINT_INTR_PERIODIC	(0 << 4)
  404 #define USB_ENDPOINT_INTR_NOTIFICATION	(1 << 4)
  405 
  406 #define USB_ENDPOINT_SYNCTYPE		0x0c
  407 #define USB_ENDPOINT_SYNC_NONE		(0 << 2)
  408 #define USB_ENDPOINT_SYNC_ASYNC		(1 << 2)
  409 #define USB_ENDPOINT_SYNC_ADAPTIVE	(2 << 2)
  410 #define USB_ENDPOINT_SYNC_SYNC		(3 << 2)
  411 
  412 #define USB_ENDPOINT_USAGE_MASK		0x30
  413 #define USB_ENDPOINT_USAGE_DATA		0x00
  414 #define USB_ENDPOINT_USAGE_FEEDBACK	0x10
  415 #define USB_ENDPOINT_USAGE_IMPLICIT_FB	0x20	/* Implicit feedback Data endpoint */
  416 
  417 /*-------------------------------------------------------------------------*/
  418 
  419 /**
  420  * usb_endpoint_num - get the endpoint's number
  421  * @epd: endpoint to be checked
  422  *
  423  * Returns @epd's number: 0 to 15.
  424  */
  425 static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
  426 {
  427 	return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
  428 }
  429 
  430 /**
  431  * usb_endpoint_type - get the endpoint's transfer type
  432  * @epd: endpoint to be checked
  433  *
  434  * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
  435  * to @epd's transfer type.
  436  */
  437 static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
  438 {
  439 	return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
  440 }
  441 
  442 /**
  443  * usb_endpoint_dir_in - check if the endpoint has IN direction
  444  * @epd: endpoint to be checked
  445  *
  446  * Returns true if the endpoint is of type IN, otherwise it returns false.
  447  */
  448 static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
  449 {
  450 	return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
  451 }
  452 
  453 /**
  454  * usb_endpoint_dir_out - check if the endpoint has OUT direction
  455  * @epd: endpoint to be checked
  456  *
  457  * Returns true if the endpoint is of type OUT, otherwise it returns false.
  458  */
  459 static inline int usb_endpoint_dir_out(
  460 				const struct usb_endpoint_descriptor *epd)
  461 {
  462 	return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
  463 }
  464 
  465 /**
  466  * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
  467  * @epd: endpoint to be checked
  468  *
  469  * Returns true if the endpoint is of type bulk, otherwise it returns false.
  470  */
  471 static inline int usb_endpoint_xfer_bulk(
  472 				const struct usb_endpoint_descriptor *epd)
  473 {
  474 	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
  475 		USB_ENDPOINT_XFER_BULK);
  476 }
  477 
  478 /**
  479  * usb_endpoint_xfer_control - check if the endpoint has control transfer type
  480  * @epd: endpoint to be checked
  481  *
  482  * Returns true if the endpoint is of type control, otherwise it returns false.
  483  */
  484 static inline int usb_endpoint_xfer_control(
  485 				const struct usb_endpoint_descriptor *epd)
  486 {
  487 	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
  488 		USB_ENDPOINT_XFER_CONTROL);
  489 }
  490 
  491 /**
  492  * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
  493  * @epd: endpoint to be checked
  494  *
  495  * Returns true if the endpoint is of type interrupt, otherwise it returns
  496  * false.
  497  */
  498 static inline int usb_endpoint_xfer_int(
  499 				const struct usb_endpoint_descriptor *epd)
  500 {
  501 	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
  502 		USB_ENDPOINT_XFER_INT);
  503 }
  504 
  505 /**
  506  * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
  507  * @epd: endpoint to be checked
  508  *
  509  * Returns true if the endpoint is of type isochronous, otherwise it returns
  510  * false.
  511  */
  512 static inline int usb_endpoint_xfer_isoc(
  513 				const struct usb_endpoint_descriptor *epd)
  514 {
  515 	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
  516 		USB_ENDPOINT_XFER_ISOC);
  517 }
  518 
  519 /**
  520  * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
  521  * @epd: endpoint to be checked
  522  *
  523  * Returns true if the endpoint has bulk transfer type and IN direction,
  524  * otherwise it returns false.
  525  */
  526 static inline int usb_endpoint_is_bulk_in(
  527 				const struct usb_endpoint_descriptor *epd)
  528 {
  529 	return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd);
  530 }
  531 
  532 /**
  533  * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
  534  * @epd: endpoint to be checked
  535  *
  536  * Returns true if the endpoint has bulk transfer type and OUT direction,
  537  * otherwise it returns false.
  538  */
  539 static inline int usb_endpoint_is_bulk_out(
  540 				const struct usb_endpoint_descriptor *epd)
  541 {
  542 	return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd);
  543 }
  544 
  545 /**
  546  * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
  547  * @epd: endpoint to be checked
  548  *
  549  * Returns true if the endpoint has interrupt transfer type and IN direction,
  550  * otherwise it returns false.
  551  */
  552 static inline int usb_endpoint_is_int_in(
  553 				const struct usb_endpoint_descriptor *epd)
  554 {
  555 	return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd);
  556 }
  557 
  558 /**
  559  * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
  560  * @epd: endpoint to be checked
  561  *
  562  * Returns true if the endpoint has interrupt transfer type and OUT direction,
  563  * otherwise it returns false.
  564  */
  565 static inline int usb_endpoint_is_int_out(
  566 				const struct usb_endpoint_descriptor *epd)
  567 {
  568 	return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd);
  569 }
  570 
  571 /**
  572  * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
  573  * @epd: endpoint to be checked
  574  *
  575  * Returns true if the endpoint has isochronous transfer type and IN direction,
  576  * otherwise it returns false.
  577  */
  578 static inline int usb_endpoint_is_isoc_in(
  579 				const struct usb_endpoint_descriptor *epd)
  580 {
  581 	return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd);
  582 }
  583 
  584 /**
  585  * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
  586  * @epd: endpoint to be checked
  587  *
  588  * Returns true if the endpoint has isochronous transfer type and OUT direction,
  589  * otherwise it returns false.
  590  */
  591 static inline int usb_endpoint_is_isoc_out(
  592 				const struct usb_endpoint_descriptor *epd)
  593 {
  594 	return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd);
  595 }
  596 
  597 /**
  598  * usb_endpoint_maxp - get endpoint's max packet size
  599  * @epd: endpoint to be checked
  600  *
  601  * Returns @epd's max packet size.
  602  */
  603 static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
  604 {
  605 	return __le16_to_cpu(epd->wMaxPacketSize);
  606 }
  607 
  608 static inline int usb_endpoint_interrupt_type(
  609 		const struct usb_endpoint_descriptor *epd)
  610 {
  611 	return epd->bmAttributes & USB_ENDPOINT_INTRTYPE;
  612 }
  613 
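/*
 * A typical probe-time sketch, as it would appear in a driver that also
 * includes <linux/usb.h> for struct usb_host_interface (the helper name is
 * hypothetical): pick the first bulk-in endpoint of an altsetting with the
 * helpers above and remember its address and max packet size.
 */
static int example_find_bulk_in(struct usb_host_interface *alt,
				__u8 *ep_addr, int *maxp)
{
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		const struct usb_endpoint_descriptor *epd =
						&alt->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(epd)) {
			*ep_addr = epd->bEndpointAddress;
			*maxp = usb_endpoint_maxp(epd);
			return 0;
		}
	}
	return -ENODEV;
}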
  614 /*-------------------------------------------------------------------------*/
  615 
  616 /* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
  617 struct usb_ss_ep_comp_descriptor {
  618 	__u8  bLength;
  619 	__u8  bDescriptorType;
  620 
  621 	__u8  bMaxBurst;
  622 	__u8  bmAttributes;
  623 	__le16 wBytesPerInterval;
  624 } __attribute__ ((packed));
  625 
  626 #define USB_DT_SS_EP_COMP_SIZE		6
  627 
  628 /* Bits 4:0 of bmAttributes if this is a bulk endpoint */
  629 static inline int
  630 usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
  631 {
  632 	int		max_streams;
  633 
  634 	if (!comp)
  635 		return 0;
  636 
  637 	max_streams = comp->bmAttributes & 0x1f;
  638 
  639 	if (!max_streams)
  640 		return 0;
  641 
  642 	max_streams = 1 << max_streams;
  643 
  644 	return max_streams;
  645 }
  646 
  647 /* Bits 1:0 of bmAttributes if this is an isoc endpoint */
  648 #define USB_SS_MULT(p)			(1 + ((p) & 0x3))
  649 
  650 /*-------------------------------------------------------------------------*/
  651 
  652 /* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */
  653 struct usb_qualifier_descriptor {
  654 	__u8  bLength;
  655 	__u8  bDescriptorType;
  656 
  657 	__le16 bcdUSB;
  658 	__u8  bDeviceClass;
  659 	__u8  bDeviceSubClass;
  660 	__u8  bDeviceProtocol;
  661 	__u8  bMaxPacketSize0;
  662 	__u8  bNumConfigurations;
  663 	__u8  bRESERVED;
  664 } __attribute__ ((packed));
  665 
  666 
  667 /*-------------------------------------------------------------------------*/
  668 
  669 /* USB_DT_OTG (from OTG 1.0a supplement) */
  670 struct usb_otg_descriptor {
  671 	__u8  bLength;
  672 	__u8  bDescriptorType;
  673 
  674 	__u8  bmAttributes;	/* support for HNP, SRP, etc */
  675 } __attribute__ ((packed));
  676 
  677 /* USB_DT_OTG (from OTG 2.0 supplement) */
  678 struct usb_otg20_descriptor {
  679 	__u8  bLength;
  680 	__u8  bDescriptorType;
  681 
  682 	__u8  bmAttributes;	/* support for HNP, SRP and ADP, etc */
  683 	__le16 bcdOTG;		/* OTG and EH supplement release number
  684 				 * in binary-coded decimal(i.e. 2.0 is 0200H)
  685 				 */
  686 } __attribute__ ((packed));
  687 
  688 /* from usb_otg_descriptor.bmAttributes */
  689 #define USB_OTG_SRP		(1 << 0)
  690 #define USB_OTG_HNP		(1 << 1)	/* swap host/device roles */
  691 #define USB_OTG_ADP		(1 << 2)	/* support ADP */
  692 
  693 /*-------------------------------------------------------------------------*/
  694 
  695 /* USB_DT_DEBUG:  for special highspeed devices, replacing serial console */
  696 struct usb_debug_descriptor {
  697 	__u8  bLength;
  698 	__u8  bDescriptorType;
  699 
  700 	/* bulk endpoints with 8 byte maxpacket */
  701 	__u8  bDebugInEndpoint;
  702 	__u8  bDebugOutEndpoint;
  703 } __attribute__((packed));
  704 
  705 /*-------------------------------------------------------------------------*/
  706 
  707 /* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */
  708 struct usb_interface_assoc_descriptor {
  709 	__u8  bLength;
  710 	__u8  bDescriptorType;
  711 
  712 	__u8  bFirstInterface;
  713 	__u8  bInterfaceCount;
  714 	__u8  bFunctionClass;
  715 	__u8  bFunctionSubClass;
  716 	__u8  bFunctionProtocol;
  717 	__u8  iFunction;
  718 } __attribute__ ((packed));
  719 
  720 
  721 /*-------------------------------------------------------------------------*/
  722 
  723 /* USB_DT_SECURITY:  group of wireless security descriptors, including
  724  * encryption types available for setting up a CC/association.
  725  */
  726 struct usb_security_descriptor {
  727 	__u8  bLength;
  728 	__u8  bDescriptorType;
  729 
  730 	__le16 wTotalLength;
  731 	__u8  bNumEncryptionTypes;
  732 } __attribute__((packed));
  733 
  734 /*-------------------------------------------------------------------------*/
  735 
  736 /* USB_DT_KEY:  used with {GET,SET}_SECURITY_DATA; only public keys
  737  * may be retrieved.
  738  */
  739 struct usb_key_descriptor {
  740 	__u8  bLength;
  741 	__u8  bDescriptorType;
  742 
  743 	__u8  tTKID[3];
  744 	__u8  bReserved;
  745 	__u8  bKeyData[0];
  746 } __attribute__((packed));
  747 
  748 /*-------------------------------------------------------------------------*/
  749 
  750 /* USB_DT_ENCRYPTION_TYPE:  bundled in DT_SECURITY groups */
  751 struct usb_encryption_descriptor {
  752 	__u8  bLength;
  753 	__u8  bDescriptorType;
  754 
  755 	__u8  bEncryptionType;
  756 #define	USB_ENC_TYPE_UNSECURE		0
  757 #define	USB_ENC_TYPE_WIRED		1	/* non-wireless mode */
  758 #define	USB_ENC_TYPE_CCM_1		2	/* aes128/cbc session */
  759 #define	USB_ENC_TYPE_RSA_1		3	/* rsa3072/sha1 auth */
  760 	__u8  bEncryptionValue;		/* use in SET_ENCRYPTION */
  761 	__u8  bAuthKeyIndex;
  762 } __attribute__((packed));
  763 
  764 
  765 /*-------------------------------------------------------------------------*/
  766 
  767 /* USB_DT_BOS:  group of device-level capabilities */
  768 struct usb_bos_descriptor {
  769 	__u8  bLength;
  770 	__u8  bDescriptorType;
  771 
  772 	__le16 wTotalLength;
  773 	__u8  bNumDeviceCaps;
  774 } __attribute__((packed));
  775 
  776 #define USB_DT_BOS_SIZE		5
  777 /*-------------------------------------------------------------------------*/
  778 
  779 /* USB_DT_DEVICE_CAPABILITY:  grouped with BOS */
  780 struct usb_dev_cap_header {
  781 	__u8  bLength;
  782 	__u8  bDescriptorType;
  783 	__u8  bDevCapabilityType;
  784 } __attribute__((packed));
  785 
  786 #define	USB_CAP_TYPE_WIRELESS_USB	1
  787 
  788 struct usb_wireless_cap_descriptor {	/* Ultra Wide Band */
  789 	__u8  bLength;
  790 	__u8  bDescriptorType;
  791 	__u8  bDevCapabilityType;
  792 
  793 	__u8  bmAttributes;
  794 #define	USB_WIRELESS_P2P_DRD		(1 << 1)
  795 #define	USB_WIRELESS_BEACON_MASK	(3 << 2)
  796 #define	USB_WIRELESS_BEACON_SELF	(1 << 2)
  797 #define	USB_WIRELESS_BEACON_DIRECTED	(2 << 2)
  798 #define	USB_WIRELESS_BEACON_NONE	(3 << 2)
  799 	__le16 wPHYRates;	/* bit rates, Mbps */
  800 #define	USB_WIRELESS_PHY_53		(1 << 0)	/* always set */
  801 #define	USB_WIRELESS_PHY_80		(1 << 1)
  802 #define	USB_WIRELESS_PHY_107		(1 << 2)	/* always set */
  803 #define	USB_WIRELESS_PHY_160		(1 << 3)
  804 #define	USB_WIRELESS_PHY_200		(1 << 4)	/* always set */
  805 #define	USB_WIRELESS_PHY_320		(1 << 5)
  806 #define	USB_WIRELESS_PHY_400		(1 << 6)
  807 #define	USB_WIRELESS_PHY_480		(1 << 7)
  808 	__u8  bmTFITXPowerInfo;	/* TFI power levels */
  809 	__u8  bmFFITXPowerInfo;	/* FFI power levels */
  810 	__le16 bmBandGroup;
  811 	__u8  bReserved;
  812 } __attribute__((packed));
  813 
  814 /* USB 2.0 Extension descriptor */
  815 #define	USB_CAP_TYPE_EXT		2
  816 
  817 struct usb_ext_cap_descriptor {		/* Link Power Management */
  818 	__u8  bLength;
  819 	__u8  bDescriptorType;
  820 	__u8  bDevCapabilityType;
  821 	__le32 bmAttributes;
  822 #define USB_LPM_SUPPORT			(1 << 1)	/* supports LPM */
  823 #define USB_BESL_SUPPORT		(1 << 2)	/* supports BESL */
  824 #define USB_BESL_BASELINE_VALID		(1 << 3)	/* Baseline BESL valid*/
  825 #define USB_BESL_DEEP_VALID		(1 << 4)	/* Deep BESL valid */
  826 #define USB_GET_BESL_BASELINE(p)	(((p) & (0xf << 8)) >> 8)
  827 #define USB_GET_BESL_DEEP(p)		(((p) & (0xf << 12)) >> 12)
  828 } __attribute__((packed));
  829 
  830 #define USB_DT_USB_EXT_CAP_SIZE	7
  831 
  832 /*
  833  * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB
  834  * specific device level capabilities
  835  */
  836 #define		USB_SS_CAP_TYPE		3
  837 struct usb_ss_cap_descriptor {		/* Link Power Management */
  838 	__u8  bLength;
  839 	__u8  bDescriptorType;
  840 	__u8  bDevCapabilityType;
  841 	__u8  bmAttributes;
  842 #define USB_LTM_SUPPORT			(1 << 1) /* supports LTM */
  843 	__le16 wSpeedSupported;
  844 #define USB_LOW_SPEED_OPERATION		(1)	 /* Low speed operation */
  845 #define USB_FULL_SPEED_OPERATION	(1 << 1) /* Full speed operation */
  846 #define USB_HIGH_SPEED_OPERATION	(1 << 2) /* High speed operation */
  847 #define USB_5GBPS_OPERATION		(1 << 3) /* Operation at 5Gbps */
  848 	__u8  bFunctionalitySupport;
  849 	__u8  bU1devExitLat;
  850 	__le16 bU2DevExitLat;
  851 } __attribute__((packed));
  852 
  853 #define USB_DT_USB_SS_CAP_SIZE	10
  854 
  855 /*
  856  * Container ID Capability descriptor: Defines the instance unique ID used to
  857  * identify the instance across all operating modes
  858  */
  859 #define	CONTAINER_ID_TYPE	4
  860 struct usb_ss_container_id_descriptor {
  861 	__u8  bLength;
  862 	__u8  bDescriptorType;
  863 	__u8  bDevCapabilityType;
  864 	__u8  bReserved;
  865 	__u8  ContainerID[16]; /* 128-bit number */
  866 } __attribute__((packed));
  867 
  868 #define USB_DT_USB_SS_CONTN_ID_SIZE	20
  869 /*-------------------------------------------------------------------------*/
  870 
  871 /* USB_DT_WIRELESS_ENDPOINT_COMP:  companion descriptor associated with
  872  * each endpoint descriptor for a wireless device
  873  */
  874 struct usb_wireless_ep_comp_descriptor {
  875 	__u8  bLength;
  876 	__u8  bDescriptorType;
  877 
  878 	__u8  bMaxBurst;
  879 	__u8  bMaxSequence;
  880 	__le16 wMaxStreamDelay;
  881 	__le16 wOverTheAirPacketSize;
  882 	__u8  bOverTheAirInterval;
  883 	__u8  bmCompAttributes;
  884 #define USB_ENDPOINT_SWITCH_MASK	0x03	/* in bmCompAttributes */
  885 #define USB_ENDPOINT_SWITCH_NO		0
  886 #define USB_ENDPOINT_SWITCH_SWITCH	1
  887 #define USB_ENDPOINT_SWITCH_SCALE	2
  888 } __attribute__((packed));
  889 
  890 /*-------------------------------------------------------------------------*/
  891 
  892 /* USB_REQ_SET_HANDSHAKE is a four-way handshake used between a wireless
  893  * host and a device for connection set up, mutual authentication, and
  894  * exchanging short lived session keys.  The handshake depends on a CC.
  895  */
  896 struct usb_handshake {
  897 	__u8 bMessageNumber;
  898 	__u8 bStatus;
  899 	__u8 tTKID[3];
  900 	__u8 bReserved;
  901 	__u8 CDID[16];
  902 	__u8 nonce[16];
  903 	__u8 MIC[8];
  904 } __attribute__((packed));
  905 
  906 /*-------------------------------------------------------------------------*/
  907 
  908 /* USB_REQ_SET_CONNECTION modifies or revokes a connection context (CC).
  909  * A CC may also be set up using non-wireless secure channels (including
  910  * wired USB!), and some devices may support CCs with multiple hosts.
  911  */
  912 struct usb_connection_context {
  913 	__u8 CHID[16];		/* persistent host id */
  914 	__u8 CDID[16];		/* device id (unique w/in host context) */
  915 	__u8 CK[16];		/* connection key */
  916 } __attribute__((packed));
  917 
  918 /*-------------------------------------------------------------------------*/
  919 
  920 /* USB 2.0 defines three speeds, here's how Linux identifies them */
  921 
  922 enum usb_device_speed {
  923 	USB_SPEED_UNKNOWN = 0,			/* enumerating */
  924 	USB_SPEED_LOW, USB_SPEED_FULL,		/* usb 1.1 */
  925 	USB_SPEED_HIGH,				/* usb 2.0 */
  926 	USB_SPEED_WIRELESS,			/* wireless (usb 2.5) */
  927 	USB_SPEED_SUPER,			/* usb 3.0 */
  928 };
  929 
  930 
  931 enum usb_device_state {
  932 	/* NOTATTACHED isn't in the USB spec, and this state acts
  933 	 * the same as ATTACHED ... but it's clearer this way.
  934 	 */
  935 	USB_STATE_NOTATTACHED = 0,
  936 
  937 	/* chapter 9 and authentication (wireless) device states */
  938 	USB_STATE_ATTACHED,
  939 	USB_STATE_POWERED,			/* wired */
  940 	USB_STATE_RECONNECTING,			/* auth */
  941 	USB_STATE_UNAUTHENTICATED,		/* auth */
  942 	USB_STATE_DEFAULT,			/* limited function */
  943 	USB_STATE_ADDRESS,
  944 	USB_STATE_CONFIGURED,			/* most functions */
  945 
  946 	USB_STATE_SUSPENDED
  947 
  948 	/* NOTE:  there are actually four different SUSPENDED
  949 	 * states, returning to POWERED, DEFAULT, ADDRESS, or
  950 	 * CONFIGURED respectively when SOF tokens flow again.
  951 	 * At this level there's no difference between L1 and L2
  952 	 * suspend states.  (L2 being original USB 1.1 suspend.)
  953 	 */
  954 };
  955 
  956 enum usb3_link_state {
  957 	USB3_LPM_U0 = 0,
  958 	USB3_LPM_U1,
  959 	USB3_LPM_U2,
  960 	USB3_LPM_U3
  961 };
  962 
  963 /*
  964  * A U1 timeout of 0x0 means the parent hub will reject any transitions to U1.
  965  * 0xff means the parent hub will accept transitions to U1, but will not
  966  * initiate a transition.
  967  *
  968  * A U1 timeout of 0x1 to 0x7F also causes the hub to initiate a transition to
  969  * U1 after that many microseconds.  Timeouts of 0x80 to 0xFE are reserved
  970  * values.
  971  *
  972  * A U2 timeout of 0x0 means the parent hub will reject any transitions to U2.
  973  * 0xff means the parent hub will accept transitions to U2, but will not
  974  * initiate a transition.
  975  *
  976  * A U2 timeout of 0x1 to 0xFE also causes the hub to initiate a transition to
  977  * U2 after N*256 microseconds.  Therefore a U2 timeout value of 0x1 means a U2
  978  * idle timer of 256 microseconds, 0x2 means 512 microseconds, 0xFE means
  979  * 65.024ms.
  980  */
  981 #define USB3_LPM_DISABLED		0x0
  982 #define USB3_LPM_U1_MAX_TIMEOUT		0x7F
  983 #define USB3_LPM_U2_MAX_TIMEOUT		0xFE
  984 #define USB3_LPM_DEVICE_INITIATED	0xFF
  985 
  986 struct usb_set_sel_req {
  987 	__u8	u1_sel;
  988 	__u8	u1_pel;
  989 	__le16	u2_sel;
  990 	__le16	u2_pel;
  991 } __attribute__ ((packed));
  992 
  993 /*
  994  * The Set System Exit Latency control transfer provides one byte each for
  995  * U1 SEL and U1 PEL, so the max exit latency is 0xFF.  U2 SEL and U2 PEL each
  996  * are two bytes long.
  997  */
  998 #define USB3_LPM_MAX_U1_SEL_PEL		0xFF
  999 #define USB3_LPM_MAX_U2_SEL_PEL		0xFFFF
 1000 
 1001 /*-------------------------------------------------------------------------*/
 1002 
 1003 /*
 1004  * As per USB compliance update, a device that is actively drawing
 1005  * more than 100mA from USB must report itself as bus-powered in
 1006  * the GetStatus(DEVICE) call.
 1007  * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
 1008  */
 1009 #define USB_SELF_POWER_VBUS_MAX_DRAW		100
 1010 
 1011 #endif /* _UAPI__LINUX_USB_CH9_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column contains the path on which the given rule is violated. You can expand/collapse entity classes by clicking on the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking on +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code. Line numbers may be shown as links on the left; click on them to open the corresponding lines in the source code.
The Source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files. Hovering over a tab shows its full file name; clicking on it shows the corresponding file content.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
| linux-4.3-rc1.tar.xz | drivers/staging/media/lirc/lirc_imon.ko | 32_7a | CPAchecker | Bug | Fixed | 2015-11-14 21:22:22 | L0214 |
Comment
reported: 14 Nov 2015