Bug
        
Error #153

Error trace
The trace opens with a CIL-generated preamble that redeclares, in source order, every kernel type the error path refers to. It begins:

    typedef unsigned char __u8;
    typedef unsigned short __u16;
    typedef int __s32;
    typedef unsigned int __u32;
    typedef long long __s64;
    typedef unsigned long long __u64;
    typedef signed char s8;
    typedef unsigned char u8;
    typedef unsigned short u16;
    typedef int s32;
    typedef unsigned int u32;
    typedef long long s64;
    typedef unsigned long long u64;
    typedef long __kernel_long_t;
    typedef unsigned long __kernel_ulong_t;
    typedef int __kernel_pid_t;
    ...
    struct __anonstruct_atomic_t_6 { int counter; };
    typedef struct __anonstruct_atomic_t_6 atomic_t;
    struct list_head { struct list_head *next; struct list_head *prev; };
    struct hlist_head { struct hlist_node *first; };
    struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; };
    struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); };

[The preamble continues in the same form through several hundred further declarations: the remaining __kernel_* and scalar typedefs; x86 descriptor, pt_regs and FPU-state structures; locking primitives (arch_spinlock_t, raw_spinlock_t, spinlock_t, rwlock_t, struct mutex, struct rw_semaphore and the lockdep types); kernfs, kobject and sysfs types; the VFS core (struct inode, dentry, super_block, file, file_operations, inode_operations, super_operations, file_system_type); quota, key and credential types; memory-management types (struct page, vm_area_struct, mm_struct); and the signal types. The dump breaks off in the middle of struct siginfo, before the steps of the error path itself.]
si_errno;   int si_code;   union __anonunion__sifields_310 _sifields; } ;   118     typedef struct siginfo siginfo_t;    22     struct sigpending {   struct list_head list;   sigset_t signal; } ;   257     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;   271     struct k_sigaction {   struct sigaction sa; } ;    43     struct seccomp_filter ;    44     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;    40     struct rt_mutex_waiter ;    41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;    11     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   u8 state;   u8 is_rel;   int start_pid;   void *start_site;   char start_comm[16U]; } ;   125     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t  (*get_time)();   ktime_t offset; } ;   158     struct hrtimer_cpu_base {   raw_spinlock_t lock;   seqcount_t seq;   struct hrtimer *running;   unsigned int cpu;   unsigned int active_bases;   unsigned int clock_was_set_seq;   bool migration_enabled;   bool nohz_active;   unsigned char in_hrtirq;   unsigned char hres_active;   unsigned char hang_detected;   ktime_t expires_next;   struct hrtimer *next_timer;   unsigned int nr_events;   unsigned int nr_retries;   unsigned int nr_hangs;   unsigned int max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;    12     enum kcov_mode {   KCOV_MODE_DISABLED = 0,   KCOV_MODE_TRACE = 1 } ;    17     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;    45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    41     struct percpu_ref ;    55     typedef void percpu_ref_func_t(struct percpu_ref *);    68     struct percpu_ref {   atomic_long_t count;   unsigned long percpu_count_ptr;   percpu_ref_func_t *release;   percpu_ref_func_t *confirm_switch;   bool force_atomic;   struct callback_head rcu; } ;   325     struct cgroup ;   326     struct cgroup_root ;   327     struct cgroup_subsys ;   328     struct cgroup_taskset ;   372     struct cgroup_file {   struct kernfs_node *kn; } ;    90     struct cgroup_subsys_state {   struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   struct list_head sibling;   struct list_head children;   int id;   unsigned int flags;   u64 serial_nr;   atomic_t online_cnt;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   141     struct css_set {   atomic_t refcount;   struct hlist_node hlist;   struct list_head tasks;   struct list_head mg_tasks;   struct list_head cgrp_links;   struct cgroup *dfl_cgrp;   struct cgroup_subsys_state *subsys[13U];   struct list_head mg_preload_node;   struct list_head mg_node;   struct cgroup *mg_src_cgrp;   struct cgroup *mg_dst_cgrp;   struct css_set *mg_dst_cset;   
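struct hrtimer above embeds its own callback, and enum hrtimer_restart is that callback's contract: returning HRTIMER_RESTART re-arms the timer, HRTIMER_NORESTART makes it one-shot. A hedged sketch (all names assumed):

static enum hrtimer_restart sketch_timer_cb(struct hrtimer *t)
{
        /* one-shot: do the work, then tell the hrtimer core not to restart */
        return HRTIMER_NORESTART;
}
/* armed with: hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *             timer.function = sketch_timer_cb;
 *             hrtimer_start(&timer, ktime_set(1, 0), HRTIMER_MODE_REL); */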
struct list_head e_cset_node[13U];   struct list_head task_iters;   bool dead;   struct callback_head callback_head; } ;   221     struct cgroup {   struct cgroup_subsys_state self;   unsigned long flags;   int id;   int level;   int populated_cnt;   struct kernfs_node *kn;   struct cgroup_file procs_file;   struct cgroup_file events_file;   u16 subtree_control;   u16 subtree_ss_mask;   u16 old_subtree_control;   u16 old_subtree_ss_mask;   struct cgroup_subsys_state *subsys[13U];   struct cgroup_root *root;   struct list_head cset_links;   struct list_head e_csets[13U];   struct list_head pidlists;   struct mutex pidlist_mutex;   wait_queue_head_t offline_waitq;   struct work_struct release_agent_work;   int ancestor_ids[]; } ;   306     struct cgroup_root {   struct kernfs_root *kf_root;   unsigned int subsys_mask;   int hierarchy_id;   struct cgroup cgrp;   int cgrp_ancestor_id_storage;   atomic_t nr_cgrps;   struct list_head root_list;   unsigned int flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   345     struct cftype {   char name[64U];   unsigned long private;   size_t max_write_len;   unsigned int flags;   unsigned int file_offset;   struct cgroup_subsys *ss;   struct list_head node;   struct kernfs_ops *kf_ops;   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   struct lock_class_key lockdep_key; } ;   430     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_released)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   void (*css_reset)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_taskset *);   void (*attach)(struct cgroup_taskset *);   void (*post_attach)();   int (*can_fork)(struct task_struct *);   void (*cancel_fork)(struct task_struct *);   void (*fork)(struct task_struct *);   void (*exit)(struct task_struct *);   void (*free)(struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   bool early_init;   bool implicit_on_dfl;   bool broken_hierarchy;   bool warned_broken_hierarchy;   int id;   const char *name;   const char *legacy_name;   struct cgroup_root *root;   struct idr css_idr;   struct list_head cfts;   struct cftype *dfl_cftypes;   struct cftype *legacy_cftypes;   unsigned int depends_on; } ;   128     struct futex_pi_state ;   129     struct robust_list_head ;   130     struct bio_list ;   131     struct fs_struct ;   132     struct perf_event_context ;   133     struct blk_plug ;   134     struct nameidata ;   188     struct cfs_rq ;   189     struct task_group ;   493     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   536     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t ac_stime;   unsigned 
long ac_minflt;   unsigned long ac_majflt; } ;   544     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   551     struct prev_cputime {   cputime_t utime;   cputime_t stime;   raw_spinlock_t lock; } ;   576     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   592     struct task_cputime_atomic {   atomic64_t utime;   atomic64_t stime;   atomic64_t sum_exec_runtime; } ;   614     struct thread_group_cputimer {   struct task_cputime_atomic cputime_atomic;   bool running;   bool checking_timer; } ;   659     struct autogroup ;   660     struct tty_struct ;   660     struct taskstats ;   660     struct tty_audit_buf ;   660     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   atomic_t oom_victims;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   struct autogroup *autogroup;   seqlock_t stats_lock;   cputime_t utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct pacct_struct pacct;   struct taskstats *stats;   unsigned int audit_tty;   struct tty_audit_buf *tty_audit_buf;   bool oom_flag_origin;   short oom_score_adj;   short oom_score_adj_min;   struct mutex cred_guard_mutex; } ;   835     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   unsigned long unix_inflight;   atomic_long_t pipe_bufs;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   880     struct reclaim_state ;   881     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   896     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   u64 blkio_start;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   u64 freepages_start;   u64 freepages_delay;   u32 freepages_count; } ;   953     struct wake_q_node {   struct wake_q_node *next; } ;  1220     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;  1228     struct sched_avg {   u64 last_update_time;   u64 load_sum;   u32 util_sum;   u32 period_contrib; 
  unsigned long load_avg;   unsigned long util_avg; } ;  1286     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1321     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1358     struct rt_rq ;  1358     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   unsigned short on_rq;   unsigned short on_list;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1376     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;  1440     struct tlbflush_unmap_batch {   struct cpumask cpumask;   bool flush_required;   bool writable; } ;  1459     struct sched_class ;  1459     struct compat_robust_list_head ;  1459     struct numa_group ;  1459     struct kcov ;  1459     struct task_struct {   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   unsigned int wakee_flips;   unsigned long wakee_flip_decay_ts;   struct task_struct *last_wakee;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   unsigned long rcu_tasks_nvcsw;   bool rcu_tasks_holdout;   struct list_head rcu_tasks_holdout_list;   int rcu_tasks_idle_cpu;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   u32 vmacache_seqnum;   struct vm_area_struct *vmacache[4U];   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned long jobctl;   unsigned int personality;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   unsigned char sched_migrated;   unsigned char sched_remote_wakeup;   unsigned char;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char restore_sigmask;   unsigned char memcg_may_oom;   unsigned char memcg_kmem_skip_account;   unsigned char brk_randomized;   unsigned long atomic_flags;   struct 
restart_block restart_block;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   u64 start_time;   u64 real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   struct nameidata *nameidata;   struct sysv_sem sysvsem;   struct sysv_shm sysvshm;   unsigned long last_switch_count;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   unsigned int sas_ss_flags;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct wake_q_node wake_q;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   unsigned int in_ubsan;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long 
*numa_faults;   unsigned long total_numa_faults;   unsigned long numa_faults_locality[3U];   unsigned long numa_pages_migrated;   struct tlbflush_unmap_batch tlb_ubc;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   u64 timer_slack_ns;   u64 default_timer_slack_ns;   unsigned int kasan_depth;   unsigned long trace;   unsigned long trace_recursion;   enum kcov_mode kcov_mode;   unsigned int kcov_size;   void *kcov_area;   struct kcov *kcov;   struct mem_cgroup *memcg_in_oom;   gfp_t memcg_oom_gfp_mask;   int memcg_oom_order;   unsigned int memcg_nr_pages_over_high;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg;   unsigned long task_state_change;   int pagefault_disabled;   struct task_struct *oom_reaper_list;   struct thread_struct thread; } ;    76     struct dma_map_ops ;    76     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    24     struct device_private ;    25     struct device_driver ;    26     struct driver_private ;    27     struct class ;    28     struct subsys_private ;    29     struct bus_type ;    30     struct device_node ;    31     struct fwnode_handle ;    32     struct iommu_ops ;    33     struct iommu_group ;    61     struct device_attribute ;    61     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   const struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   142     struct device_type ;   201     enum probe_type {   PROBE_DEFAULT_STRATEGY = 0,   PROBE_PREFER_ASYNCHRONOUS = 1,   PROBE_FORCE_SYNCHRONOUS = 2 } ;   207     struct of_device_id ;   207     struct acpi_device_id ;   207     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   enum probe_type probe_type;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   357     struct class_attribute ;   357     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int 
(*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   450     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   518     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   546     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   699     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   708     struct irq_domain ;   708     struct dma_coherent_mem ;   708     struct cma ;   708     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct irq_domain *msi_domain;   struct dev_pin_info *pins;   struct list_head msi_list;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct fwnode_handle *fwnode;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   bool offline_disabled;   bool offline; } ;   862     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct wake_irq *wakeirq;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;  1327     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;   423     struct proc_dir_entry ;    63     struct exception_table_entry {   int insn;   int fixup;   int handler; } ;   708     struct miscdevice {   int minor;   const char *name;   const struct file_operations *fops;   struct list_head list;   struct device *parent;   struct device *this_device;   const struct attribute_group **groups;   const char *nodename;   umode_t mode; } ;    19     struct vdso_image {   void *data;   unsigned long size;   unsigned long alt;   unsigned long alt_len;   long sym_vvar_start;   long sym_vvar_page;   long sym_hpet_page;   long sym_pvclock_page;   long sym_VDSO32_NOTE_MASK;   long sym___kernel_sigreturn;   long sym___kernel_rt_sigreturn;   long sym___kernel_vsyscall;   long 
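struct device_attribute above is the sysfs glue: show() formats at most one page into buf, store() parses what userspace wrote. A read-only sketch (names assumed; DEVICE_ATTR and device_create_file are the stock helpers):

static ssize_t sketch_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);   /* buf is a PAGE_SIZE sysfs buffer */
}
static DEVICE_ATTR(sketch, 0444, sketch_show, NULL);
/* created with device_create_file(dev, &dev_attr_sketch) */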
sym_int80_landing_pad; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    18     typedef __u64 Elf64_Off;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;   219     struct elf64_hdr {   unsigned char e_ident[16U];   Elf64_Half e_type;   Elf64_Half e_machine;   Elf64_Word e_version;   Elf64_Addr e_entry;   Elf64_Off e_phoff;   Elf64_Off e_shoff;   Elf64_Word e_flags;   Elf64_Half e_ehsize;   Elf64_Half e_phentsize;   Elf64_Half e_phnum;   Elf64_Half e_shentsize;   Elf64_Half e_shnum;   Elf64_Half e_shstrndx; } ;   235     typedef struct elf64_hdr Elf64_Ehdr;   314     struct elf64_shdr {   Elf64_Word sh_name;   Elf64_Word sh_type;   Elf64_Xword sh_flags;   Elf64_Addr sh_addr;   Elf64_Off sh_offset;   Elf64_Xword sh_size;   Elf64_Word sh_link;   Elf64_Word sh_info;   Elf64_Xword sh_addralign;   Elf64_Xword sh_entsize; } ;   326     typedef struct elf64_shdr Elf64_Shdr;    53     struct kernel_param ;    58     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    62     struct kparam_string ;    62     struct kparam_array ;    62     union __anonunion____missing_field_name_357 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    62     struct kernel_param {   const char *name;   struct module *mod;   const struct kernel_param_ops *ops;   const u16 perm;   s8 level;   u8 flags;   union __anonunion____missing_field_name_357 __annonCompField81; } ;    83     struct kparam_string {   unsigned int maxlen;   char *string; } ;    89     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;    24     struct latch_tree_node {   struct rb_node node[2U]; } ;   211     struct mod_arch_specific { } ;    39     struct module_param_attrs ;    39     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    50     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;   277     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   284     struct mod_tree_node {   struct module *mod;   struct latch_tree_node node; } ;   291     struct module_layout {   void *base;   unsigned int size;   unsigned int text_size;   unsigned int ro_size;   unsigned int ro_after_init_size;   struct mod_tree_node mtn; } ;   307     struct mod_kallsyms {   Elf64_Sym *symtab;   unsigned int num_symtab;   char *strtab; } ;   321     struct klp_modinfo {   Elf64_Ehdr hdr;   Elf64_Shdr *sechdrs;   char *secstrings;   unsigned int symndx; } ;   329     struct module_sect_attrs ;   329     struct module_notes_attrs ;   329     struct trace_event_call ;   329     struct trace_enum_map ;   329     struct module {   enum module_state 
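struct kernel_param_ops above lets a module plug custom parse/format hooks into module_param_cb(); the stock param_set_int()/param_get_int() helpers already match the set/get signatures shown. A minimal sketch (the ops name and backing variable are assumed):

static int sketch_level = 0;
static const struct kernel_param_ops sketch_param_ops = {
        .set = param_set_int,   /* parse the value written at load time or via sysfs */
        .get = param_get_int,   /* format the current value back to the reader */
};
module_param_cb(level, &sketch_param_ops, &sketch_level, 0644);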
state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const unsigned long *crcs;   unsigned int num_syms;   struct mutex param_lock;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const unsigned long *gpl_crcs;   const struct kernel_symbol *unused_syms;   const unsigned long *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const unsigned long *unused_gpl_crcs;   bool sig_ok;   bool async_probe_requested;   const struct kernel_symbol *gpl_future_syms;   const unsigned long *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   struct module_layout core_layout;   struct module_layout init_layout;   struct mod_arch_specific arch;   unsigned int taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   struct mod_kallsyms *kallsyms;   struct mod_kallsyms core_kallsyms;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct trace_event_call **trace_events;   unsigned int num_trace_events;   struct trace_enum_map **trace_enums;   unsigned int num_trace_enums;   bool klp;   bool klp_alive;   struct klp_modinfo *klp_info;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   atomic_t refcnt;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;    13     typedef unsigned long kernel_ulong_t;    14     struct pci_device_id {   __u32 vendor;   __u32 device;   __u32 subvendor;   __u32 subdevice;   __u32 class;   __u32 class_mask;   kernel_ulong_t driver_data; } ;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   229     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;    70     struct hotplug_slot ;    70     struct pci_slot {   struct pci_bus *bus;   struct list_head list;   struct hotplug_slot *hotplug;   unsigned char number;   struct kobject kobj; } ;   108     typedef int pci_power_t;   135     typedef unsigned int pci_channel_state_t;   136     enum pci_channel_state {   pci_channel_io_normal = 1,   pci_channel_io_frozen = 2,   pci_channel_io_perm_failure = 3 } ;   161     typedef unsigned short pci_dev_flags_t;   188     typedef unsigned short pci_bus_flags_t;   245     struct pcie_link_state ;   246     struct pci_vpd ;   247     struct pci_sriov ;   249     struct pci_driver ;   249     union __anonunion____missing_field_name_375 {   struct pci_sriov *sriov;   struct pci_dev *physfn; } ;   249     struct pci_dev {   struct list_head bus_list;   struct pci_bus *bus;   struct pci_bus *subordinate;   void *sysdata;   struct proc_dir_entry *procent;   struct pci_slot *slot;   unsigned int devfn;   unsigned short vendor;   unsigned short device;   unsigned short subsystem_vendor;   unsigned short subsystem_device;   unsigned int class;   u8 revision;   u8 hdr_type;   u8 pcie_cap;   u8 msi_cap;   u8 msix_cap;   unsigned char pcie_mpss;   u8 
rom_base_reg;   u8 pin;   u16 pcie_flags_reg;   unsigned long *dma_alias_mask;   struct pci_driver *driver;   u64 dma_mask;   struct device_dma_parameters dma_parms;   pci_power_t current_state;   u8 pm_cap;   unsigned char pme_support;   unsigned char pme_interrupt;   unsigned char pme_poll;   unsigned char d1_support;   unsigned char d2_support;   unsigned char no_d1d2;   unsigned char no_d3cold;   unsigned char bridge_d3;   unsigned char d3cold_allowed;   unsigned char mmio_always_on;   unsigned char wakeup_prepared;   unsigned char runtime_d3cold;   unsigned char ignore_hotplug;   unsigned int d3_delay;   unsigned int d3cold_delay;   struct pcie_link_state *link_state;   pci_channel_state_t error_state;   struct device dev;   int cfg_size;   unsigned int irq;   struct cpumask *irq_affinity;   struct resource resource[17U];   bool match_driver;   unsigned char transparent;   unsigned char multifunction;   unsigned char is_added;   unsigned char is_busmaster;   unsigned char no_msi;   unsigned char no_64bit_msi;   unsigned char block_cfg_access;   unsigned char broken_parity_status;   unsigned char irq_reroute_variant;   unsigned char msi_enabled;   unsigned char msix_enabled;   unsigned char ari_enabled;   unsigned char ats_enabled;   unsigned char is_managed;   unsigned char needs_freset;   unsigned char state_saved;   unsigned char is_physfn;   unsigned char is_virtfn;   unsigned char reset_fn;   unsigned char is_hotplug_bridge;   unsigned char __aer_firmware_first_valid;   unsigned char __aer_firmware_first;   unsigned char broken_intx_masking;   unsigned char io_window_1k;   unsigned char irq_managed;   unsigned char has_secondary_link;   unsigned char non_compliant_bars;   pci_dev_flags_t dev_flags;   atomic_t enable_cnt;   u32 saved_config_space[16U];   struct hlist_head saved_cap_space;   struct bin_attribute *rom_attr;   int rom_attr_enabled;   struct bin_attribute *res_attr[17U];   struct bin_attribute *res_attr_wc[17U];   const struct attribute_group **msi_irq_groups;   struct pci_vpd *vpd;   union __anonunion____missing_field_name_375 __annonCompField84;   u16 ats_cap;   u8 ats_stu;   atomic_t ats_ref_cnt;   phys_addr_t rom;   size_t romlen;   char *driver_override; } ;   452     struct pci_ops ;   452     struct msi_controller ;   452     struct pci_bus {   struct list_head node;   struct pci_bus *parent;   struct list_head children;   struct list_head devices;   struct pci_dev *self;   struct list_head slots;   struct resource *resource[4U];   struct list_head resources;   struct resource busn_res;   struct pci_ops *ops;   struct msi_controller *msi;   void *sysdata;   struct proc_dir_entry *procdir;   unsigned char number;   unsigned char primary;   unsigned char max_bus_speed;   unsigned char cur_bus_speed;   char name[48U];   unsigned short bridge_ctl;   pci_bus_flags_t bus_flags;   struct device *bridge;   struct device dev;   struct bin_attribute *legacy_io;   struct bin_attribute *legacy_mem;   unsigned char is_added; } ;   576     struct pci_ops {   int (*add_bus)(struct pci_bus *);   void (*remove_bus)(struct pci_bus *);   void * (*map_bus)(struct pci_bus *, unsigned int, int);   int (*read)(struct pci_bus *, unsigned int, int, int, u32 *);   int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;   606     struct pci_dynids {   spinlock_t lock;   struct list_head list; } ;   620     typedef unsigned int pci_ers_result_t;   630     struct pci_error_handlers {   pci_ers_result_t  (*error_detected)(struct pci_dev *, enum pci_channel_state );   
pci_ers_result_t  (*mmio_enabled)(struct pci_dev *);   pci_ers_result_t  (*link_reset)(struct pci_dev *);   pci_ers_result_t  (*slot_reset)(struct pci_dev *);   void (*reset_notify)(struct pci_dev *, bool );   void (*resume)(struct pci_dev *); } ;   663     struct pci_driver {   struct list_head node;   const char *name;   const struct pci_device_id *id_table;   int (*probe)(struct pci_dev *, const struct pci_device_id *);   void (*remove)(struct pci_dev *);   int (*suspend)(struct pci_dev *, pm_message_t );   int (*suspend_late)(struct pci_dev *, pm_message_t );   int (*resume_early)(struct pci_dev *);   int (*resume)(struct pci_dev *);   void (*shutdown)(struct pci_dev *);   int (*sriov_configure)(struct pci_dev *, int);   const struct pci_error_handlers *err_handler;   struct device_driver driver;   struct pci_dynids dynids; } ;   273     struct vm_fault {   unsigned int flags;   gfp_t gfp_mask;   unsigned long pgoff;   void *virtual_address;   struct page *cow_page;   struct page *page;   void *entry; } ;   308     struct fault_env {   struct vm_area_struct *vma;   unsigned long address;   unsigned int flags;   pmd_t *pmd;   pte_t *pte;   spinlock_t *ptl;   pgtable_t prealloc_pte; } ;   335     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*mremap)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int);   void (*map_pages)(struct fault_env *, unsigned long, unsigned long);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;  2451     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    21     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;    89     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;   158     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long);   void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, 
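struct pci_driver above is the hook table the PCI core calls into; the trace below exercises exactly its probe path (add_card) and, in other runs, its remove path (remove_card). A generic registration sketch (identifiers assumed, not the nosy driver's own code):

static int sketch_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        return pci_enable_device(dev);   /* minimal probe */
}

static void sketch_remove(struct pci_dev *dev)
{
        pci_disable_device(dev);
}

static struct pci_driver sketch_pci_driver = {
        .name   = "sketch",
        .probe  = sketch_probe,
        .remove = sketch_remove,
};
/* registered with pci_register_driver(&sketch_pci_driver) */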
struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;   134     struct pollfd {   int fd;   short events;   short revents; } ;    32     struct poll_table_struct {   void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);   unsigned long _key; } ;    40     typedef struct poll_table_struct poll_table;   103     struct nosy_stats {   __u32 total_packet_count;   __u32 lost_packet_count; } ;    52     struct __anonstruct_buffer_387 {   __le32 control;   __le32 pointer; } ;    52     struct pcl {   __le32 next;   __le32 async_error_next;   u32 user_data;   __le32 pcl_status;   __le32 remaining_transfer_count;   __le32 next_data_buffer;   struct __anonstruct_buffer_387 buffer[13U]; } ;    66     struct packet {   unsigned int length;   char data[0U]; } ;    71     struct packet_buffer {   char *data;   size_t capacity;   long total_packet_count;   long lost_packet_count;   atomic_t size;   struct packet *head;   struct packet *tail;   wait_queue_head_t wait; } ;    80     struct pcilynx {   struct pci_dev *pci_device;   char *registers;   struct pcl *rcv_start_pcl;   struct pcl *rcv_pcl;   __le32 *rcv_buffer;   dma_addr_t rcv_start_pcl_bus;   dma_addr_t rcv_pcl_bus;   dma_addr_t rcv_buffer_bus;   spinlock_t client_list_lock;   struct list_head client_list;   struct miscdevice misc;   struct list_head link;   struct kref kref; } ;   117     struct client {   struct pcilynx *lynx;   u32 tcode_mask;   struct packet_buffer buffer;   struct list_head link; } ;     1     void * __builtin_memcpy(void *, const void *, unsigned long);     1     unsigned long int __builtin_object_size(void *, int);     1     long int __builtin_expect(long exp, long c);   218     void __read_once_size(const volatile void *p, void *res, int size);   243     void __write_once_size(volatile void *p, void *res, int size);    31     void * __memcpy(void *, const void *, size_t );    58     unsigned int readl(const volatile void *addr);    66     void writel(unsigned int val, volatile void *addr);   184     void * ldv_ioremap_nocache_1(resource_size_t ldv_func_arg1, unsigned long ldv_func_arg2);   188     void * ldv_ioremap_nocache_3(resource_size_t ldv_func_arg1, unsigned long ldv_func_arg2);   208     void ldv_iounmap_2(volatile void *addr);   212     void ldv_iounmap_4(volatile void *addr);    33     extern struct module __this_module;   184     void __might_sleep(const char *, int, int);   254     void __might_fault(const char *, int);    71     void warn_slowpath_null(const char *, const int);    16     void __xadd_wrong_size();    24     int atomic_read(const atomic_t *v);    36     void atomic_set(atomic_t *v, int i);    48     void atomic_add(int i, atomic_t *v);    62     void atomic_sub(int i, atomic_t *v);    78     bool  atomic_sub_and_test(int i, atomic_t *v);   154     int atomic_add_return(int i, atomic_t *v);    10     void ldv_error();    26     void * ldv_undef_ptr();     7     int LDV_IO_MEMS = 0;    11     void * ldv_io_mem_remap(void *addr);    23     void ldv_io_mem_unmap(const volatile void *addr);    29     void ldv_check_final_state();    25     void INIT_LIST_HEAD(struct list_head *list);    48     void __list_add(struct list_head *, struct list_head *, struct list_head *);    75     void list_add_tail(struct 
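The ldv_* declarations above are the verifier's environment model for this rule: LDV_IO_MEMS counts live I/O-memory mappings, ldv_check_final_state() runs at module exit, and ldv_error() marks the property violation. A sketch of the model's likely shape (function bodies are assumptions; only the names and the counter come from the trace):

void *ldv_io_mem_remap(void *addr)
{
        void *ptr = ldv_undef_ptr();   /* nondeterministic: the remap may fail */
        if (ptr != (void *)0)
                LDV_IO_MEMS = LDV_IO_MEMS + 1;   /* one more live mapping */
        return ptr;
}

void ldv_io_mem_unmap(const volatile void *addr)
{
        LDV_IO_MEMS = LDV_IO_MEMS - 1;   /* mapping released */
}

void ldv_check_final_state(void)
{
        if (LDV_IO_MEMS != 0)
                ldv_error();   /* some ioremap()ed region was never iounmap()ed */
}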
list_head *new, struct list_head *head);   112     void __list_del_entry(struct list_head *);   143     void list_del_init(struct list_head *entry);   187     int list_empty(const struct list_head *head);   138     void mutex_lock_nested(struct mutex *, unsigned int);   174     void mutex_unlock(struct mutex *);    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    22     void _raw_spin_lock(raw_spinlock_t *);    31     void _raw_spin_lock_irq(raw_spinlock_t *);    41     void _raw_spin_unlock(raw_spinlock_t *);    43     void _raw_spin_unlock_irq(raw_spinlock_t *);   289     raw_spinlock_t * spinlock_check(spinlock_t *lock);   300     void spin_lock(spinlock_t *lock);   330     void spin_lock_irq(spinlock_t *lock);   345     void spin_unlock(spinlock_t *lock);   355     void spin_unlock_irq(spinlock_t *lock);    72     void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);   200     void __wake_up(wait_queue_head_t *, unsigned int, int, void *);   990     long int prepare_to_wait_event(wait_queue_head_t *, wait_queue_t *, int);   991     void finish_wait(wait_queue_head_t *, wait_queue_t *);    58     void getnstimeofday64(struct timespec *);    31     void kref_init(struct kref *kref);    40     void kref_get(struct kref *kref);    67     int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *));    96     int kref_put(struct kref *kref, void (*release)(struct kref *));   832     unsigned int iminor(const struct inode *inode);  2819     int nonseekable_open(struct inode *, struct file *);   441     void schedule();   912     void * dev_get_drvdata(const struct device *dev);   917     void dev_set_drvdata(struct device *dev, void *data);  1135     void dev_err(const struct device *, const char *, ...);  1141     void _dev_info(const struct device *, const char *, ...);     5     void kasan_check_read(const void *, unsigned int);   697     unsigned long int _copy_to_user(void *, const void *, unsigned int);   722     void __copy_to_user_overflow();   775     unsigned long int copy_to_user(void *to, const void *from, unsigned long n);   139     int request_threaded_irq(unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   144     int request_irq(unsigned int irq, irqreturn_t  (*handler)(int, void *), unsigned long flags, const char *name, void *dev);   158     void free_irq(unsigned int, void *);    70     int misc_register(struct miscdevice *);    71     void misc_deregister(struct miscdevice *);   154     void kfree(const void *);   318     void * __kmalloc(size_t , gfp_t );   466     void * kmalloc(size_t size, gfp_t flags);   622     void * kzalloc(size_t size, gfp_t flags);   994     int pci_enable_device(struct pci_dev *);  1011     void pci_disable_device(struct pci_dev *);  1014     void pci_set_master(struct pci_dev *);  1606     void * pci_get_drvdata(struct pci_dev *pdev);  1611     void pci_set_drvdata(struct pci_dev *pdev, void *data);    53     void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);    28     extern struct dma_map_ops *dma_ops;    30     struct dma_map_ops * get_dma_ops(struct device *dev);    42     bool  arch_dma_alloc_attrs(struct device **, gfp_t *);    46     int dma_supported(struct device *, u64 );   404     void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);   445     void * dma_alloc_coherent(struct device 
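copy_to_user() above returns the number of bytes it could not copy, so a read path such as packet_buffer_get() (declared below) must clamp to the caller's buffer and turn any partial copy into -EFAULT. A hedged sketch using the struct packet layout above (helper name assumed):

static ssize_t sketch_copy_packet(char *to_user, const struct packet *p,
                                  size_t user_length)
{
        size_t n = user_length < p->length ? user_length : p->length;

        if (copy_to_user(to_user, p->data, n))
                return -EFAULT;   /* partial copy: report a fault, not a byte count */
        return n;
}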
*dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
  451     void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);
  499     int dma_set_mask(struct device *dev, u64 mask);
   16     void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
   31     void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
  113     int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
   42     void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p);
   51     char driver_name[5U] = { 'n', 'o', 's', 'y', '\x0' };
   99     struct pcilynx * lynx_get(struct pcilynx *lynx);
  107     void lynx_release(struct kref *kref);
  113     void lynx_put(struct pcilynx *lynx);
  125     struct mutex card_mutex = { { 1 }, { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "card_mutex.wait_lock", 0, 0UL } } } }, { &(card_mutex.wait_list), &(card_mutex.wait_list) }, 0, (void *)(&card_mutex), { 0, { 0, 0 }, "card_mutex", 0, 0UL } };
  126     struct list_head card_list = { &card_list, &card_list };
  129     int packet_buffer_init(struct packet_buffer *buffer, size_t capacity);
  145     void packet_buffer_destroy(struct packet_buffer *buffer);
  151     int packet_buffer_get(struct client *client, char *data, size_t user_length);
  195     void packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length);
  228     void reg_write(struct pcilynx *lynx, int offset, u32 data);
  234     u32  reg_read(struct pcilynx *lynx, int offset);
  240     void reg_set_bits(struct pcilynx *lynx, int offset, u32 mask);
  250     void run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus, int dmachan);
  259     int set_phy_reg(struct pcilynx *lynx, int addr, int val);
  278     int nosy_open(struct inode *inode, struct file *file);
  316     int nosy_release(struct inode *inode, struct file *file);
  333     unsigned int nosy_poll(struct file *file, poll_table *pt);
  350     ssize_t  nosy_read(struct file *file, char *buffer, size_t count, loff_t *offset);
  358     long int nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
  403     const struct file_operations nosy_ops = { &__this_module, 0, &nosy_read, 0, 0, 0, 0, 0, &nosy_poll, &nosy_ioctl, 0, 0, &nosy_open, 0, &nosy_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  415     void packet_irq_handler(struct pcilynx *lynx);
  447     void bus_reset_irq_handler(struct pcilynx *lynx);
  465     irqreturn_t  irq_handler(int irq, void *device);
  505     void remove_card(struct pci_dev *dev);
  538     int add_card(struct pci_dev *dev, const struct pci_device_id *unused);
  701     const struct pci_device_id __mod_pci__pci_table_device_table[2U] = {  };
  735     void ldv_check_return_value(int);
  738     void ldv_check_return_value_probe(int);
  741     void ldv_initialize();
  744     void ldv_handler_precall();
  747     int nondet_int();
  750     int LDV_IN_INTERRUPT = 0;
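lynx_get(), lynx_release() and lynx_put() declared above manage the kref embedded in struct pcilynx. Their bodies are not expanded in this trace; a sketch of the usual pattern they almost certainly follow (bodies are assumptions, only the signatures are taken from the declarations):

struct pcilynx *lynx_get(struct pcilynx *lynx)
{
        kref_get(&lynx->kref);          /* caller holds a reference already */
        return lynx;
}

void lynx_release(struct kref *kref)
{
        kfree(container_of(kref, struct pcilynx, kref));   /* last reference gone */
}

void lynx_put(struct pcilynx *lynx)
{
        kref_put(&lynx->kref, lynx_release);
}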
  753     void ldv_main0_sequence_infinite_withcheck_stateful()
          { /* generated entry point: drives the driver's handlers with nondeterministic inputs */
  755       struct file *var_group1;
  756       char *var_nosy_read_15_p1;
  757       unsigned long var_nosy_read_15_p2;
  758       loff_t *var_nosy_read_15_p3;
  759       long res_nosy_read_15;
  760       unsigned int var_nosy_ioctl_16_p1;
  761       unsigned long var_nosy_ioctl_16_p2;
  762       poll_table *var_nosy_poll_14_p1;
  763       struct inode *var_group2;
  764       int res_nosy_open_12;
  765       struct pci_dev *var_group3;
  766       const struct pci_device_id *var_add_card_21_p1;
  767       int res_add_card_21;
  768       int var_irq_handler_19_p0;
  769       void *var_irq_handler_19_p1;
  770       int ldv_s_nosy_ops_file_operations;
  771       int ldv_s_lynx_pci_driver_pci_driver;
  772       int tmp;
  773       int tmp___0;
  895       ldv_s_nosy_ops_file_operations = 0;
  898       ldv_s_lynx_pci_driver_pci_driver = 0;
  885       LDV_IN_INTERRUPT = 1;
  894       ldv_initialize() { /* Function call is skipped due to function is undefined */ }
  903       goto ldv_35019;
  903       tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */ }
  907       goto ldv_35018;
  904       ldv_35018:;
  908       tmp = nondet_int() { /* Function call is skipped due to function is undefined */ }
  908       switch (tmp);
            { /* chosen case: the pci_driver probe hook, add_card(), expanded below */
  540         struct pcilynx *lynx;
  541         unsigned int p;
  542         unsigned int end;
  543         int ret;
  544         int i;
  545         int tmp;
  546         int tmp___0;
  547         void *tmp___1;
  548         struct lock_class_key __key;
  549         void *tmp___2;
  550         void *tmp___3;
  551         void *tmp___4;
  552         void *tmp___5;
  553         int tmp___6;
              { /* pci_set_dma_mask() forwards to dma_set_mask() */
  115           int tmp;
                { /* dma_set_mask() */
  501             struct dma_map_ops *ops;
  502             struct dma_map_ops *tmp;
  503             int tmp___0;
  504             int tmp___1;
                  { /* get_dma_ops(): per-device ops are installed */
   32               long tmp;
   35               assume(!(tmp != 0L));
   35               assume(!(((unsigned long)(dev->archdata.dma_ops)) == ((unsigned long)((struct dma_map_ops *)0))));
   38               return dev->archdata.dma_ops;
                  }
  501             ops = tmp;
  503             unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->set_dma_mask);
  503             assume(__CPAchecker_TMP_0 != ((unsigned long)((int (*)(struct device *, u64 ))0)));
  504             assume(!((ops->set_dma_mask) == (&nosy_open)));
  504             assume(!((ops->set_dma_mask) == (&nosy_poll)));
  504             assume(!((ops->set_dma_mask) == (&nosy_release)));
  504             tmp___0 = (*(ops->set_dma_mask))(dev, mask);
  504             return tmp___0;
                }
  115           return tmp;
              }
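The expansion above shows the dispatch this kernel uses: get_dma_ops() prefers dev->archdata.dma_ops and falls back to the global dma_ops, and dma_set_mask() defers to the ->set_dma_mask hook when one is installed. Condensed into C (a sketch matching the assume() steps above, not the verbatim kernel source):

static inline int sketch_dma_set_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->set_dma_mask)
                return ops->set_dma_mask(dev, mask);   /* the path taken in this trace */
        if (!dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}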
*)p) = *((__u64 *)res);   249           goto ldv_900;   256           return ;;               }   28         list->prev = list;    29         return ;;             }            {               {    38           union __anonunion___u_49 __u;    38           __u.__val = i;                 {   245             switch (size);   246             assume(!(size == 1));   247             assume(!(size == 2));   248             assume(size == 4);   248             *((volatile __u32 *)p) = *((__u32 *)res);   248             goto ldv_900;   256             return ;;                 }   40           return ;;               }   34         return ;;             }            {    47         void *tmp;               {             }   13           void *ptr;    14           void *tmp;    14           tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */}    14           ptr = tmp;               }  568       lynx->registers = (char *)tmp___2;             {    18         void *tmp;    19         struct device *__CPAchecker_TMP_0;    19         assume(!(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0))));    19         __CPAchecker_TMP_0 = (struct device *)0;               {   448           void *tmp;                 {   407             struct dma_map_ops *ops;   408             struct dma_map_ops *tmp;   409             void *cpu_addr;   410             long tmp___0;   411             _Bool tmp___1;   412             int tmp___2;                   {    32               long tmp;    35               assume(tmp != 0L);    36               return dma_ops;;                   }  408             ops = tmp;   411             assume(!(tmp___0 != 0L));   416             tmp___1 = arch_dma_alloc_attrs(&dev, &flag) { /* Function call is skipped due to function is undefined */}   416             assume(!(tmp___1 == 0));   416             tmp___2 = 0;   416             assume(tmp___2 == 0);   418             unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);   418             assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void * (*)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long))0))));   421             cpu_addr = (*(ops->alloc))(dev, size, dma_handle, flag, attrs);   422             debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr) { /* Function call is skipped due to function is undefined */}   423             return cpu_addr;;                 }  448           return tmp;;               }   19         return tmp;;             }  571       lynx->rcv_start_pcl = (struct pcl *)tmp___3;             {    18         void *tmp;    19         struct device *__CPAchecker_TMP_0;    19         assume(!(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0))));    19         __CPAchecker_TMP_0 = (struct device *)0;               {   448           void *tmp;                 {   407             struct dma_map_ops *ops;   408             struct dma_map_ops *tmp;   409             void *cpu_addr;   410             long tmp___0;   411             _Bool tmp___1;   412             int tmp___2;                   {    32               long tmp;    35               assume(tmp != 0L);    36               return dma_ops;;                   }  408             ops = tmp;   411             assume(!(tmp___0 != 0L));   416             tmp___1 = arch_dma_alloc_attrs(&dev, &flag) { /* Function call is skipped due to function is undefined */}   416             assume(!(tmp___1 == 0));   416             tmp___2 = 0;   416             assume(tmp___2 == 0);   418 
            unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);   418             assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void * (*)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long))0))));   421             cpu_addr = (*(ops->alloc))(dev, size, dma_handle, flag, attrs);   422             debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr) { /* Function call is skipped due to function is undefined */}   423             return cpu_addr;;                 }  448           return tmp;;               }   19         return tmp;;             }  573       lynx->rcv_pcl = (struct pcl *)tmp___4;             {    18         void *tmp;    19         struct device *__CPAchecker_TMP_0;    19         assume(!(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0))));    19         __CPAchecker_TMP_0 = (struct device *)0;               {   448           void *tmp;                 {   407             struct dma_map_ops *ops;   408             struct dma_map_ops *tmp;   409             void *cpu_addr;   410             long tmp___0;   411             _Bool tmp___1;   412             int tmp___2;                   {    32               long tmp;    35               assume(tmp != 0L);    36               return dma_ops;;                   }  408             ops = tmp;   411             assume(!(tmp___0 != 0L));   416             tmp___1 = arch_dma_alloc_attrs(&dev, &flag) { /* Function call is skipped due to function is undefined */}   416             assume(!(tmp___1 == 0));   416             tmp___2 = 0;   416             assume(tmp___2 == 0);   418             unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);   418             assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void * (*)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long))0))));   421             cpu_addr = (*(ops->alloc))(dev, size, dma_handle, flag, attrs);   422             debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr) { /* Function call is skipped due to function is undefined */}   423             return cpu_addr;;                 }  448           return tmp;;               }   19         return tmp;;             }  575       lynx->rcv_buffer = (__le32 *)tmp___5;   577       unsigned long __CPAchecker_TMP_0 = (unsigned long)(lynx->rcv_start_pcl);   580       dev_err((const struct device *)(&(dev->dev)), "Failed to allocate receive buffer\n") { /* Function call is skipped due to function is undefined */}   581       ret = -12;   582       goto fail_deallocate;   673       unsigned long __CPAchecker_TMP_7 = (unsigned long)(lynx->rcv_start_pcl);   676       unsigned long __CPAchecker_TMP_9 = (unsigned long)(lynx->rcv_pcl);   679       unsigned long __CPAchecker_TMP_11 = (unsigned long)(lynx->rcv_buffer);   682       volatile void *__CPAchecker_TMP_13 = (volatile void *)(lynx->registers);             {           }}  |              Source code         
     1 #ifndef _ASM_X86_ATOMIC_H
    2 #define _ASM_X86_ATOMIC_H
    3 
    4 #include <linux/compiler.h>
    5 #include <linux/types.h>
    6 #include <asm/alternative.h>
    7 #include <asm/cmpxchg.h>
    8 #include <asm/rmwcc.h>
    9 #include <asm/barrier.h>
   10 
   11 /*
   12  * Atomic operations that C can't guarantee us.  Useful for
   13  * resource counting etc..
   14  */
   15 
   16 #define ATOMIC_INIT(i)	{ (i) }
   17 
   18 /**
   19  * atomic_read - read atomic variable
   20  * @v: pointer of type atomic_t
   21  *
   22  * Atomically reads the value of @v.
   23  */
   24 static __always_inline int atomic_read(const atomic_t *v)
   25 {
   26 	return READ_ONCE((v)->counter);
   27 }
   28 
   29 /**
   30  * atomic_set - set atomic variable
   31  * @v: pointer of type atomic_t
   32  * @i: required value
   33  *
   34  * Atomically sets the value of @v to @i.
   35  */
   36 static __always_inline void atomic_set(atomic_t *v, int i)
   37 {
   38 	WRITE_ONCE(v->counter, i);
   39 }
   40 
   41 /**
   42  * atomic_add - add integer to atomic variable
   43  * @i: integer value to add
   44  * @v: pointer of type atomic_t
   45  *
   46  * Atomically adds @i to @v.
   47  */
   48 static __always_inline void atomic_add(int i, atomic_t *v)
   49 {
   50 	asm volatile(LOCK_PREFIX "addl %1,%0"
   51 		     : "+m" (v->counter)
   52 		     : "ir" (i));
   53 }
   54 
   55 /**
   56  * atomic_sub - subtract integer from atomic variable
   57  * @i: integer value to subtract
   58  * @v: pointer of type atomic_t
   59  *
   60  * Atomically subtracts @i from @v.
   61  */
   62 static __always_inline void atomic_sub(int i, atomic_t *v)
   63 {
   64 	asm volatile(LOCK_PREFIX "subl %1,%0"
   65 		     : "+m" (v->counter)
   66 		     : "ir" (i));
   67 }
   68 
   69 /**
   70  * atomic_sub_and_test - subtract value from variable and test result
   71  * @i: integer value to subtract
   72  * @v: pointer of type atomic_t
   73  *
   74  * Atomically subtracts @i from @v and returns
   75  * true if the result is zero, or false for all
   76  * other cases.
   77  */
   78 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
   79 {
   80 	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
   81 }
   82 
   83 /**
   84  * atomic_inc - increment atomic variable
   85  * @v: pointer of type atomic_t
   86  *
   87  * Atomically increments @v by 1.
   88  */
   89 static __always_inline void atomic_inc(atomic_t *v)
   90 {
   91 	asm volatile(LOCK_PREFIX "incl %0"
   92 		     : "+m" (v->counter));
   93 }
   94 
   95 /**
   96  * atomic_dec - decrement atomic variable
   97  * @v: pointer of type atomic_t
   98  *
   99  * Atomically decrements @v by 1.
  100  */
  101 static __always_inline void atomic_dec(atomic_t *v)
  102 {
  103 	asm volatile(LOCK_PREFIX "decl %0"
  104 		     : "+m" (v->counter));
  105 }
  106 
  107 /**
  108  * atomic_dec_and_test - decrement and test
  109  * @v: pointer of type atomic_t
  110  *
  111  * Atomically decrements @v by 1 and
  112  * returns true if the result is 0, or false for all other
  113  * cases.
  114  */
  115 static __always_inline bool atomic_dec_and_test(atomic_t *v)
  116 {
  117 	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
  118 }
  119 
  120 /**
  121  * atomic_inc_and_test - increment and test
  122  * @v: pointer of type atomic_t
  123  *
  124  * Atomically increments @v by 1
  125  * and returns true if the result is zero, or false for all
  126  * other cases.
  127  */
  128 static __always_inline bool atomic_inc_and_test(atomic_t *v)
  129 {
  130 	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
  131 }
  132 
  133 /**
  134  * atomic_add_negative - add and test if negative
  135  * @i: integer value to add
  136  * @v: pointer of type atomic_t
  137  *
  138  * Atomically adds @i to @v and returns true
  139  * if the result is negative, or false when
  140  * result is greater than or equal to zero.
  141  */
  142 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
  143 {
  144 	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
  145 }
  146 
  147 /**
  148  * atomic_add_return - add integer and return
  149  * @i: integer value to add
  150  * @v: pointer of type atomic_t
  151  *
  152  * Atomically adds @i to @v and returns @i + @v
  153  */
  154 static __always_inline int atomic_add_return(int i, atomic_t *v)
  155 {
  156 	return i + xadd(&v->counter, i);
  157 }
  158 
  159 /**
  160  * atomic_sub_return - subtract integer and return
  161  * @v: pointer of type atomic_t
  162  * @i: integer value to subtract
  163  *
  164  * Atomically subtracts @i from @v and returns @v - @i
  165  */
  166 static __always_inline int atomic_sub_return(int i, atomic_t *v)
  167 {
  168 	return atomic_add_return(-i, v);
  169 }
  170 
  171 #define atomic_inc_return(v)  (atomic_add_return(1, v))
  172 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
  173 
  174 static __always_inline int atomic_fetch_add(int i, atomic_t *v)
  175 {
  176 	return xadd(&v->counter, i);
  177 }
  178 
  179 static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
  180 {
  181 	return xadd(&v->counter, -i);
  182 }
  183 
  184 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
  185 {
  186 	return cmpxchg(&v->counter, old, new);
  187 }
  188 
  189 static inline int atomic_xchg(atomic_t *v, int new)
  190 {
  191 	return xchg(&v->counter, new);
  192 }
  193 
  194 #define ATOMIC_OP(op)							\
  195 static inline void atomic_##op(int i, atomic_t *v)			\
  196 {									\
  197 	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
  198 			: "+m" (v->counter)				\
  199 			: "ir" (i)					\
  200 			: "memory");					\
  201 }
  202 
  203 #define ATOMIC_FETCH_OP(op, c_op)					\
  204 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
  205 {									\
  206 	int old, val = atomic_read(v);					\
  207 	for (;;) {							\
  208 		old = atomic_cmpxchg(v, val, val c_op i);		\
  209 		if (old == val)						\
  210 			break;						\
  211 		val = old;						\
  212 	}								\
  213 	return old;							\
  214 }
  215 
  216 #define ATOMIC_OPS(op, c_op)						\
  217 	ATOMIC_OP(op)							\
  218 	ATOMIC_FETCH_OP(op, c_op)
  219 
  220 ATOMIC_OPS(and, &)
  221 ATOMIC_OPS(or , |)
  222 ATOMIC_OPS(xor, ^)
  223 
  224 #undef ATOMIC_OPS
  225 #undef ATOMIC_FETCH_OP
  226 #undef ATOMIC_OP
  227 
  228 /**
  229  * __atomic_add_unless - add unless the number is already a given value
  230  * @v: pointer of type atomic_t
  231  * @a: the amount to add to v...
  232  * @u: ...unless v is equal to u.
  233  *
  234  * Atomically adds @a to @v, so long as @v was not already @u.
  235  * Returns the old value of @v.
  236  */
  237 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
  238 {
  239 	int c, old;
  240 	c = atomic_read(v);
  241 	for (;;) {
  242 		if (unlikely(c == (u)))
  243 			break;
  244 		old = atomic_cmpxchg((v), c, c + (a));
  245 		if (likely(old == c))
  246 			break;
  247 		c = old;
  248 	}
  249 	return c;
  250 }
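/*
 * Editor's note -- illustrative sketch, not part of the verified source:
 * __atomic_add_unless() is the classic compare-and-swap retry loop. A
 * typical (hypothetical) caller is a "take a reference unless the count
 * already dropped to zero" helper:
 *
 *	static bool get_ref_unless_zero(atomic_t *refcount)
 *	{
 *		// returns the old value; an old value of 0 means the
 *		// object is already being freed, so no reference is taken
 *		return __atomic_add_unless(refcount, 1, 0) != 0;
 *	}
 */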
  251 
  252 /**
  253  * atomic_inc_short - increment of a short integer
   254  * @v: pointer of type short int
   255  *
   256  * Atomically adds 1 to @v.
   257  * Returns the new value of @v.
  258  */
  259 static __always_inline short int atomic_inc_short(short int *v)
  260 {
  261 	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
  262 	return *v;
  263 }
  264 
  265 #ifdef CONFIG_X86_32
  266 # include <asm/atomic64_32.h>
  267 #else
  268 # include <asm/atomic64_64.h>
  269 #endif
  270 
   271 #endif /* _ASM_X86_ATOMIC_H */

     1 #ifndef _ASM_X86_DMA_MAPPING_H
    2 #define _ASM_X86_DMA_MAPPING_H
    3 
    4 /*
    5  * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
    6  * Documentation/DMA-API.txt for documentation.
    7  */
    8 
    9 #include <linux/kmemcheck.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/dma-debug.h>
   12 #include <asm/io.h>
   13 #include <asm/swiotlb.h>
   14 #include <linux/dma-contiguous.h>
   15 
   16 #ifdef CONFIG_ISA
   17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
   18 #else
   19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
   20 #endif
   21 
   22 #define DMA_ERROR_CODE	0
   23 
   24 extern int iommu_merge;
   25 extern struct device x86_dma_fallback_dev;
   26 extern int panic_on_overflow;
   27 
   28 extern struct dma_map_ops *dma_ops;
   29 
   30 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
   31 {
   32 #ifndef CONFIG_X86_DEV_DMA_OPS
   33 	return dma_ops;
   34 #else
   35 	if (unlikely(!dev) || !dev->archdata.dma_ops)
   36 		return dma_ops;
   37 	else
   38 		return dev->archdata.dma_ops;
   39 #endif
   40 }
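/*
 * Editor's note -- illustrative sketch, not part of the verified source:
 * every generic DMA entry point dispatches through this per-device ops
 * table, falling back to the global dma_ops. The dma_set_mask() step in
 * the error trace above resolves ops->set_dma_mask via get_dma_ops() and
 * then calls it indirectly, roughly:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	if (ops->set_dma_mask)
 *		return ops->set_dma_mask(dev, mask);	// indirect call
 */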
   41 
   42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
   43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
   44 
   45 #define HAVE_ARCH_DMA_SUPPORTED 1
   46 extern int dma_supported(struct device *hwdev, u64 mask);
   47 
   48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
   49 					dma_addr_t *dma_addr, gfp_t flag,
   50 					unsigned long attrs);
   51 
   52 extern void dma_generic_free_coherent(struct device *dev, size_t size,
   53 				      void *vaddr, dma_addr_t dma_addr,
   54 				      unsigned long attrs);
   55 
   56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
   57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
   58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
   59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
   60 #else
   61 
   62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
   63 {
   64 	if (!dev->dma_mask)
   65 		return 0;
   66 
   67 	return addr + size - 1 <= *dev->dma_mask;
   68 }
   69 
   70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
   71 {
   72 	return paddr;
   73 }
   74 
   75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
   76 {
   77 	return daddr;
   78 }
   79 #endif /* CONFIG_X86_DMA_REMAP */
   80 
   81 static inline void
   82 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
   83 	enum dma_data_direction dir)
   84 {
   85 	flush_write_buffers();
   86 }
   87 
   88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
   89 						    gfp_t gfp)
   90 {
   91 	unsigned long dma_mask = 0;
   92 
   93 	dma_mask = dev->coherent_dma_mask;
   94 	if (!dma_mask)
   95 		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
   96 
   97 	return dma_mask;
   98 }
   99 
  100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
  101 {
  102 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
  103 
  104 	if (dma_mask <= DMA_BIT_MASK(24))
  105 		gfp |= GFP_DMA;
  106 #ifdef CONFIG_X86_64
  107 	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
  108 		gfp |= GFP_DMA32;
  109 #endif
  110        return gfp;
  111 }
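/*
 * Editor's note -- illustrative sketch, not part of the verified source:
 * the two helpers above translate a device's coherent DMA mask into GFP
 * zone flags: a mask of 24 bits or less forces ZONE_DMA, and on x86-64 a
 * mask of 32 bits or less falls back to ZONE_DMA32. For example, with a
 * hypothetical device:
 *
 *	dev->coherent_dma_mask = DMA_BIT_MASK(32);
 *	gfp = dma_alloc_coherent_gfp_flags(dev, GFP_KERNEL);
 *	// on CONFIG_X86_64 kernels, gfp now carries GFP_DMA32
 */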
  112 
   113 #endif

     1 
    2 /*
    3  * nosy - Snoop mode driver for TI PCILynx 1394 controllers
    4  * Copyright (C) 2002-2007 Kristian Høgsberg
    5  *
    6  * This program is free software; you can redistribute it and/or modify
    7  * it under the terms of the GNU General Public License as published by
    8  * the Free Software Foundation; either version 2 of the License, or
    9  * (at your option) any later version.
   10  *
   11  * This program is distributed in the hope that it will be useful,
   12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   14  * GNU General Public License for more details.
   15  *
   16  * You should have received a copy of the GNU General Public License
   17  * along with this program; if not, write to the Free Software Foundation,
   18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   19  */
   20 
   21 #include <linux/device.h>
   22 #include <linux/errno.h>
   23 #include <linux/fs.h>
   24 #include <linux/init.h>
   25 #include <linux/interrupt.h>
   26 #include <linux/io.h>
   27 #include <linux/kernel.h>
   28 #include <linux/kref.h>
   29 #include <linux/miscdevice.h>
   30 #include <linux/module.h>
   31 #include <linux/mutex.h>
   32 #include <linux/pci.h>
   33 #include <linux/poll.h>
   34 #include <linux/sched.h> /* required for linux/wait.h */
   35 #include <linux/slab.h>
   36 #include <linux/spinlock.h>
   37 #include <linux/time64.h>
   38 #include <linux/timex.h>
   39 #include <linux/uaccess.h>
   40 #include <linux/wait.h>
   41 #include <linux/dma-mapping.h>
   42 #include <linux/atomic.h>
   43 #include <asm/byteorder.h>
   44 
   45 #include "nosy.h"
   46 #include "nosy-user.h"
   47 
   48 #define TCODE_PHY_PACKET		0x10
   49 #define PCI_DEVICE_ID_TI_PCILYNX	0x8000
   50 
   51 static char driver_name[] = KBUILD_MODNAME;
   52 
   53 /* this is the physical layout of a PCL, its size is 128 bytes */
   54 struct pcl {
   55 	__le32 next;
   56 	__le32 async_error_next;
   57 	u32 user_data;
   58 	__le32 pcl_status;
   59 	__le32 remaining_transfer_count;
   60 	__le32 next_data_buffer;
   61 	struct {
   62 		__le32 control;
   63 		__le32 pointer;
   64 	} buffer[13];
   65 };
   66 
   67 struct packet {
   68 	unsigned int length;
   69 	char data[0];
   70 };
   71 
   72 struct packet_buffer {
   73 	char *data;
   74 	size_t capacity;
   75 	long total_packet_count, lost_packet_count;
   76 	atomic_t size;
   77 	struct packet *head, *tail;
   78 	wait_queue_head_t wait;
   79 };
   80 
   81 struct pcilynx {
   82 	struct pci_dev *pci_device;
   83 	__iomem char *registers;
   84 
   85 	struct pcl *rcv_start_pcl, *rcv_pcl;
   86 	__le32 *rcv_buffer;
   87 
   88 	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;
   89 
   90 	spinlock_t client_list_lock;
   91 	struct list_head client_list;
   92 
   93 	struct miscdevice misc;
   94 	struct list_head link;
   95 	struct kref kref;
   96 };
   97 
   98 static inline struct pcilynx *
   99 lynx_get(struct pcilynx *lynx)
  100 {
  101 	kref_get(&lynx->kref);
  102 
  103 	return lynx;
  104 }
  105 
  106 static void
  107 lynx_release(struct kref *kref)
  108 {
  109 	kfree(container_of(kref, struct pcilynx, kref));
  110 }
  111 
  112 static inline void
  113 lynx_put(struct pcilynx *lynx)
  114 {
  115 	kref_put(&lynx->kref, lynx_release);
  116 }
  117 
  118 struct client {
  119 	struct pcilynx *lynx;
  120 	u32 tcode_mask;
  121 	struct packet_buffer buffer;
  122 	struct list_head link;
  123 };
  124 
  125 static DEFINE_MUTEX(card_mutex);
  126 static LIST_HEAD(card_list);
  127 
  128 static int
  129 packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
  130 {
  131 	buffer->data = kmalloc(capacity, GFP_KERNEL);
  132 	if (buffer->data == NULL)
  133 		return -ENOMEM;
  134 	buffer->head = (struct packet *) buffer->data;
  135 	buffer->tail = (struct packet *) buffer->data;
  136 	buffer->capacity = capacity;
  137 	buffer->lost_packet_count = 0;
  138 	atomic_set(&buffer->size, 0);
  139 	init_waitqueue_head(&buffer->wait);
  140 
  141 	return 0;
  142 }
  143 
  144 static void
  145 packet_buffer_destroy(struct packet_buffer *buffer)
  146 {
  147 	kfree(buffer->data);
  148 }
  149 
  150 static int
  151 packet_buffer_get(struct client *client, char __user *data, size_t user_length)
  152 {
  153 	struct packet_buffer *buffer = &client->buffer;
  154 	size_t length;
  155 	char *end;
  156 
  157 	if (wait_event_interruptible(buffer->wait,
  158 				     atomic_read(&buffer->size) > 0) ||
  159 				     list_empty(&client->lynx->link))
  160 		return -ERESTARTSYS;
  161 
  162 	if (atomic_read(&buffer->size) == 0)
  163 		return -ENODEV;
  164 
  165 	/* FIXME: Check length <= user_length. */
  166 
  167 	end = buffer->data + buffer->capacity;
  168 	length = buffer->head->length;
  169 
  170 	if (&buffer->head->data[length] < end) {
  171 		if (copy_to_user(data, buffer->head->data, length))
  172 			return -EFAULT;
  173 		buffer->head = (struct packet *) &buffer->head->data[length];
  174 	} else {
  175 		size_t split = end - buffer->head->data;
  176 
  177 		if (copy_to_user(data, buffer->head->data, split))
  178 			return -EFAULT;
  179 		if (copy_to_user(data + split, buffer->data, length - split))
  180 			return -EFAULT;
  181 		buffer->head = (struct packet *) &buffer->data[length - split];
  182 	}
  183 
  184 	/*
  185 	 * Decrease buffer->size as the last thing, since this is what
  186 	 * keeps the interrupt from overwriting the packet we are
  187 	 * retrieving from the buffer.
  188 	 */
  189 	atomic_sub(sizeof(struct packet) + length, &buffer->size);
  190 
  191 	return length;
  192 }
  193 
  194 static void
  195 packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
  196 {
  197 	char *end;
  198 
  199 	buffer->total_packet_count++;
  200 
  201 	if (buffer->capacity <
  202 	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
  203 		buffer->lost_packet_count++;
  204 		return;
  205 	}
  206 
  207 	end = buffer->data + buffer->capacity;
  208 	buffer->tail->length = length;
  209 
  210 	if (&buffer->tail->data[length] < end) {
  211 		memcpy(buffer->tail->data, data, length);
  212 		buffer->tail = (struct packet *) &buffer->tail->data[length];
  213 	} else {
  214 		size_t split = end - buffer->tail->data;
  215 
  216 		memcpy(buffer->tail->data, data, split);
  217 		memcpy(buffer->data, data + split, length - split);
  218 		buffer->tail = (struct packet *) &buffer->data[length - split];
  219 	}
  220 
  221 	/* Finally, adjust buffer size and wake up userspace reader. */
  222 
  223 	atomic_add(sizeof(struct packet) + length, &buffer->size);
  224 	wake_up_interruptible(&buffer->wait);
  225 }
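/*
 * Editor's note -- illustrative sketch, not part of the verified source:
 * packet_buffer_get() and packet_buffer_put() form a single-producer,
 * single-consumer ring, and the update ordering is the entire
 * synchronization story: the consumer subtracts from buffer->size only
 * after copying the packet out, and the producer adds to buffer->size
 * only after its memcpy, so each atomic update publishes a fully written
 * (or fully drained) region to the other side. Schematically:
 *
 *	// producer (IRQ):  write bytes;        atomic_add(size);  wake_up();
 *	// consumer (read): wait for size > 0;  copy bytes;        atomic_sub(size);
 */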
  226 
  227 static inline void
  228 reg_write(struct pcilynx *lynx, int offset, u32 data)
  229 {
  230 	writel(data, lynx->registers + offset);
  231 }
  232 
  233 static inline u32
  234 reg_read(struct pcilynx *lynx, int offset)
  235 {
  236 	return readl(lynx->registers + offset);
  237 }
  238 
  239 static inline void
  240 reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
  241 {
  242 	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
  243 }
  244 
  245 /*
  246  * Maybe the pcl programs could be set up to just append data instead
  247  * of using a whole packet.
  248  */
  249 static inline void
  250 run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
  251 			   int dmachan)
  252 {
  253 	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
  254 	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
  255 		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
  256 }
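/*
 * Editor's note -- an assumption read off the offsets used above: each
 * DMA channel's register bank appears to be 0x20 bytes wide, so channel
 * n's CURRENT_PCL and CHAN_CTRL registers sit at DMA0_* + n * 0x20.
 * Starting receive DMA is then the two register writes above, e.g. for
 * channel 0:
 *
 *	run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
 */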
  257 
  258 static int
  259 set_phy_reg(struct pcilynx *lynx, int addr, int val)
  260 {
  261 	if (addr > 15) {
  262 		dev_err(&lynx->pci_device->dev,
  263 			"PHY register address %d out of range\n", addr);
  264 		return -1;
  265 	}
  266 	if (val > 0xff) {
  267 		dev_err(&lynx->pci_device->dev,
  268 			"PHY register value %d out of range\n", val);
  269 		return -1;
  270 	}
  271 	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
  272 		  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));
  273 
  274 	return 0;
  275 }
  276 
  277 static int
  278 nosy_open(struct inode *inode, struct file *file)
  279 {
  280 	int minor = iminor(inode);
  281 	struct client *client;
  282 	struct pcilynx *tmp, *lynx = NULL;
  283 
  284 	mutex_lock(&card_mutex);
  285 	list_for_each_entry(tmp, &card_list, link)
  286 		if (tmp->misc.minor == minor) {
  287 			lynx = lynx_get(tmp);
  288 			break;
  289 		}
  290 	mutex_unlock(&card_mutex);
  291 	if (lynx == NULL)
  292 		return -ENODEV;
  293 
  294 	client = kmalloc(sizeof *client, GFP_KERNEL);
  295 	if (client == NULL)
  296 		goto fail;
  297 
  298 	client->tcode_mask = ~0;
  299 	client->lynx = lynx;
  300 	INIT_LIST_HEAD(&client->link);
  301 
  302 	if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
  303 		goto fail;
  304 
  305 	file->private_data = client;
  306 
  307 	return nonseekable_open(inode, file);
  308 fail:
  309 	kfree(client);
  310 	lynx_put(lynx);
  311 
  312 	return -ENOMEM;
  313 }
  314 
  315 static int
  316 nosy_release(struct inode *inode, struct file *file)
  317 {
  318 	struct client *client = file->private_data;
  319 	struct pcilynx *lynx = client->lynx;
  320 
  321 	spin_lock_irq(&lynx->client_list_lock);
  322 	list_del_init(&client->link);
  323 	spin_unlock_irq(&lynx->client_list_lock);
  324 
  325 	packet_buffer_destroy(&client->buffer);
  326 	kfree(client);
  327 	lynx_put(lynx);
  328 
  329 	return 0;
  330 }
  331 
  332 static unsigned int
  333 nosy_poll(struct file *file, poll_table *pt)
  334 {
  335 	struct client *client = file->private_data;
  336 	unsigned int ret = 0;
  337 
  338 	poll_wait(file, &client->buffer.wait, pt);
  339 
  340 	if (atomic_read(&client->buffer.size) > 0)
  341 		ret = POLLIN | POLLRDNORM;
  342 
  343 	if (list_empty(&client->lynx->link))
  344 		ret |= POLLHUP;
  345 
  346 	return ret;
  347 }
  348 
  349 static ssize_t
  350 nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
  351 {
  352 	struct client *client = file->private_data;
  353 
  354 	return packet_buffer_get(client, buffer, count);
  355 }
  356 
  357 static long
  358 nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  359 {
  360 	struct client *client = file->private_data;
  361 	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
  362 	struct nosy_stats stats;
  363 
  364 	switch (cmd) {
  365 	case NOSY_IOC_GET_STATS:
  366 		spin_lock_irq(client_list_lock);
  367 		stats.total_packet_count = client->buffer.total_packet_count;
  368 		stats.lost_packet_count  = client->buffer.lost_packet_count;
  369 		spin_unlock_irq(client_list_lock);
  370 
  371 		if (copy_to_user((void __user *) arg, &stats, sizeof stats))
  372 			return -EFAULT;
  373 		else
  374 			return 0;
  375 
  376 	case NOSY_IOC_START:
  377 		spin_lock_irq(client_list_lock);
  378 		list_add_tail(&client->link, &client->lynx->client_list);
  379 		spin_unlock_irq(client_list_lock);
  380 
  381 		return 0;
  382 
  383 	case NOSY_IOC_STOP:
  384 		spin_lock_irq(client_list_lock);
  385 		list_del_init(&client->link);
  386 		spin_unlock_irq(client_list_lock);
  387 
  388 		return 0;
  389 
  390 	case NOSY_IOC_FILTER:
  391 		spin_lock_irq(client_list_lock);
  392 		client->tcode_mask = arg;
  393 		spin_unlock_irq(client_list_lock);
  394 
  395 		return 0;
  396 
  397 	default:
  398 		return -EINVAL;
  399 		/* Flush buffer, configure filter. */
  400 	}
  401 }
  402 
  403 static const struct file_operations nosy_ops = {
  404 	.owner =		THIS_MODULE,
  405 	.read =			nosy_read,
  406 	.unlocked_ioctl =	nosy_ioctl,
  407 	.poll =			nosy_poll,
  408 	.open =			nosy_open,
  409 	.release =		nosy_release,
  410 };
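/*
 * Editor's note -- illustrative sketch, not part of the verified source:
 * the snoop interface is a misc character device (registered below with
 * misc.name = "nosy", so typically /dev/nosy). A hypothetical userspace
 * client exercises exactly the callbacks the generated harness drives:
 *
 *	int fd = open("/dev/nosy", O_RDONLY);
 *	ioctl(fd, NOSY_IOC_START);	// join the card's client_list
 *	n = read(fd, buf, sizeof(buf));	// blocks until a packet arrives
 *	close(fd);
 */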
  411 
  412 #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
  413 
  414 static void
  415 packet_irq_handler(struct pcilynx *lynx)
  416 {
  417 	struct client *client;
  418 	u32 tcode_mask, tcode, timestamp;
  419 	size_t length;
  420 	struct timespec64 ts64;
  421 
  422 	/* FIXME: Also report rcv_speed. */
  423 
  424 	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
  425 	tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
  426 
  427 	ktime_get_real_ts64(&ts64);
  428 	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
  429 	lynx->rcv_buffer[0] = (__force __le32)timestamp;
  430 
  431 	if (length == PHY_PACKET_SIZE)
  432 		tcode_mask = 1 << TCODE_PHY_PACKET;
  433 	else
  434 		tcode_mask = 1 << tcode;
  435 
  436 	spin_lock(&lynx->client_list_lock);
  437 
  438 	list_for_each_entry(client, &lynx->client_list, link)
  439 		if (client->tcode_mask & tcode_mask)
  440 			packet_buffer_put(&client->buffer,
  441 					  lynx->rcv_buffer, length + 4);
  442 
  443 	spin_unlock(&lynx->client_list_lock);
  444 }
  445 
  446 static void
  447 bus_reset_irq_handler(struct pcilynx *lynx)
  448 {
  449 	struct client *client;
  450 	struct timespec64 ts64;
  451 	u32    timestamp;
  452 
  453 	ktime_get_real_ts64(&ts64);
  454 	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
  455 
  456 	spin_lock(&lynx->client_list_lock);
  457 
  458 	list_for_each_entry(client, &lynx->client_list, link)
   459 		packet_buffer_put(&client->buffer, &timestamp, 4);
  460 
  461 	spin_unlock(&lynx->client_list_lock);
  462 }
  463 
  464 static irqreturn_t
  465 irq_handler(int irq, void *device)
  466 {
  467 	struct pcilynx *lynx = device;
  468 	u32 pci_int_status;
  469 
  470 	pci_int_status = reg_read(lynx, PCI_INT_STATUS);
  471 
  472 	if (pci_int_status == ~0)
  473 		/* Card was ejected. */
  474 		return IRQ_NONE;
  475 
  476 	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
  477 		/* Not our interrupt, bail out quickly. */
  478 		return IRQ_NONE;
  479 
  480 	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
  481 		u32 link_int_status;
  482 
  483 		link_int_status = reg_read(lynx, LINK_INT_STATUS);
  484 		reg_write(lynx, LINK_INT_STATUS, link_int_status);
  485 
  486 		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
  487 			bus_reset_irq_handler(lynx);
  488 	}
  489 
  490 	/* Clear the PCI_INT_STATUS register only after clearing the
  491 	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
  492 	 * be set again immediately. */
  493 
  494 	reg_write(lynx, PCI_INT_STATUS, pci_int_status);
  495 
  496 	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
  497 		packet_irq_handler(lynx);
  498 		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
  499 	}
  500 
  501 	return IRQ_HANDLED;
  502 }
  503 
  504 static void
  505 remove_card(struct pci_dev *dev)
  506 {
  507 	struct pcilynx *lynx = pci_get_drvdata(dev);
  508 	struct client *client;
  509 
  510 	mutex_lock(&card_mutex);
  511 	list_del_init(&lynx->link);
  512 	misc_deregister(&lynx->misc);
  513 	mutex_unlock(&card_mutex);
  514 
  515 	reg_write(lynx, PCI_INT_ENABLE, 0);
  516 	free_irq(lynx->pci_device->irq, lynx);
  517 
  518 	spin_lock_irq(&lynx->client_list_lock);
  519 	list_for_each_entry(client, &lynx->client_list, link)
  520 		wake_up_interruptible(&client->buffer.wait);
  521 	spin_unlock_irq(&lynx->client_list_lock);
  522 
  523 	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  524 			    lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
  525 	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  526 			    lynx->rcv_pcl, lynx->rcv_pcl_bus);
  527 	pci_free_consistent(lynx->pci_device, PAGE_SIZE,
  528 			    lynx->rcv_buffer, lynx->rcv_buffer_bus);
  529 
  530 	iounmap(lynx->registers);
  531 	pci_disable_device(dev);
  532 	lynx_put(lynx);
  533 }
  534 
  535 #define RCV_BUFFER_SIZE (16 * 1024)
  536 
  537 static int
  538 add_card(struct pci_dev *dev, const struct pci_device_id *unused)
  539 {
  540 	struct pcilynx *lynx;
  541 	u32 p, end;
  542 	int ret, i;
  543 
  544 	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
  545 		dev_err(&dev->dev,
  546 		    "DMA address limits not supported for PCILynx hardware\n");
  547 		return -ENXIO;
  548 	}
  549 	if (pci_enable_device(dev)) {
  550 		dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
  551 		return -ENXIO;
  552 	}
  553 	pci_set_master(dev);
  554 
  555 	lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
  556 	if (lynx == NULL) {
  557 		dev_err(&dev->dev, "Failed to allocate control structure\n");
  558 		ret = -ENOMEM;
  559 		goto fail_disable;
  560 	}
  561 	lynx->pci_device = dev;
  562 	pci_set_drvdata(dev, lynx);
  563 
  564 	spin_lock_init(&lynx->client_list_lock);
  565 	INIT_LIST_HEAD(&lynx->client_list);
  566 	kref_init(&lynx->kref);
  567 
  568 	lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
  569 					  PCILYNX_MAX_REGISTER);
  570 
  571 	lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
  572 				sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
  573 	lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
  574 				sizeof(struct pcl), &lynx->rcv_pcl_bus);
  575 	lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
  576 				RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
  577 	if (lynx->rcv_start_pcl == NULL ||
  578 	    lynx->rcv_pcl == NULL ||
  579 	    lynx->rcv_buffer == NULL) {
  580 		dev_err(&dev->dev, "Failed to allocate receive buffer\n");
  581 		ret = -ENOMEM;
  582 		goto fail_deallocate;
  583 	}
  584 	lynx->rcv_start_pcl->next	= cpu_to_le32(lynx->rcv_pcl_bus);
  585 	lynx->rcv_pcl->next		= cpu_to_le32(PCL_NEXT_INVALID);
  586 	lynx->rcv_pcl->async_error_next	= cpu_to_le32(PCL_NEXT_INVALID);
  587 
  588 	lynx->rcv_pcl->buffer[0].control =
  589 			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
  590 	lynx->rcv_pcl->buffer[0].pointer =
  591 			cpu_to_le32(lynx->rcv_buffer_bus + 4);
  592 	p = lynx->rcv_buffer_bus + 2048;
  593 	end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
  594 	for (i = 1; p < end; i++, p += 2048) {
  595 		lynx->rcv_pcl->buffer[i].control =
  596 			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
  597 		lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
  598 	}
  599 	lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
  600 
  601 	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
  602 	/* Fix buggy cards with autoboot pin not tied low: */
  603 	reg_write(lynx, DMA0_CHAN_CTRL, 0);
  604 	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);
  605 
  606 #if 0
  607 	/* now, looking for PHY register set */
  608 	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
  609 		lynx->phyic.reg_1394a = 1;
  610 		PRINT(KERN_INFO, lynx->id,
  611 		      "found 1394a conform PHY (using extended register set)");
  612 		lynx->phyic.vendor = get_phy_vendorid(lynx);
  613 		lynx->phyic.product = get_phy_productid(lynx);
  614 	} else {
  615 		lynx->phyic.reg_1394a = 0;
  616 		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
  617 	}
  618 #endif
  619 
  620 	/* Setup the general receive FIFO max size. */
  621 	reg_write(lynx, FIFO_SIZES, 255);
  622 
  623 	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
  624 
  625 	reg_write(lynx, LINK_INT_ENABLE,
  626 		  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
  627 		  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
  628 		  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
  629 		  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
  630 		  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);
  631 
  632 	/* Disable the L flag in self ID packets. */
  633 	set_phy_reg(lynx, 4, 0);
  634 
  635 	/* Put this baby into snoop mode */
  636 	reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
  637 
  638 	run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
  639 
  640 	if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
  641 			driver_name, lynx)) {
  642 		dev_err(&dev->dev,
  643 			"Failed to allocate shared interrupt %d\n", dev->irq);
  644 		ret = -EIO;
  645 		goto fail_deallocate;
  646 	}
  647 
  648 	lynx->misc.parent = &dev->dev;
  649 	lynx->misc.minor = MISC_DYNAMIC_MINOR;
  650 	lynx->misc.name = "nosy";
  651 	lynx->misc.fops = &nosy_ops;
  652 
  653 	mutex_lock(&card_mutex);
  654 	ret = misc_register(&lynx->misc);
  655 	if (ret) {
  656 		dev_err(&dev->dev, "Failed to register misc char device\n");
  657 		mutex_unlock(&card_mutex);
  658 		goto fail_free_irq;
  659 	}
  660 	list_add_tail(&lynx->link, &card_list);
  661 	mutex_unlock(&card_mutex);
  662 
  663 	dev_info(&dev->dev,
  664 		 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);
  665 
  666 	return 0;
  667 
  668 fail_free_irq:
  669 	reg_write(lynx, PCI_INT_ENABLE, 0);
  670 	free_irq(lynx->pci_device->irq, lynx);
  671 
  672 fail_deallocate:
  673 	if (lynx->rcv_start_pcl)
  674 		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  675 				lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
  676 	if (lynx->rcv_pcl)
  677 		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  678 				lynx->rcv_pcl, lynx->rcv_pcl_bus);
  679 	if (lynx->rcv_buffer)
  680 		pci_free_consistent(lynx->pci_device, PAGE_SIZE,
  681 				lynx->rcv_buffer, lynx->rcv_buffer_bus);
  682 	iounmap(lynx->registers);
  683 	kfree(lynx);
  684 
  685 fail_disable:
  686 	pci_disable_device(dev);
  687 
  688 	return ret;
  689 }
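/*
 * Editor's note -- illustrative sketch, not part of the verified source:
 * the error trace above reaches fail_deallocate with lynx->registers
 * taken straight from ioremap_nocache(), whose return value is never
 * checked. Under the I/O-memory model further below (ldv_io_mem_remap()
 * may yield NULL without recording a mapping), the unconditional
 * iounmap() on this failure path is what the verifier flags. A guarded
 * cleanup would be:
 *
 *	if (lynx->registers)
 *		iounmap(lynx->registers);
 */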
  690 
  691 static struct pci_device_id pci_table[] = {
  692 	{
  693 		.vendor =    PCI_VENDOR_ID_TI,
  694 		.device =    PCI_DEVICE_ID_TI_PCILYNX,
  695 		.subvendor = PCI_ANY_ID,
  696 		.subdevice = PCI_ANY_ID,
  697 	},
  698 	{ }	/* Terminating entry */
  699 };
  700 
  701 MODULE_DEVICE_TABLE(pci, pci_table);
  702 
  703 static struct pci_driver lynx_pci_driver = {
  704 	.name =		driver_name,
  705 	.id_table =	pci_table,
  706 	.probe =	add_card,
  707 	.remove =	remove_card,
  708 };
  709 
  710 module_pci_driver(lynx_pci_driver);
  711 
  712 MODULE_AUTHOR("Kristian Hoegsberg");
  713 MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
  714 MODULE_LICENSE("GPL");
  715 
  716 
  717 
  718 
  719 
  720 /* LDV_COMMENT_BEGIN_MAIN */
  721 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
  722 
  723 /*###########################################################################*/
  724 
  725 /*############## Driver Environment Generator 0.2 output ####################*/
  726 
  727 /*###########################################################################*/
  728 
  729 
  730 
   731 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that all kernel resources are correctly released by the driver before it is unloaded. */
  732 void ldv_check_final_state(void);
  733 
   734 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests for a correct return result. */
  735 void ldv_check_return_value(int res);
  736 
   737 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests for a correct return result of the probe() function. */
  738 void ldv_check_return_value_probe(int res);
  739 
  740 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
  741 void ldv_initialize(void);
  742 
  743 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
  744 void ldv_handler_precall(void);
  745 
   746 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
  747 int nondet_int(void);
  748 
  749 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
  750 int LDV_IN_INTERRUPT;
  751 
  752 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
  753 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
  754 
  755 
  756 
  757 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
  758 	/*============================= VARIABLE DECLARATION PART   =============================*/
  759 	/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  760 	/* content: static ssize_t nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)*/
  761 	/* LDV_COMMENT_BEGIN_PREP */
  762 	#define TCODE_PHY_PACKET		0x10
  763 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  764 	/* LDV_COMMENT_END_PREP */
  765 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  766 	struct file * var_group1;
  767 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  768 	char __user * var_nosy_read_15_p1;
  769 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  770 	size_t  var_nosy_read_15_p2;
  771 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  772 	loff_t * var_nosy_read_15_p3;
  773 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "nosy_read" */
  774 	static ssize_t res_nosy_read_15;
  775 	/* LDV_COMMENT_BEGIN_PREP */
  776 	#define PHY_PACKET_SIZE 12 
  777 	#define RCV_BUFFER_SIZE (16 * 1024)
  778 	#if 0
  779 	#endif
  780 	/* LDV_COMMENT_END_PREP */
  781 	/* content: static long nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)*/
  782 	/* LDV_COMMENT_BEGIN_PREP */
  783 	#define TCODE_PHY_PACKET		0x10
  784 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  785 	/* LDV_COMMENT_END_PREP */
  786 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_ioctl" */
  787 	unsigned int  var_nosy_ioctl_16_p1;
  788 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_ioctl" */
  789 	unsigned long  var_nosy_ioctl_16_p2;
  790 	/* LDV_COMMENT_BEGIN_PREP */
  791 	#define PHY_PACKET_SIZE 12 
  792 	#define RCV_BUFFER_SIZE (16 * 1024)
  793 	#if 0
  794 	#endif
  795 	/* LDV_COMMENT_END_PREP */
  796 	/* content: static unsigned int nosy_poll(struct file *file, poll_table *pt)*/
  797 	/* LDV_COMMENT_BEGIN_PREP */
  798 	#define TCODE_PHY_PACKET		0x10
  799 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  800 	/* LDV_COMMENT_END_PREP */
  801 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_poll" */
  802 	poll_table * var_nosy_poll_14_p1;
  803 	/* LDV_COMMENT_BEGIN_PREP */
  804 	#define PHY_PACKET_SIZE 12 
  805 	#define RCV_BUFFER_SIZE (16 * 1024)
  806 	#if 0
  807 	#endif
  808 	/* LDV_COMMENT_END_PREP */
  809 	/* content: static int nosy_open(struct inode *inode, struct file *file)*/
  810 	/* LDV_COMMENT_BEGIN_PREP */
  811 	#define TCODE_PHY_PACKET		0x10
  812 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  813 	/* LDV_COMMENT_END_PREP */
  814 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_open" */
  815 	struct inode * var_group2;
  816 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "nosy_open" */
  817 	static int res_nosy_open_12;
  818 	/* LDV_COMMENT_BEGIN_PREP */
  819 	#define PHY_PACKET_SIZE 12 
  820 	#define RCV_BUFFER_SIZE (16 * 1024)
  821 	#if 0
  822 	#endif
  823 	/* LDV_COMMENT_END_PREP */
  824 	/* content: static int nosy_release(struct inode *inode, struct file *file)*/
  825 	/* LDV_COMMENT_BEGIN_PREP */
  826 	#define TCODE_PHY_PACKET		0x10
  827 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  828 	/* LDV_COMMENT_END_PREP */
  829 	/* LDV_COMMENT_BEGIN_PREP */
  830 	#define PHY_PACKET_SIZE 12 
  831 	#define RCV_BUFFER_SIZE (16 * 1024)
  832 	#if 0
  833 	#endif
  834 	/* LDV_COMMENT_END_PREP */
  835 
  836 	/** STRUCT: struct type: pci_driver, struct name: lynx_pci_driver **/
  837 	/* content: static int add_card(struct pci_dev *dev, const struct pci_device_id *unused)*/
  838 	/* LDV_COMMENT_BEGIN_PREP */
  839 	#define TCODE_PHY_PACKET		0x10
  840 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  841 	#define PHY_PACKET_SIZE 12 
  842 	#define RCV_BUFFER_SIZE (16 * 1024)
  843 	/* LDV_COMMENT_END_PREP */
  844 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "add_card" */
  845 	struct pci_dev * var_group3;
  846 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "add_card" */
  847 	const struct pci_device_id * var_add_card_21_p1;
  848 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "add_card" */
  849 	static int res_add_card_21;
  850 	/* content: static void remove_card(struct pci_dev *dev)*/
  851 	/* LDV_COMMENT_BEGIN_PREP */
  852 	#define TCODE_PHY_PACKET		0x10
  853 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  854 	#define PHY_PACKET_SIZE 12 
  855 	/* LDV_COMMENT_END_PREP */
  856 	/* LDV_COMMENT_BEGIN_PREP */
  857 	#define RCV_BUFFER_SIZE (16 * 1024)
  858 	#if 0
  859 	#endif
  860 	/* LDV_COMMENT_END_PREP */
  861 
  862 	/** CALLBACK SECTION request_irq **/
  863 	/* content: static irqreturn_t irq_handler(int irq, void *device)*/
  864 	/* LDV_COMMENT_BEGIN_PREP */
  865 	#define TCODE_PHY_PACKET		0x10
  866 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  867 	#define PHY_PACKET_SIZE 12 
  868 	/* LDV_COMMENT_END_PREP */
  869 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "irq_handler" */
  870 	int  var_irq_handler_19_p0;
  871 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "irq_handler" */
  872 	void * var_irq_handler_19_p1;
  873 	/* LDV_COMMENT_BEGIN_PREP */
  874 	#define RCV_BUFFER_SIZE (16 * 1024)
  875 	#if 0
  876 	#endif
  877 	/* LDV_COMMENT_END_PREP */
  878 
  879 
  880 
  881 
  882 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
  883 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
  884 	/*============================= VARIABLE INITIALIZING PART  =============================*/
  885 	LDV_IN_INTERRUPT=1;
  886 
  887 
  888 
  889 
  890 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
  891 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
  892 	/*============================= FUNCTION CALL SECTION       =============================*/
  893 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
  894 	ldv_initialize();
  895 	int ldv_s_nosy_ops_file_operations = 0;
  896 	
  897 
  898 	int ldv_s_lynx_pci_driver_pci_driver = 0;
  899 
  900 	
  901 
  902 
  903 	while(  nondet_int()
  904 		|| !(ldv_s_nosy_ops_file_operations == 0)
  905 		|| !(ldv_s_lynx_pci_driver_pci_driver == 0)
  906 	) {
  907 
  908 		switch(nondet_int()) {
  909 
  910 			case 0: {
  911 
  912 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  913 				if(ldv_s_nosy_ops_file_operations==0) {
  914 
  915 				/* content: static int nosy_open(struct inode *inode, struct file *file)*/
  916 				/* LDV_COMMENT_BEGIN_PREP */
  917 				#define TCODE_PHY_PACKET		0x10
  918 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  919 				/* LDV_COMMENT_END_PREP */
   920 				/* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "nosy_ops". Standard function test for correct return result. */
  921 				ldv_handler_precall();
  922 				res_nosy_open_12 = nosy_open( var_group2, var_group1);
  923 				 ldv_check_return_value(res_nosy_open_12);
  924 				 if(res_nosy_open_12) 
  925 					goto ldv_module_exit;
  926 				/* LDV_COMMENT_BEGIN_PREP */
  927 				#define PHY_PACKET_SIZE 12 
  928 				#define RCV_BUFFER_SIZE (16 * 1024)
  929 				#if 0
  930 				#endif
  931 				/* LDV_COMMENT_END_PREP */
  932 				ldv_s_nosy_ops_file_operations++;
  933 
  934 				}
  935 
  936 			}
  937 
  938 			break;
  939 			case 1: {
  940 
  941 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  942 				if(ldv_s_nosy_ops_file_operations==1) {
  943 
  944 				/* content: static ssize_t nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)*/
  945 				/* LDV_COMMENT_BEGIN_PREP */
  946 				#define TCODE_PHY_PACKET		0x10
  947 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  948 				/* LDV_COMMENT_END_PREP */
   949 				/* LDV_COMMENT_FUNCTION_CALL Function from field "read" from driver structure with callbacks "nosy_ops". Standard function test for correct return result. */
  950 				ldv_handler_precall();
  951 				res_nosy_read_15 = nosy_read( var_group1, var_nosy_read_15_p1, var_nosy_read_15_p2, var_nosy_read_15_p3);
  952 				 ldv_check_return_value(res_nosy_read_15);
  953 				 if(res_nosy_read_15 < 0) 
  954 					goto ldv_module_exit;
  955 				/* LDV_COMMENT_BEGIN_PREP */
  956 				#define PHY_PACKET_SIZE 12 
  957 				#define RCV_BUFFER_SIZE (16 * 1024)
  958 				#if 0
  959 				#endif
  960 				/* LDV_COMMENT_END_PREP */
  961 				ldv_s_nosy_ops_file_operations++;
  962 
  963 				}
  964 
  965 			}
  966 
  967 			break;
  968 			case 2: {
  969 
  970 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  971 				if(ldv_s_nosy_ops_file_operations==2) {
  972 
  973 				/* content: static int nosy_release(struct inode *inode, struct file *file)*/
  974 				/* LDV_COMMENT_BEGIN_PREP */
  975 				#define TCODE_PHY_PACKET		0x10
  976 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  977 				/* LDV_COMMENT_END_PREP */
  978 				/* LDV_COMMENT_FUNCTION_CALL Function from field "release" from driver structure with callbacks "nosy_ops" */
  979 				ldv_handler_precall();
  980 				nosy_release( var_group2, var_group1);
  981 				/* LDV_COMMENT_BEGIN_PREP */
  982 				#define PHY_PACKET_SIZE 12 
  983 				#define RCV_BUFFER_SIZE (16 * 1024)
  984 				#if 0
  985 				#endif
  986 				/* LDV_COMMENT_END_PREP */
  987 				ldv_s_nosy_ops_file_operations=0;
  988 
  989 				}
  990 
  991 			}
  992 
  993 			break;
  994 			case 3: {
  995 
  996 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  997 				
  998 
  999 				/* content: static long nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)*/
 1000 				/* LDV_COMMENT_BEGIN_PREP */
 1001 				#define TCODE_PHY_PACKET		0x10
 1002 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1003 				/* LDV_COMMENT_END_PREP */
 1004 				/* LDV_COMMENT_FUNCTION_CALL Function from field "unlocked_ioctl" from driver structure with callbacks "nosy_ops" */
 1005 				ldv_handler_precall();
 1006 				nosy_ioctl( var_group1, var_nosy_ioctl_16_p1, var_nosy_ioctl_16_p2);
 1007 				/* LDV_COMMENT_BEGIN_PREP */
 1008 				#define PHY_PACKET_SIZE 12 
 1009 				#define RCV_BUFFER_SIZE (16 * 1024)
 1010 				#if 0
 1011 				#endif
 1012 				/* LDV_COMMENT_END_PREP */
 1013 				
 1014 
 1015 				
 1016 
 1017 			}
 1018 
 1019 			break;
 1020 			case 4: {
 1021 
 1022 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
 1023 				
 1024 
 1025 				/* content: static unsigned int nosy_poll(struct file *file, poll_table *pt)*/
 1026 				/* LDV_COMMENT_BEGIN_PREP */
 1027 				#define TCODE_PHY_PACKET		0x10
 1028 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1029 				/* LDV_COMMENT_END_PREP */
 1030 				/* LDV_COMMENT_FUNCTION_CALL Function from field "poll" from driver structure with callbacks "nosy_ops" */
 1031 				ldv_handler_precall();
 1032 				nosy_poll( var_group1, var_nosy_poll_14_p1);
 1033 				/* LDV_COMMENT_BEGIN_PREP */
 1034 				#define PHY_PACKET_SIZE 12 
 1035 				#define RCV_BUFFER_SIZE (16 * 1024)
 1036 				#if 0
 1037 				#endif
 1038 				/* LDV_COMMENT_END_PREP */
 1039 				
 1040 
 1041 				
 1042 
 1043 			}
 1044 
 1045 			break;
 1046 			case 5: {
 1047 
 1048 				/** STRUCT: struct type: pci_driver, struct name: lynx_pci_driver **/
 1049 				if(ldv_s_lynx_pci_driver_pci_driver==0) {
 1050 
 1051 				/* content: static int add_card(struct pci_dev *dev, const struct pci_device_id *unused)*/
 1052 				/* LDV_COMMENT_BEGIN_PREP */
 1053 				#define TCODE_PHY_PACKET		0x10
 1054 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1055 				#define PHY_PACKET_SIZE 12 
 1056 				#define RCV_BUFFER_SIZE (16 * 1024)
 1057 				/* LDV_COMMENT_END_PREP */
  1058 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "lynx_pci_driver". Standard function test for correct return result. */
 1059 				res_add_card_21 = add_card( var_group3, var_add_card_21_p1);
 1060 				 ldv_check_return_value(res_add_card_21);
 1061 				 ldv_check_return_value_probe(res_add_card_21);
 1062 				 if(res_add_card_21) 
 1063 					goto ldv_module_exit;
 1064 				ldv_s_lynx_pci_driver_pci_driver++;
 1065 
 1066 				}
 1067 
 1068 			}
 1069 
 1070 			break;
 1071 			case 6: {
 1072 
 1073 				/** STRUCT: struct type: pci_driver, struct name: lynx_pci_driver **/
 1074 				if(ldv_s_lynx_pci_driver_pci_driver==1) {
 1075 
 1076 				/* content: static void remove_card(struct pci_dev *dev)*/
 1077 				/* LDV_COMMENT_BEGIN_PREP */
 1078 				#define TCODE_PHY_PACKET		0x10
 1079 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1080 				#define PHY_PACKET_SIZE 12 
 1081 				/* LDV_COMMENT_END_PREP */
 1082 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "lynx_pci_driver" */
 1083 				ldv_handler_precall();
 1084 				remove_card( var_group3);
 1085 				/* LDV_COMMENT_BEGIN_PREP */
 1086 				#define RCV_BUFFER_SIZE (16 * 1024)
 1087 				#if 0
 1088 				#endif
 1089 				/* LDV_COMMENT_END_PREP */
 1090 				ldv_s_lynx_pci_driver_pci_driver=0;
 1091 
 1092 				}
 1093 
 1094 			}
 1095 
 1096 			break;
 1097 			case 7: {
 1098 
 1099 				/** CALLBACK SECTION request_irq **/
 1100 				LDV_IN_INTERRUPT=2;
 1101 
 1102 				/* content: static irqreturn_t irq_handler(int irq, void *device)*/
 1103 				/* LDV_COMMENT_BEGIN_PREP */
 1104 				#define TCODE_PHY_PACKET		0x10
 1105 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1106 				#define PHY_PACKET_SIZE 12 
 1107 				/* LDV_COMMENT_END_PREP */
 1108 				/* LDV_COMMENT_FUNCTION_CALL */
 1109 				ldv_handler_precall();
 1110 				irq_handler( var_irq_handler_19_p0, var_irq_handler_19_p1);
 1111 				/* LDV_COMMENT_BEGIN_PREP */
 1112 				#define RCV_BUFFER_SIZE (16 * 1024)
 1113 				#if 0
 1114 				#endif
 1115 				/* LDV_COMMENT_END_PREP */
 1116 				LDV_IN_INTERRUPT=1;
 1117 
 1118 				
 1119 
 1120 			}
 1121 
 1122 			break;
 1123 			default: break;
 1124 
 1125 		}
 1126 
 1127 	}
 1128 
 1129 	ldv_module_exit: 
 1130 
  1131 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 1132 	ldv_final: ldv_check_final_state();
 1133 
 1134 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 1135 	return;
 1136 
 1137 }
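/*
 * Editor's note -- how the generated harness above drives the driver:
 * the two ldv_s_* counters encode callback ordering. The
 * file_operations counter forces open -> read -> release, and the
 * pci_driver counter forces probe -> remove; ioctl, poll and the
 * interrupt handler may fire at any point in between. The loop stops
 * only when both counters are back to 0 (or a callback fails, jumping
 * to ldv_module_exit), after which ldv_check_final_state() verifies
 * that no resource -- here, no I/O memory mapping -- is still held.
 */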
 1138 #endif
 1139 
  1140 /* LDV_COMMENT_END_MAIN */

     1 
    2 #include <asm/io.h>
    3 #include <verifier/rcv.h>
    4 #include <verifier/set.h>
    5 
    6 
    7 Set LDV_IO_MEMS = 0;
    8 
    9 
    10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_io_mem_remap') Creates an I/O memory mapping for the specified address */
   11 void *ldv_io_mem_remap(void *addr) {
   12     ldv_assert(ldv_set_not_contains(LDV_IO_MEMS, addr));
   13 
   14     void *ptr = ldv_undef_ptr();
   15     if (ptr != NULL) {
   16         ldv_set_add(LDV_IO_MEMS, addr);
   17         return ptr;
   18     }
   19     return NULL;
   20 }
   21 
    22 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_io_mem_unmap') Deletes the I/O memory mapping for the specified address */
   23 void ldv_io_mem_unmap(const volatile void *addr) {
   24     ldv_assert(ldv_set_contains(LDV_IO_MEMS, addr));
   25     ldv_set_remove(LDV_IO_MEMS, addr);
   26 }
   27 
   28 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all I/O memory mappings are unmapped properly */
   29 void ldv_check_final_state(void) {
   30     ldv_assert(ldv_set_is_empty(LDV_IO_MEMS));
   31 }
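      /*
       * Illustrative sketch (not part of the model): assuming the verifier
       * redirects the kernel's I/O remap/unmap primitives to the models above,
       * a balanced map/unmap pair leaves LDV_IO_MEMS empty, so the final check
       * holds on every path (remap only records addr when it succeeds):
       *
       *	void *regs = ldv_io_mem_remap(addr);	// addr enters LDV_IO_MEMS
       *	if (regs != NULL)
       *		ldv_io_mem_unmap(addr);		// addr leaves the set again
       *	ldv_check_final_state();		// passes: the set is empty
       */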
   32 #line 1 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--152_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/12887/dscv_tempdir/dscv/ri/152_1a/drivers/firewire/nosy.c"
   33 
   34 /*
   35  * nosy - Snoop mode driver for TI PCILynx 1394 controllers
   36  * Copyright (C) 2002-2007 Kristian Høgsberg
   37  *
   38  * This program is free software; you can redistribute it and/or modify
   39  * it under the terms of the GNU General Public License as published by
   40  * the Free Software Foundation; either version 2 of the License, or
   41  * (at your option) any later version.
   42  *
   43  * This program is distributed in the hope that it will be useful,
   44  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   45  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   46  * GNU General Public License for more details.
   47  *
   48  * You should have received a copy of the GNU General Public License
   49  * along with this program; if not, write to the Free Software Foundation,
   50  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   51  */
   52 
   53 #include <linux/device.h>
   54 #include <linux/errno.h>
   55 #include <linux/fs.h>
   56 #include <linux/init.h>
   57 #include <linux/interrupt.h>
   58 #include <linux/io.h>
   59 #include <linux/kernel.h>
   60 #include <linux/kref.h>
   61 #include <linux/miscdevice.h>
   62 #include <linux/module.h>
   63 #include <linux/mutex.h>
   64 #include <linux/pci.h>
   65 #include <linux/poll.h>
   66 #include <linux/sched.h> /* required for linux/wait.h */
   67 #include <linux/slab.h>
   68 #include <linux/spinlock.h>
   69 #include <linux/time64.h>
   70 #include <linux/timex.h>
   71 #include <linux/uaccess.h>
   72 #include <linux/wait.h>
   73 #include <linux/dma-mapping.h>
   74 #include <linux/atomic.h>
   75 #include <asm/byteorder.h>
   76 
   77 #include "nosy.h"
   78 #include "nosy-user.h"
   79 
   80 #define TCODE_PHY_PACKET		0x10
   81 #define PCI_DEVICE_ID_TI_PCILYNX	0x8000
   82 
   83 static char driver_name[] = KBUILD_MODNAME;
   84 
   85 /* this is the physical layout of a PCL, its size is 128 bytes */
   86 struct pcl {
   87 	__le32 next;
   88 	__le32 async_error_next;
   89 	u32 user_data;
   90 	__le32 pcl_status;
   91 	__le32 remaining_transfer_count;
   92 	__le32 next_data_buffer;
   93 	struct {
   94 		__le32 control;
   95 		__le32 pointer;
   96 	} buffer[13];
   97 };
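      /* Size check: 6 leading quadlets (6 * 4 = 24 bytes) plus 13
         control/pointer pairs (13 * 8 = 104 bytes) add up to the 128 bytes
         promised by the comment above. */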
   98 
   99 struct packet {
  100 	unsigned int length;
  101 	char data[0];
  102 };
  103 
  104 struct packet_buffer {
  105 	char *data;
  106 	size_t capacity;
  107 	long total_packet_count, lost_packet_count;
  108 	atomic_t size;
  109 	struct packet *head, *tail;
  110 	wait_queue_head_t wait;
  111 };
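      /*
       * Note: the packet buffer is a byte ring; head and tail walk through
       * data[] and wrap at data + capacity.  "size" counts the queued bytes
       * and is the only field shared between the interrupt-side producer
       * (packet_buffer_put) and the reading client (packet_buffer_get),
       * which is why it is an atomic_t.
       */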
  112 
  113 struct pcilynx {
  114 	struct pci_dev *pci_device;
  115 	__iomem char *registers;
  116 
  117 	struct pcl *rcv_start_pcl, *rcv_pcl;
  118 	__le32 *rcv_buffer;
  119 
  120 	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;
  121 
  122 	spinlock_t client_list_lock;
  123 	struct list_head client_list;
  124 
  125 	struct miscdevice misc;
  126 	struct list_head link;
  127 	struct kref kref;
  128 };
  129 
  130 static inline struct pcilynx *
  131 lynx_get(struct pcilynx *lynx)
  132 {
  133 	kref_get(&lynx->kref);
  134 
  135 	return lynx;
  136 }
  137 
  138 static void
  139 lynx_release(struct kref *kref)
  140 {
  141 	kfree(container_of(kref, struct pcilynx, kref));
  142 }
  143 
  144 static inline void
  145 lynx_put(struct pcilynx *lynx)
  146 {
  147 	kref_put(&lynx->kref, lynx_release);
  148 }
  149 
  150 struct client {
  151 	struct pcilynx *lynx;
  152 	u32 tcode_mask;
  153 	struct packet_buffer buffer;
  154 	struct list_head link;
  155 };
  156 
  157 static DEFINE_MUTEX(card_mutex);
  158 static LIST_HEAD(card_list);
  159 
  160 static int
  161 packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
  162 {
  163 	buffer->data = kmalloc(capacity, GFP_KERNEL);
  164 	if (buffer->data == NULL)
  165 		return -ENOMEM;
  166 	buffer->head = (struct packet *) buffer->data;
  167 	buffer->tail = (struct packet *) buffer->data;
  168 	buffer->capacity = capacity;
  169 	buffer->lost_packet_count = 0;
  170 	atomic_set(&buffer->size, 0);
  171 	init_waitqueue_head(&buffer->wait);
  172 
  173 	return 0;
  174 }
  175 
  176 static void
  177 packet_buffer_destroy(struct packet_buffer *buffer)
  178 {
  179 	kfree(buffer->data);
  180 }
  181 
  182 static int
  183 packet_buffer_get(struct client *client, char __user *data, size_t user_length)
  184 {
  185 	struct packet_buffer *buffer = &client->buffer;
  186 	size_t length;
  187 	char *end;
  188 
  189 	if (wait_event_interruptible(buffer->wait,
  190 				     atomic_read(&buffer->size) > 0) ||
  191 				     list_empty(&client->lynx->link))
  192 		return -ERESTARTSYS;
  193 
  194 	if (atomic_read(&buffer->size) == 0)
  195 		return -ENODEV;
  196 
  197 	/* FIXME: Check length <= user_length. */
  198 
  199 	end = buffer->data + buffer->capacity;
  200 	length = buffer->head->length;
  201 
  202 	if (&buffer->head->data[length] < end) {
  203 		if (copy_to_user(data, buffer->head->data, length))
  204 			return -EFAULT;
  205 		buffer->head = (struct packet *) &buffer->head->data[length];
  206 	} else {
  207 		size_t split = end - buffer->head->data;
  208 
  209 		if (copy_to_user(data, buffer->head->data, split))
  210 			return -EFAULT;
  211 		if (copy_to_user(data + split, buffer->data, length - split))
  212 			return -EFAULT;
  213 		buffer->head = (struct packet *) &buffer->data[length - split];
  214 	}
  215 
  216 	/*
  217 	 * Decrease buffer->size as the last thing, since this is what
  218 	 * keeps the interrupt from overwriting the packet we are
  219 	 * retrieving from the buffer.
  220 	 */
  221 	atomic_sub(sizeof(struct packet) + length, &buffer->size);
  222 
  223 	return length;
  224 }
  225 
  226 static void
  227 packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
  228 {
  229 	char *end;
  230 
  231 	buffer->total_packet_count++;
  232 
  233 	if (buffer->capacity <
  234 	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
  235 		buffer->lost_packet_count++;
  236 		return;
  237 	}
  238 
  239 	end = buffer->data + buffer->capacity;
  240 	buffer->tail->length = length;
  241 
  242 	if (&buffer->tail->data[length] < end) {
  243 		memcpy(buffer->tail->data, data, length);
  244 		buffer->tail = (struct packet *) &buffer->tail->data[length];
  245 	} else {
  246 		size_t split = end - buffer->tail->data;
  247 
  248 		memcpy(buffer->tail->data, data, split);
  249 		memcpy(buffer->data, data + split, length - split);
  250 		buffer->tail = (struct packet *) &buffer->data[length - split];
  251 	}
  252 
  253 	/* Finally, adjust buffer size and wake up userspace reader. */
  254 
  255 	atomic_add(sizeof(struct packet) + length, &buffer->size);
  256 	wake_up_interruptible(&buffer->wait);
  257 }
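      /*
       * Worked wraparound example (illustrative numbers): with a capacity of
       * 16384 and tail->data at offset 16380, a 12-byte payload is stored as
       * split = 16384 - 16380 = 4 bytes at the end of data[] and the
       * remaining 8 bytes at data[0]; the new tail then sits at &data[8].
       */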
  258 
  259 static inline void
  260 reg_write(struct pcilynx *lynx, int offset, u32 data)
  261 {
  262 	writel(data, lynx->registers + offset);
  263 }
  264 
  265 static inline u32
  266 reg_read(struct pcilynx *lynx, int offset)
  267 {
  268 	return readl(lynx->registers + offset);
  269 }
  270 
  271 static inline void
  272 reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
  273 {
  274 	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
  275 }
  276 
  277 /*
  278  * Maybe the pcl programs could be set up to just append data instead
  279  * of using a whole packet.
  280  */
  281 static inline void
  282 run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
  283 			   int dmachan)
  284 {
  285 	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
  286 	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
  287 		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
  288 }
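      /* The per-channel register banks are evidently 0x20 bytes apart, so
         scaling the channel number by 0x20 selects that channel's
         CURRENT_PCL and CHAN_CTRL registers above. */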
  289 
  290 static int
  291 set_phy_reg(struct pcilynx *lynx, int addr, int val)
  292 {
  293 	if (addr > 15) {
  294 		dev_err(&lynx->pci_device->dev,
  295 			"PHY register address %d out of range\n", addr);
  296 		return -1;
  297 	}
  298 	if (val > 0xff) {
  299 		dev_err(&lynx->pci_device->dev,
  300 			"PHY register value %d out of range\n", val);
  301 		return -1;
  302 	}
  303 	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
  304 		  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));
  305 
  306 	return 0;
  307 }
  308 
  309 static int
  310 nosy_open(struct inode *inode, struct file *file)
  311 {
  312 	int minor = iminor(inode);
  313 	struct client *client;
  314 	struct pcilynx *tmp, *lynx = NULL;
  315 
  316 	mutex_lock(&card_mutex);
  317 	list_for_each_entry(tmp, &card_list, link)
  318 		if (tmp->misc.minor == minor) {
  319 			lynx = lynx_get(tmp);
  320 			break;
  321 		}
  322 	mutex_unlock(&card_mutex);
  323 	if (lynx == NULL)
  324 		return -ENODEV;
  325 
  326 	client = kmalloc(sizeof *client, GFP_KERNEL);
  327 	if (client == NULL)
  328 		goto fail;
  329 
  330 	client->tcode_mask = ~0;
  331 	client->lynx = lynx;
  332 	INIT_LIST_HEAD(&client->link);
  333 
  334 	if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
  335 		goto fail;
  336 
  337 	file->private_data = client;
  338 
  339 	return nonseekable_open(inode, file);
  340 fail:
  341 	kfree(client);
  342 	lynx_put(lynx);
  343 
  344 	return -ENOMEM;
  345 }
  346 
  347 static int
  348 nosy_release(struct inode *inode, struct file *file)
  349 {
  350 	struct client *client = file->private_data;
  351 	struct pcilynx *lynx = client->lynx;
  352 
  353 	spin_lock_irq(&lynx->client_list_lock);
  354 	list_del_init(&client->link);
  355 	spin_unlock_irq(&lynx->client_list_lock);
  356 
  357 	packet_buffer_destroy(&client->buffer);
  358 	kfree(client);
  359 	lynx_put(lynx);
  360 
  361 	return 0;
  362 }
  363 
  364 static unsigned int
  365 nosy_poll(struct file *file, poll_table *pt)
  366 {
  367 	struct client *client = file->private_data;
  368 	unsigned int ret = 0;
  369 
  370 	poll_wait(file, &client->buffer.wait, pt);
  371 
  372 	if (atomic_read(&client->buffer.size) > 0)
  373 		ret = POLLIN | POLLRDNORM;
  374 
  375 	if (list_empty(&client->lynx->link))
  376 		ret |= POLLHUP;
  377 
  378 	return ret;
  379 }
  380 
  381 static ssize_t
  382 nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
  383 {
  384 	struct client *client = file->private_data;
  385 
  386 	return packet_buffer_get(client, buffer, count);
  387 }
  388 
  389 static long
  390 nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  391 {
  392 	struct client *client = file->private_data;
  393 	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
  394 	struct nosy_stats stats;
  395 
  396 	switch (cmd) {
  397 	case NOSY_IOC_GET_STATS:
  398 		spin_lock_irq(client_list_lock);
  399 		stats.total_packet_count = client->buffer.total_packet_count;
  400 		stats.lost_packet_count  = client->buffer.lost_packet_count;
  401 		spin_unlock_irq(client_list_lock);
  402 
  403 		if (copy_to_user((void __user *) arg, &stats, sizeof stats))
  404 			return -EFAULT;
  405 		else
  406 			return 0;
  407 
  408 	case NOSY_IOC_START:
  409 		spin_lock_irq(client_list_lock);
  410 		list_add_tail(&client->link, &client->lynx->client_list);
  411 		spin_unlock_irq(client_list_lock);
  412 
  413 		return 0;
  414 
  415 	case NOSY_IOC_STOP:
  416 		spin_lock_irq(client_list_lock);
  417 		list_del_init(&client->link);
  418 		spin_unlock_irq(client_list_lock);
  419 
  420 		return 0;
  421 
  422 	case NOSY_IOC_FILTER:
  423 		spin_lock_irq(client_list_lock);
  424 		client->tcode_mask = arg;
  425 		spin_unlock_irq(client_list_lock);
  426 
  427 		return 0;
  428 
  429 	default:
  430 		return -EINVAL;
  431 		/* Flush buffer, configure filter. */
  432 	}
  433 }
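      /*
       * Userspace sketch (hypothetical file descriptor, for illustration
       * only): a snooper interested in nothing but PHY packets could combine
       * the filter and start commands:
       *
       *	ioctl(fd, NOSY_IOC_FILTER, 1UL << TCODE_PHY_PACKET);
       *	ioctl(fd, NOSY_IOC_START, 0);
       *
       * packet_irq_handler() below then delivers only packets whose tcode
       * bit survives the AND against client->tcode_mask.
       */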
  434 
  435 static const struct file_operations nosy_ops = {
  436 	.owner =		THIS_MODULE,
  437 	.read =			nosy_read,
  438 	.unlocked_ioctl =	nosy_ioctl,
  439 	.poll =			nosy_poll,
  440 	.open =			nosy_open,
  441 	.release =		nosy_release,
  442 };
  443 
  444 #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
  445 
  446 static void
  447 packet_irq_handler(struct pcilynx *lynx)
  448 {
  449 	struct client *client;
  450 	u32 tcode_mask, tcode, timestamp;
  451 	size_t length;
  452 	struct timespec64 ts64;
  453 
  454 	/* FIXME: Also report rcv_speed. */
  455 
  456 	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
  457 	tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
  458 
  459 	ktime_get_real_ts64(&ts64);
  460 	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
  461 	lynx->rcv_buffer[0] = (__force __le32)timestamp;
  462 
  463 	if (length == PHY_PACKET_SIZE)
  464 		tcode_mask = 1 << TCODE_PHY_PACKET;
  465 	else
  466 		tcode_mask = 1 << tcode;
  467 
  468 	spin_lock(&lynx->client_list_lock);
  469 
  470 	list_for_each_entry(client, &lynx->client_list, link)
  471 		if (client->tcode_mask & tcode_mask)
  472 			packet_buffer_put(&client->buffer,
  473 					  lynx->rcv_buffer, length + 4);
  474 
  475 	spin_unlock(&lynx->client_list_lock);
  476 }
  477 
  478 static void
  479 bus_reset_irq_handler(struct pcilynx *lynx)
  480 {
  481 	struct client *client;
  482 	struct timespec64 ts64;
  483 	u32    timestamp;
  484 
  485 	ktime_get_real_ts64(&ts64);
  486 	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
  487 
  488 	spin_lock(&lynx->client_list_lock);
  489 
  490 	list_for_each_entry(client, &lynx->client_list, link)
  491 		packet_buffer_put(&client->buffer, &timestamp, 4);
  492 
  493 	spin_unlock(&lynx->client_list_lock);
  494 }
  495 
  496 static irqreturn_t
  497 irq_handler(int irq, void *device)
  498 {
  499 	struct pcilynx *lynx = device;
  500 	u32 pci_int_status;
  501 
  502 	pci_int_status = reg_read(lynx, PCI_INT_STATUS);
  503 
  504 	if (pci_int_status == ~0)
  505 		/* Card was ejected. */
  506 		return IRQ_NONE;
  507 
  508 	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
  509 		/* Not our interrupt, bail out quickly. */
  510 		return IRQ_NONE;
  511 
  512 	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
  513 		u32 link_int_status;
  514 
  515 		link_int_status = reg_read(lynx, LINK_INT_STATUS);
  516 		reg_write(lynx, LINK_INT_STATUS, link_int_status);
  517 
  518 		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
  519 			bus_reset_irq_handler(lynx);
  520 	}
  521 
  522 	/* Clear the PCI_INT_STATUS register only after clearing the
  523 	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
  524 	 * be set again immediately. */
  525 
  526 	reg_write(lynx, PCI_INT_STATUS, pci_int_status);
  527 
  528 	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
  529 		packet_irq_handler(lynx);
  530 		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
  531 	}
  532 
  533 	return IRQ_HANDLED;
  534 }
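      /*
       * Interrupt flow summary: read PCI_INT_STATUS once; bail out if the
       * card was ejected (all ones) or the interrupt is not ours; service
       * link interrupts (including bus resets) first; only then acknowledge
       * PCI_INT_STATUS (see the comment in the function body); finally, on
       * a DMA0 halt, hand the received packet to the clients and restart
       * the receive PCL.
       */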
  535 
  536 static void
  537 remove_card(struct pci_dev *dev)
  538 {
  539 	struct pcilynx *lynx = pci_get_drvdata(dev);
  540 	struct client *client;
  541 
  542 	mutex_lock(&card_mutex);
  543 	list_del_init(&lynx->link);
  544 	misc_deregister(&lynx->misc);
  545 	mutex_unlock(&card_mutex);
  546 
  547 	reg_write(lynx, PCI_INT_ENABLE, 0);
  548 	free_irq(lynx->pci_device->irq, lynx);
  549 
  550 	spin_lock_irq(&lynx->client_list_lock);
  551 	list_for_each_entry(client, &lynx->client_list, link)
  552 		wake_up_interruptible(&client->buffer.wait);
  553 	spin_unlock_irq(&lynx->client_list_lock);
  554 
  555 	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  556 			    lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
  557 	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  558 			    lynx->rcv_pcl, lynx->rcv_pcl_bus);
  559 	pci_free_consistent(lynx->pci_device, PAGE_SIZE,
  560 			    lynx->rcv_buffer, lynx->rcv_buffer_bus);
  561 
  562 	iounmap(lynx->registers);
  563 	pci_disable_device(dev);
  564 	lynx_put(lynx);
  565 }
  566 
  567 #define RCV_BUFFER_SIZE (16 * 1024)
  568 
  569 static int
  570 add_card(struct pci_dev *dev, const struct pci_device_id *unused)
  571 {
  572 	struct pcilynx *lynx;
  573 	u32 p, end;
  574 	int ret, i;
  575 
  576 	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
  577 		dev_err(&dev->dev,
  578 		    "DMA address limits not supported for PCILynx hardware\n");
  579 		return -ENXIO;
  580 	}
  581 	if (pci_enable_device(dev)) {
  582 		dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
  583 		return -ENXIO;
  584 	}
  585 	pci_set_master(dev);
  586 
  587 	lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
  588 	if (lynx == NULL) {
  589 		dev_err(&dev->dev, "Failed to allocate control structure\n");
  590 		ret = -ENOMEM;
  591 		goto fail_disable;
  592 	}
  593 	lynx->pci_device = dev;
  594 	pci_set_drvdata(dev, lynx);
  595 
  596 	spin_lock_init(&lynx->client_list_lock);
  597 	INIT_LIST_HEAD(&lynx->client_list);
  598 	kref_init(&lynx->kref);
  599 
  600 	lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
  601 					  PCILYNX_MAX_REGISTER);
  602 
  603 	lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
  604 				sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
  605 	lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
  606 				sizeof(struct pcl), &lynx->rcv_pcl_bus);
  607 	lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
  608 				RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
  609 	if (lynx->rcv_start_pcl == NULL ||
  610 	    lynx->rcv_pcl == NULL ||
  611 	    lynx->rcv_buffer == NULL) {
  612 		dev_err(&dev->dev, "Failed to allocate receive buffer\n");
  613 		ret = -ENOMEM;
  614 		goto fail_deallocate;
  615 	}
  616 	lynx->rcv_start_pcl->next	= cpu_to_le32(lynx->rcv_pcl_bus);
  617 	lynx->rcv_pcl->next		= cpu_to_le32(PCL_NEXT_INVALID);
  618 	lynx->rcv_pcl->async_error_next	= cpu_to_le32(PCL_NEXT_INVALID);
  619 
  620 	lynx->rcv_pcl->buffer[0].control =
  621 			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
  622 	lynx->rcv_pcl->buffer[0].pointer =
  623 			cpu_to_le32(lynx->rcv_buffer_bus + 4);
  624 	p = lynx->rcv_buffer_bus + 2048;
  625 	end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
  626 	for (i = 1; p < end; i++, p += 2048) {
  627 		lynx->rcv_pcl->buffer[i].control =
  628 			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
  629 		lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
  630 	}
  631 	lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
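      /* Buffer accounting: 4 bytes are reserved up front for the timestamp,
         buffer[0] covers the following 2044 bytes, and the loop adds seven
         more 2048-byte buffers: 4 + 2044 + 7 * 2048 = 16384 = RCV_BUFFER_SIZE,
         using 8 of the 13 available PCL buffer entries. */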
  632 
  633 	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
  634 	/* Fix buggy cards with autoboot pin not tied low: */
  635 	reg_write(lynx, DMA0_CHAN_CTRL, 0);
  636 	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);
  637 
  638 #if 0
  639 	/* now, looking for PHY register set */
  640 	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
  641 		lynx->phyic.reg_1394a = 1;
  642 		PRINT(KERN_INFO, lynx->id,
  643 		      "found 1394a conform PHY (using extended register set)");
  644 		lynx->phyic.vendor = get_phy_vendorid(lynx);
  645 		lynx->phyic.product = get_phy_productid(lynx);
  646 	} else {
  647 		lynx->phyic.reg_1394a = 0;
  648 		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
  649 	}
  650 #endif
  651 
  652 	/* Setup the general receive FIFO max size. */
  653 	reg_write(lynx, FIFO_SIZES, 255);
  654 
  655 	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
  656 
  657 	reg_write(lynx, LINK_INT_ENABLE,
  658 		  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
  659 		  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
  660 		  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
  661 		  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
  662 		  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);
  663 
  664 	/* Disable the L flag in self ID packets. */
  665 	set_phy_reg(lynx, 4, 0);
  666 
  667 	/* Put this baby into snoop mode */
  668 	reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
  669 
  670 	run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
  671 
  672 	if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
  673 			driver_name, lynx)) {
  674 		dev_err(&dev->dev,
  675 			"Failed to allocate shared interrupt %d\n", dev->irq);
  676 		ret = -EIO;
  677 		goto fail_deallocate;
  678 	}
  679 
  680 	lynx->misc.parent = &dev->dev;
  681 	lynx->misc.minor = MISC_DYNAMIC_MINOR;
  682 	lynx->misc.name = "nosy";
  683 	lynx->misc.fops = &nosy_ops;
  684 
  685 	mutex_lock(&card_mutex);
  686 	ret = misc_register(&lynx->misc);
  687 	if (ret) {
  688 		dev_err(&dev->dev, "Failed to register misc char device\n");
  689 		mutex_unlock(&card_mutex);
  690 		goto fail_free_irq;
  691 	}
  692 	list_add_tail(&lynx->link, &card_list);
  693 	mutex_unlock(&card_mutex);
  694 
  695 	dev_info(&dev->dev,
  696 		 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);
  697 
  698 	return 0;
  699 
  700 fail_free_irq:
  701 	reg_write(lynx, PCI_INT_ENABLE, 0);
  702 	free_irq(lynx->pci_device->irq, lynx);
  703 
  704 fail_deallocate:
  705 	if (lynx->rcv_start_pcl)
  706 		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  707 				lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
  708 	if (lynx->rcv_pcl)
  709 		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
  710 				lynx->rcv_pcl, lynx->rcv_pcl_bus);
  711 	if (lynx->rcv_buffer)
  712 		pci_free_consistent(lynx->pci_device, PAGE_SIZE,
  713 				lynx->rcv_buffer, lynx->rcv_buffer_bus);
  714 	iounmap(lynx->registers);
  715 	kfree(lynx);
  716 
  717 fail_disable:
  718 	pci_disable_device(dev);
  719 
  720 	return ret;
  721 }
  722 
  723 static struct pci_device_id pci_table[] = {
  724 	{
  725 		.vendor =    PCI_VENDOR_ID_TI,
  726 		.device =    PCI_DEVICE_ID_TI_PCILYNX,
  727 		.subvendor = PCI_ANY_ID,
  728 		.subdevice = PCI_ANY_ID,
  729 	},
  730 	{ }	/* Terminating entry */
  731 };
  732 
  733 MODULE_DEVICE_TABLE(pci, pci_table);
  734 
  735 static struct pci_driver lynx_pci_driver = {
  736 	.name =		driver_name,
  737 	.id_table =	pci_table,
  738 	.probe =	add_card,
  739 	.remove =	remove_card,
  740 };
  741 
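      /* module_pci_driver() expands to the module_init()/module_exit()
         boilerplate that registers and unregisters lynx_pci_driver. */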
  742 module_pci_driver(lynx_pci_driver);
  743 
  744 MODULE_AUTHOR("Kristian Hoegsberg");
  745 MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
  746 MODULE_LICENSE("GPL");
  747 
  748 
  749 
  750 
  751 
  752 /* LDV_COMMENT_BEGIN_MAIN */
  753 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
  754 
  755 /*###########################################################################*/
  756 
  757 /*############## Driver Environment Generator 0.2 output ####################*/
  758 
  759 /*###########################################################################*/
  760 
  761 
  762 
  763 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
  764 void ldv_check_final_state(void);
  765 
  766 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
  767 void ldv_check_return_value(int res);
  768 
  769 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
  770 void ldv_check_return_value_probe(int res);
  771 
  772 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
  773 void ldv_initialize(void);
  774 
  775 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
  776 void ldv_handler_precall(void);
  777 
  778 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
  779 int nondet_int(void);
  780 
  781 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
  782 int LDV_IN_INTERRUPT;
  783 
  784 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
  785 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
  786 
  787 
  788 
  789 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
  790 	/*============================= VARIABLE DECLARATION PART   =============================*/
  791 	/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  792 	/* content: static ssize_t nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)*/
  793 	/* LDV_COMMENT_BEGIN_PREP */
  794 	#define TCODE_PHY_PACKET		0x10
  795 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  796 	/* LDV_COMMENT_END_PREP */
  797 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  798 	struct file * var_group1;
  799 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  800 	char __user * var_nosy_read_15_p1;
  801 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  802 	size_t  var_nosy_read_15_p2;
  803 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_read" */
  804 	loff_t * var_nosy_read_15_p3;
  805 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "nosy_read" */
  806 	static ssize_t res_nosy_read_15;
  807 	/* LDV_COMMENT_BEGIN_PREP */
  808 	#define PHY_PACKET_SIZE 12 
  809 	#define RCV_BUFFER_SIZE (16 * 1024)
  810 	#if 0
  811 	#endif
  812 	/* LDV_COMMENT_END_PREP */
  813 	/* content: static long nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)*/
  814 	/* LDV_COMMENT_BEGIN_PREP */
  815 	#define TCODE_PHY_PACKET		0x10
  816 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  817 	/* LDV_COMMENT_END_PREP */
  818 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_ioctl" */
  819 	unsigned int  var_nosy_ioctl_16_p1;
  820 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_ioctl" */
  821 	unsigned long  var_nosy_ioctl_16_p2;
  822 	/* LDV_COMMENT_BEGIN_PREP */
  823 	#define PHY_PACKET_SIZE 12 
  824 	#define RCV_BUFFER_SIZE (16 * 1024)
  825 	#if 0
  826 	#endif
  827 	/* LDV_COMMENT_END_PREP */
  828 	/* content: static unsigned int nosy_poll(struct file *file, poll_table *pt)*/
  829 	/* LDV_COMMENT_BEGIN_PREP */
  830 	#define TCODE_PHY_PACKET		0x10
  831 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  832 	/* LDV_COMMENT_END_PREP */
  833 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_poll" */
  834 	poll_table * var_nosy_poll_14_p1;
  835 	/* LDV_COMMENT_BEGIN_PREP */
  836 	#define PHY_PACKET_SIZE 12 
  837 	#define RCV_BUFFER_SIZE (16 * 1024)
  838 	#if 0
  839 	#endif
  840 	/* LDV_COMMENT_END_PREP */
  841 	/* content: static int nosy_open(struct inode *inode, struct file *file)*/
  842 	/* LDV_COMMENT_BEGIN_PREP */
  843 	#define TCODE_PHY_PACKET		0x10
  844 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  845 	/* LDV_COMMENT_END_PREP */
  846 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "nosy_open" */
  847 	struct inode * var_group2;
  848 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "nosy_open" */
  849 	static int res_nosy_open_12;
  850 	/* LDV_COMMENT_BEGIN_PREP */
  851 	#define PHY_PACKET_SIZE 12 
  852 	#define RCV_BUFFER_SIZE (16 * 1024)
  853 	#if 0
  854 	#endif
  855 	/* LDV_COMMENT_END_PREP */
  856 	/* content: static int nosy_release(struct inode *inode, struct file *file)*/
  857 	/* LDV_COMMENT_BEGIN_PREP */
  858 	#define TCODE_PHY_PACKET		0x10
  859 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  860 	/* LDV_COMMENT_END_PREP */
  861 	/* LDV_COMMENT_BEGIN_PREP */
  862 	#define PHY_PACKET_SIZE 12 
  863 	#define RCV_BUFFER_SIZE (16 * 1024)
  864 	#if 0
  865 	#endif
  866 	/* LDV_COMMENT_END_PREP */
  867 
  868 	/** STRUCT: struct type: pci_driver, struct name: lynx_pci_driver **/
  869 	/* content: static int add_card(struct pci_dev *dev, const struct pci_device_id *unused)*/
  870 	/* LDV_COMMENT_BEGIN_PREP */
  871 	#define TCODE_PHY_PACKET		0x10
  872 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  873 	#define PHY_PACKET_SIZE 12 
  874 	#define RCV_BUFFER_SIZE (16 * 1024)
  875 	/* LDV_COMMENT_END_PREP */
  876 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "add_card" */
  877 	struct pci_dev * var_group3;
  878 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "add_card" */
  879 	const struct pci_device_id * var_add_card_21_p1;
  880 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "add_card" */
  881 	static int res_add_card_21;
  882 	/* content: static void remove_card(struct pci_dev *dev)*/
  883 	/* LDV_COMMENT_BEGIN_PREP */
  884 	#define TCODE_PHY_PACKET		0x10
  885 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  886 	#define PHY_PACKET_SIZE 12 
  887 	/* LDV_COMMENT_END_PREP */
  888 	/* LDV_COMMENT_BEGIN_PREP */
  889 	#define RCV_BUFFER_SIZE (16 * 1024)
  890 	#if 0
  891 	#endif
  892 	/* LDV_COMMENT_END_PREP */
  893 
  894 	/** CALLBACK SECTION request_irq **/
  895 	/* content: static irqreturn_t irq_handler(int irq, void *device)*/
  896 	/* LDV_COMMENT_BEGIN_PREP */
  897 	#define TCODE_PHY_PACKET		0x10
  898 	#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  899 	#define PHY_PACKET_SIZE 12 
  900 	/* LDV_COMMENT_END_PREP */
  901 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "irq_handler" */
  902 	int  var_irq_handler_19_p0;
  903 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "irq_handler" */
  904 	void * var_irq_handler_19_p1;
  905 	/* LDV_COMMENT_BEGIN_PREP */
  906 	#define RCV_BUFFER_SIZE (16 * 1024)
  907 	#if 0
  908 	#endif
  909 	/* LDV_COMMENT_END_PREP */
  910 
  911 
  912 
  913 
  914 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
  915 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
  916 	/*============================= VARIABLE INITIALIZING PART  =============================*/
  917 	LDV_IN_INTERRUPT=1;
  918 
  919 
  920 
  921 
  922 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
  923 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
  924 	/*============================= FUNCTION CALL SECTION       =============================*/
  925 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
  926 	ldv_initialize();
  927 	int ldv_s_nosy_ops_file_operations = 0;
  928 	
  929 
  930 	int ldv_s_lynx_pci_driver_pci_driver = 0;
  931 
  932 	
  933 
  934 
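	/* Environment model: the loop below nondeterministically interleaves
	   the driver's callbacks.  The ldv_s_* counters encode the only
	   ordering constraints: open before read and release for nosy_ops,
	   and probe before remove for lynx_pci_driver; ioctl, poll and the
	   interrupt handler may fire at any point. */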
  935 	while(  nondet_int()
  936 		|| !(ldv_s_nosy_ops_file_operations == 0)
  937 		|| !(ldv_s_lynx_pci_driver_pci_driver == 0)
  938 	) {
  939 
  940 		switch(nondet_int()) {
  941 
  942 			case 0: {
  943 
  944 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  945 				if(ldv_s_nosy_ops_file_operations==0) {
  946 
  947 				/* content: static int nosy_open(struct inode *inode, struct file *file)*/
  948 				/* LDV_COMMENT_BEGIN_PREP */
  949 				#define TCODE_PHY_PACKET		0x10
  950 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  951 				/* LDV_COMMENT_END_PREP */
  952 				/* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "nosy_ops". Standard function test for correct return result. */
  953 				ldv_handler_precall();
  954 				res_nosy_open_12 = nosy_open( var_group2, var_group1);
  955 				 ldv_check_return_value(res_nosy_open_12);
  956 				 if(res_nosy_open_12) 
  957 					goto ldv_module_exit;
  958 				/* LDV_COMMENT_BEGIN_PREP */
  959 				#define PHY_PACKET_SIZE 12 
  960 				#define RCV_BUFFER_SIZE (16 * 1024)
  961 				#if 0
  962 				#endif
  963 				/* LDV_COMMENT_END_PREP */
  964 				ldv_s_nosy_ops_file_operations++;
  965 
  966 				}
  967 
  968 			}
  969 
  970 			break;
  971 			case 1: {
  972 
  973 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
  974 				if(ldv_s_nosy_ops_file_operations==1) {
  975 
  976 				/* content: static ssize_t nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)*/
  977 				/* LDV_COMMENT_BEGIN_PREP */
  978 				#define TCODE_PHY_PACKET		0x10
  979 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
  980 				/* LDV_COMMENT_END_PREP */
  981 				/* LDV_COMMENT_FUNCTION_CALL Function from field "read" from driver structure with callbacks "nosy_ops". Standard function test for correct return result. */
  982 				ldv_handler_precall();
  983 				res_nosy_read_15 = nosy_read( var_group1, var_nosy_read_15_p1, var_nosy_read_15_p2, var_nosy_read_15_p3);
  984 				 ldv_check_return_value(res_nosy_read_15);
  985 				 if(res_nosy_read_15 < 0) 
  986 					goto ldv_module_exit;
  987 				/* LDV_COMMENT_BEGIN_PREP */
  988 				#define PHY_PACKET_SIZE 12 
  989 				#define RCV_BUFFER_SIZE (16 * 1024)
  990 				#if 0
  991 				#endif
  992 				/* LDV_COMMENT_END_PREP */
  993 				ldv_s_nosy_ops_file_operations++;
  994 
  995 				}
  996 
  997 			}
  998 
  999 			break;
 1000 			case 2: {
 1001 
 1002 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
 1003 				if(ldv_s_nosy_ops_file_operations==2) {
 1004 
 1005 				/* content: static int nosy_release(struct inode *inode, struct file *file)*/
 1006 				/* LDV_COMMENT_BEGIN_PREP */
 1007 				#define TCODE_PHY_PACKET		0x10
 1008 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1009 				/* LDV_COMMENT_END_PREP */
 1010 				/* LDV_COMMENT_FUNCTION_CALL Function from field "release" from driver structure with callbacks "nosy_ops" */
 1011 				ldv_handler_precall();
 1012 				nosy_release( var_group2, var_group1);
 1013 				/* LDV_COMMENT_BEGIN_PREP */
 1014 				#define PHY_PACKET_SIZE 12 
 1015 				#define RCV_BUFFER_SIZE (16 * 1024)
 1016 				#if 0
 1017 				#endif
 1018 				/* LDV_COMMENT_END_PREP */
 1019 				ldv_s_nosy_ops_file_operations=0;
 1020 
 1021 				}
 1022 
 1023 			}
 1024 
 1025 			break;
 1026 			case 3: {
 1027 
 1028 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
 1029 				
 1030 
 1031 				/* content: static long nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)*/
 1032 				/* LDV_COMMENT_BEGIN_PREP */
 1033 				#define TCODE_PHY_PACKET		0x10
 1034 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1035 				/* LDV_COMMENT_END_PREP */
 1036 				/* LDV_COMMENT_FUNCTION_CALL Function from field "unlocked_ioctl" from driver structure with callbacks "nosy_ops" */
 1037 				ldv_handler_precall();
 1038 				nosy_ioctl( var_group1, var_nosy_ioctl_16_p1, var_nosy_ioctl_16_p2);
 1039 				/* LDV_COMMENT_BEGIN_PREP */
 1040 				#define PHY_PACKET_SIZE 12 
 1041 				#define RCV_BUFFER_SIZE (16 * 1024)
 1042 				#if 0
 1043 				#endif
 1044 				/* LDV_COMMENT_END_PREP */
 1045 				
 1046 
 1047 				
 1048 
 1049 			}
 1050 
 1051 			break;
 1052 			case 4: {
 1053 
 1054 				/** STRUCT: struct type: file_operations, struct name: nosy_ops **/
 1055 				
 1056 
 1057 				/* content: static unsigned int nosy_poll(struct file *file, poll_table *pt)*/
 1058 				/* LDV_COMMENT_BEGIN_PREP */
 1059 				#define TCODE_PHY_PACKET		0x10
 1060 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1061 				/* LDV_COMMENT_END_PREP */
 1062 				/* LDV_COMMENT_FUNCTION_CALL Function from field "poll" from driver structure with callbacks "nosy_ops" */
 1063 				ldv_handler_precall();
 1064 				nosy_poll( var_group1, var_nosy_poll_14_p1);
 1065 				/* LDV_COMMENT_BEGIN_PREP */
 1066 				#define PHY_PACKET_SIZE 12 
 1067 				#define RCV_BUFFER_SIZE (16 * 1024)
 1068 				#if 0
 1069 				#endif
 1070 				/* LDV_COMMENT_END_PREP */
 1071 				
 1072 
 1073 				
 1074 
 1075 			}
 1076 
 1077 			break;
 1078 			case 5: {
 1079 
 1080 				/** STRUCT: struct type: pci_driver, struct name: lynx_pci_driver **/
 1081 				if(ldv_s_lynx_pci_driver_pci_driver==0) {
 1082 
 1083 				/* content: static int add_card(struct pci_dev *dev, const struct pci_device_id *unused)*/
 1084 				/* LDV_COMMENT_BEGIN_PREP */
 1085 				#define TCODE_PHY_PACKET		0x10
 1086 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1087 				#define PHY_PACKET_SIZE 12 
 1088 				#define RCV_BUFFER_SIZE (16 * 1024)
 1089 				/* LDV_COMMENT_END_PREP */
 1090 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "lynx_pci_driver". Standard function test for correct return result. */
 1091 				res_add_card_21 = add_card( var_group3, var_add_card_21_p1);
 1092 				 ldv_check_return_value(res_add_card_21);
 1093 				 ldv_check_return_value_probe(res_add_card_21);
 1094 				 if(res_add_card_21) 
 1095 					goto ldv_module_exit;
 1096 				ldv_s_lynx_pci_driver_pci_driver++;
 1097 
 1098 				}
 1099 
 1100 			}
 1101 
 1102 			break;
 1103 			case 6: {
 1104 
 1105 				/** STRUCT: struct type: pci_driver, struct name: lynx_pci_driver **/
 1106 				if(ldv_s_lynx_pci_driver_pci_driver==1) {
 1107 
 1108 				/* content: static void remove_card(struct pci_dev *dev)*/
 1109 				/* LDV_COMMENT_BEGIN_PREP */
 1110 				#define TCODE_PHY_PACKET		0x10
 1111 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1112 				#define PHY_PACKET_SIZE 12 
 1113 				/* LDV_COMMENT_END_PREP */
 1114 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "lynx_pci_driver" */
 1115 				ldv_handler_precall();
 1116 				remove_card( var_group3);
 1117 				/* LDV_COMMENT_BEGIN_PREP */
 1118 				#define RCV_BUFFER_SIZE (16 * 1024)
 1119 				#if 0
 1120 				#endif
 1121 				/* LDV_COMMENT_END_PREP */
 1122 				ldv_s_lynx_pci_driver_pci_driver=0;
 1123 
 1124 				}
 1125 
 1126 			}
 1127 
 1128 			break;
 1129 			case 7: {
 1130 
 1131 				/** CALLBACK SECTION request_irq **/
 1132 				LDV_IN_INTERRUPT=2;
 1133 
 1134 				/* content: static irqreturn_t irq_handler(int irq, void *device)*/
 1135 				/* LDV_COMMENT_BEGIN_PREP */
 1136 				#define TCODE_PHY_PACKET		0x10
 1137 				#define PCI_DEVICE_ID_TI_PCILYNX	0x8000
 1138 				#define PHY_PACKET_SIZE 12 
 1139 				/* LDV_COMMENT_END_PREP */
 1140 				/* LDV_COMMENT_FUNCTION_CALL */
 1141 				ldv_handler_precall();
 1142 				irq_handler( var_irq_handler_19_p0, var_irq_handler_19_p1);
 1143 				/* LDV_COMMENT_BEGIN_PREP */
 1144 				#define RCV_BUFFER_SIZE (16 * 1024)
 1145 				#if 0
 1146 				#endif
 1147 				/* LDV_COMMENT_END_PREP */
 1148 				LDV_IN_INTERRUPT=1;
 1149 
 1150 				
 1151 
 1152 			}
 1153 
 1154 			break;
 1155 			default: break;
 1156 
 1157 		}
 1158 
 1159 	}
 1160 
 1161 	ldv_module_exit: 
 1162 
 1163 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 1164 	ldv_final: ldv_check_final_state();
 1165 
 1166 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 1167 	return;
 1168 
 1169 }
 1170 #endif
 1171 
 1172 /* LDV_COMMENT_END_MAIN */
 1173 
 1174 #line 32 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--152_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/12887/dscv_tempdir/dscv/ri/152_1a/drivers/firewire/nosy.o.c.prepared"
    1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because some static verifiers (like
    9    BLAST) do not accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   19 /* An infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return a nondeterministic negative integer. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return a nondeterministic nonpositive integer. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
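      /* Usage note (illustrative): a model that must fail with "some" errno
         can return ldv_undef_int_negative(); the verifier then explores
         every negative value rather than one concrete error code. */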
   47 
   48 /* Add an explicit model for the __builtin_expect GCC built-in. Without the
   49    model its return value would be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
   65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */
    1 #ifndef __LINUX_COMPILER_H
    2 #define __LINUX_COMPILER_H
    3 
    4 #ifndef __ASSEMBLY__
    5 
    6 #ifdef __CHECKER__
    7 # define __user		__attribute__((noderef, address_space(1)))
    8 # define __kernel	__attribute__((address_space(0)))
    9 # define __safe		__attribute__((safe))
   10 # define __force	__attribute__((force))
   11 # define __nocast	__attribute__((nocast))
   12 # define __iomem	__attribute__((noderef, address_space(2)))
   13 # define __must_hold(x)	__attribute__((context(x,1,1)))
   14 # define __acquires(x)	__attribute__((context(x,0,1)))
   15 # define __releases(x)	__attribute__((context(x,1,0)))
   16 # define __acquire(x)	__context__(x,1)
   17 # define __release(x)	__context__(x,-1)
   18 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
   19 # define __percpu	__attribute__((noderef, address_space(3)))
   20 #ifdef CONFIG_SPARSE_RCU_POINTER
   21 # define __rcu		__attribute__((noderef, address_space(4)))
   22 #else /* CONFIG_SPARSE_RCU_POINTER */
   23 # define __rcu
   24 #endif /* CONFIG_SPARSE_RCU_POINTER */
   25 # define __private	__attribute__((noderef))
   26 extern void __chk_user_ptr(const volatile void __user *);
   27 extern void __chk_io_ptr(const volatile void __iomem *);
   28 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
   29 #else /* __CHECKER__ */
   30 # define __user
   31 # define __kernel
   32 # define __safe
   33 # define __force
   34 # define __nocast
   35 # define __iomem
   36 # define __chk_user_ptr(x) (void)0
   37 # define __chk_io_ptr(x) (void)0
   38 # define __builtin_warning(x, y...) (1)
   39 # define __must_hold(x)
   40 # define __acquires(x)
   41 # define __releases(x)
   42 # define __acquire(x) (void)0
   43 # define __release(x) (void)0
   44 # define __cond_lock(x,c) (c)
   45 # define __percpu
   46 # define __rcu
   47 # define __private
   48 # define ACCESS_PRIVATE(p, member) ((p)->member)
   49 #endif /* __CHECKER__ */
   50 
   51 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
   52 #define ___PASTE(a,b) a##b
   53 #define __PASTE(a,b) ___PASTE(a,b)
   54 
   55 #ifdef __KERNEL__
   56 
   57 #ifdef __GNUC__
   58 #include <linux/compiler-gcc.h>
   59 #endif
   60 
   61 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
   62 #define notrace __attribute__((hotpatch(0,0)))
   63 #else
   64 #define notrace __attribute__((no_instrument_function))
   65 #endif
   66 
   67 /* Intel compiler defines __GNUC__. So we will overwrite implementations
   68  * coming from above header files here
   69  */
   70 #ifdef __INTEL_COMPILER
   71 # include <linux/compiler-intel.h>
   72 #endif
   73 
   74 /* Clang compiler defines __GNUC__. So we will overwrite implementations
   75  * coming from above header files here
   76  */
   77 #ifdef __clang__
   78 #include <linux/compiler-clang.h>
   79 #endif
   80 
   81 /*
   82  * Generic compiler-dependent macros required for kernel
   83  * build go below this comment. Actual compiler/compiler version
   84  * specific implementations come from the above header files
   85  */
   86 
   87 struct ftrace_branch_data {
   88 	const char *func;
   89 	const char *file;
   90 	unsigned line;
   91 	union {
   92 		struct {
   93 			unsigned long correct;
   94 			unsigned long incorrect;
   95 		};
   96 		struct {
   97 			unsigned long miss;
   98 			unsigned long hit;
   99 		};
  100 		unsigned long miss_hit[2];
  101 	};
  102 };
  103 
  104 /*
  105  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  106  * to disable branch tracing on a per file basis.
  107  */
  108 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
  109     && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
  110 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  111 
  112 #define likely_notrace(x)	__builtin_expect(!!(x), 1)
  113 #define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
  114 
  115 #define __branch_check__(x, expect) ({					\
  116 			int ______r;					\
  117 			static struct ftrace_branch_data		\
  118 				__attribute__((__aligned__(4)))		\
  119 				__attribute__((section("_ftrace_annotated_branch"))) \
  120 				______f = {				\
  121 				.func = __func__,			\
  122 				.file = __FILE__,			\
  123 				.line = __LINE__,			\
  124 			};						\
  125 			______r = likely_notrace(x);			\
  126 			ftrace_likely_update(&______f, ______r, expect); \
  127 			______r;					\
  128 		})
  129 
  130 /*
  131  * Using __builtin_constant_p(x) to ignore cases where the return
  132  * value is always the same.  This idea is taken from a similar patch
  133  * written by Daniel Walker.
  134  */
  135 # ifndef likely
  136 #  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
  137 # endif
  138 # ifndef unlikely
  139 #  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
  140 # endif
  141 
  142 #ifdef CONFIG_PROFILE_ALL_BRANCHES
  143 /*
  144  * "Define 'is'", Bill Clinton
  145  * "Define 'if'", Steven Rostedt
  146  */
  147 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
  148 #define __trace_if(cond) \
  149 	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
  150 	({								\
  151 		int ______r;						\
  152 		static struct ftrace_branch_data			\
  153 			__attribute__((__aligned__(4)))			\
  154 			__attribute__((section("_ftrace_branch")))	\
  155 			______f = {					\
  156 				.func = __func__,			\
  157 				.file = __FILE__,			\
  158 				.line = __LINE__,			\
  159 			};						\
  160 		______r = !!(cond);					\
  161 		______f.miss_hit[______r]++;					\
  162 		______r;						\
  163 	}))
  164 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
  165 
  166 #else
  167 # define likely(x)	__builtin_expect(!!(x), 1)
  168 # define unlikely(x)	__builtin_expect(!!(x), 0)
  169 #endif
  170 
  171 /* Optimization barrier */
  172 #ifndef barrier
  173 # define barrier() __memory_barrier()
  174 #endif
  175 
  176 #ifndef barrier_data
  177 # define barrier_data(ptr) barrier()
  178 #endif
  179 
  180 /* Unreachable code */
  181 #ifndef unreachable
  182 # define unreachable() do { } while (1)
  183 #endif
  184 
  185 #ifndef RELOC_HIDE
  186 # define RELOC_HIDE(ptr, off)					\
  187   ({ unsigned long __ptr;					\
  188      __ptr = (unsigned long) (ptr);				\
  189     (typeof(ptr)) (__ptr + (off)); })
  190 #endif
  191 
  192 #ifndef OPTIMIZER_HIDE_VAR
  193 #define OPTIMIZER_HIDE_VAR(var) barrier()
  194 #endif
  195 
  196 /* Not-quite-unique ID. */
  197 #ifndef __UNIQUE_ID
  198 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
  199 #endif
  200 
  201 #include <uapi/linux/types.h>
  202 
  203 #define __READ_ONCE_SIZE						\
  204 ({									\
  205 	switch (size) {							\
  206 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
  207 	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
  208 	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
  209 	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
  210 	default:							\
  211 		barrier();						\
  212 		__builtin_memcpy((void *)res, (const void *)p, size);	\
  213 		barrier();						\
  214 	}								\
  215 })
  216 
  217 static __always_inline
  218 void __read_once_size(const volatile void *p, void *res, int size)
  219 {
  220 	__READ_ONCE_SIZE;
  221 }
  222 
  223 #ifdef CONFIG_KASAN
  224 /*
  225  * This function is not 'inline' because __no_sanitize_address conflicts
  226  * with inlining. Attempt to inline it may cause a build failure.
  227  * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
  228  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  229  */
  230 static __no_sanitize_address __maybe_unused
  231 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  232 {
  233 	__READ_ONCE_SIZE;
  234 }
  235 #else
  236 static __always_inline
  237 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  238 {
  239 	__READ_ONCE_SIZE;
  240 }
  241 #endif
  242 
  243 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  244 {
  245 	switch (size) {
  246 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
  247 	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
  248 	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
  249 	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
  250 	default:
  251 		barrier();
  252 		__builtin_memcpy((void *)p, (const void *)res, size);
  253 		barrier();
  254 	}
  255 }
  256 
  257 /*
  258  * Prevent the compiler from merging or refetching reads or writes. The
  259  * compiler is also forbidden from reordering successive instances of
  260  * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  261  * compiler is aware of some particular ordering.  One way to make the
  262  * compiler aware of ordering is to put the two invocations of READ_ONCE,
  263  * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  264  *
  265  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  266  * data types like structs or unions. If the size of the accessed data
  267  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
  268  * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
  269  * least two memcpy()s: one for the __builtin_memcpy() and then one for
  270  * the macro doing the copy of variable - '__u' allocated on the stack.
  271  *
  272  * Their two major use cases are: (1) Mediating communication between
  273  * process-level code and irq/NMI handlers, all running on the same CPU,
  274  * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
  275  * mutilate accesses that either do not require ordering or that interact
  276  * with an explicit memory barrier or atomic instruction that provides the
  277  * required ordering.
  278  */
  279 
  280 #define __READ_ONCE(x, check)						\
  281 ({									\
  282 	union { typeof(x) __val; char __c[1]; } __u;			\
  283 	if (check)							\
  284 		__read_once_size(&(x), __u.__c, sizeof(x));		\
  285 	else								\
  286 		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
  287 	__u.__val;							\
  288 })
  289 #define READ_ONCE(x) __READ_ONCE(x, 1)
  290 
  291 /*
  292  * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
  293  * to hide memory access from KASAN.
  294  */
  295 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
  296 
  297 #define WRITE_ONCE(x, val) \
  298 ({							\
  299 	union { typeof(x) __val; char __c[1]; } __u =	\
  300 		{ .__val = (__force typeof(x)) (val) }; \
  301 	__write_once_size(&(x), __u.__c, sizeof(x));	\
  302 	__u.__val;					\
  303 })
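      /*
       * Illustrative use (assumed flag shared with an interrupt handler):
       * the macros keep the compiler from caching or tearing the access:
       *
       *	while (!READ_ONCE(done))	// reader polls the flag
       *		cpu_relax();
       *	...
       *	WRITE_ONCE(done, 1);		// writer (e.g. irq handler) signals
       */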
  304 
  305 #endif /* __KERNEL__ */
  306 
  307 #endif /* __ASSEMBLY__ */
  308 
  309 #ifdef __KERNEL__
  310 /*
  311  * Allow us to mark functions as 'deprecated' and have gcc emit a nice
  312  * warning for each use, in hopes of speeding the function's removal.
  313  * Usage is:
  314  * 		int __deprecated foo(void)
  315  */
  316 #ifndef __deprecated
  317 # define __deprecated		/* unimplemented */
  318 #endif
  319 
  320 #ifdef MODULE
  321 #define __deprecated_for_modules __deprecated
  322 #else
  323 #define __deprecated_for_modules
  324 #endif
  325 
  326 #ifndef __must_check
  327 #define __must_check
  328 #endif
  329 
  330 #ifndef CONFIG_ENABLE_MUST_CHECK
  331 #undef __must_check
  332 #define __must_check
  333 #endif
  334 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
  335 #undef __deprecated
  336 #undef __deprecated_for_modules
  337 #define __deprecated
  338 #define __deprecated_for_modules
  339 #endif
  340 
  341 #ifndef __malloc
  342 #define __malloc
  343 #endif
  344 
  345 /*
  346  * Allow us to avoid 'defined but not used' warnings on functions and data,
  347  * as well as force them to be emitted to the assembly file.
  348  *
  349  * As of gcc 3.4, static functions that are not marked with attribute((used))
  350  * may be elided from the assembly file.  As of gcc 3.4, static data not so
  351  * marked will not be elided, but this may change in a future gcc version.
  352  *
  353  * NOTE: Because distributions shipped with a backported unit-at-a-time
  354  * compiler in gcc 3.3, we must define __used to be __attribute__((used))
  355  * for gcc >=3.3 instead of 3.4.
  356  *
  357  * In prior versions of gcc, such functions and data would be emitted, but
  358  * would be warned about except with attribute((unused)).
  359  *
  360  * Mark functions that are referenced only in inline assembly as __used so
  361  * the code is emitted even though it appears to be unreferenced.
  362  */
  363 #ifndef __used
  364 # define __used			/* unimplemented */
  365 #endif
  366 
  367 #ifndef __maybe_unused
  368 # define __maybe_unused		/* unimplemented */
  369 #endif
  370 
  371 #ifndef __always_unused
  372 # define __always_unused	/* unimplemented */
  373 #endif
  374 
  375 #ifndef noinline
  376 #define noinline
  377 #endif
  378 
  379 /*
  380  * Rather than using noinline to prevent stack consumption, use
  381  * noinline_for_stack instead.  For documentation reasons.
  382  */
  383 #define noinline_for_stack noinline
  384 
  385 #ifndef __always_inline
  386 #define __always_inline inline
  387 #endif
  388 
  389 #endif /* __KERNEL__ */
  390 
  391 /*
  392  * From the GCC manual:
  393  *
  394  * Many functions do not examine any values except their arguments,
  395  * and have no effects except the return value.  Basically this is
  396  * just slightly more strict class than the `pure' attribute above,
  397  * since function is not allowed to read global memory.
  398  *
  399  * Note that a function that has pointer arguments and examines the
  400  * data pointed to must _not_ be declared `const'.  Likewise, a
  401  * function that calls a non-`const' function usually must not be
  402  * `const'.  It does not make sense for a `const' function to return
  403  * `void'.
  404  */
  405 #ifndef __attribute_const__
  406 # define __attribute_const__	/* unimplemented */
  407 #endif
  408 
  409 /*
  410  * Tell gcc if a function is cold. The compiler will assume any path
  411  * directly leading to the call is unlikely.
  412  */
  413 
  414 #ifndef __cold
  415 #define __cold
  416 #endif
  417 
  418 /* Simple shorthand for a section definition */
  419 #ifndef __section
  420 # define __section(S) __attribute__ ((__section__(#S)))
  421 #endif
  422 
  423 #ifndef __visible
  424 #define __visible
  425 #endif
  426 
  427 /*
  428  * Assume alignment of return value.
  429  */
  430 #ifndef __assume_aligned
  431 #define __assume_aligned(a, ...)
  432 #endif
  433 
  434 
  435 /* Are two types/vars the same type (ignoring qualifiers)? */
  436 #ifndef __same_type
  437 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
  438 #endif
  439 
  440 /* Is this type a native word size -- useful for atomic operations */
  441 #ifndef __native_word
  442 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
  443 #endif
  444 
  445 /* Compile time object size, -1 for unknown */
  446 #ifndef __compiletime_object_size
  447 # define __compiletime_object_size(obj) -1
  448 #endif
  449 #ifndef __compiletime_warning
  450 # define __compiletime_warning(message)
  451 #endif
  452 #ifndef __compiletime_error
  453 # define __compiletime_error(message)
  454 /*
  455  * Sparse complains of variable sized arrays due to the temporary variable in
  456  * __compiletime_assert. Unfortunately we can't just expand it out to make
  457  * sparse see a constant array size without breaking compiletime_assert on old
  458  * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
  459  */
  460 # ifndef __CHECKER__
  461 #  define __compiletime_error_fallback(condition) \
  462 	do {  } while (0)
  463 # endif
  464 #endif
  465 #ifndef __compiletime_error_fallback
  466 # define __compiletime_error_fallback(condition) do { } while (0)
  467 #endif
  468 
  469 #define __compiletime_assert(condition, msg, prefix, suffix)		\
  470 	do {								\
  471 		bool __cond = !(condition);				\
  472 		extern void prefix ## suffix(void) __compiletime_error(msg); \
  473 		if (__cond)						\
  474 			prefix ## suffix();				\
  475 		__compiletime_error_fallback(__cond);			\
  476 	} while (0)
  477 
  478 #define _compiletime_assert(condition, msg, prefix, suffix) \
  479 	__compiletime_assert(condition, msg, prefix, suffix)
  480 
  481 /**
  482  * compiletime_assert - break build and emit msg if condition is false
  483  * @condition: a compile-time constant condition to check
  484  * @msg:       a message to emit if condition is false
  485  *
   486  * In the tradition of POSIX assert, this macro will break the build if the
  487  * supplied condition is *false*, emitting the supplied error message if the
  488  * compiler has support to do so.
  489  */
  490 #define compiletime_assert(condition, msg) \
  491 	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
  492 
  493 #define compiletime_assert_atomic_type(t)				\
  494 	compiletime_assert(__native_word(t),				\
  495 		"Need native word sized stores/loads for atomicity.")
  496 
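/*
 * Usage sketch (hypothetical structure and size): compiletime_assert()
 * must appear in function context; here it breaks the build if a header
 * layout drifts from what the code assumes:
 *
 *	struct foo_hdr {
 *		u32 magic;
 *		u32 len;
 *	};
 *
 *	static void foo_check_layout(void)
 *	{
 *		compiletime_assert(sizeof(struct foo_hdr) == 8,
 *				   "foo_hdr must stay 8 bytes");
 *	}
 */
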
  497 /*
  498  * Prevent the compiler from merging or refetching accesses.  The compiler
  499  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
  500  * but only when the compiler is aware of some particular ordering.  One way
  501  * to make the compiler aware of ordering is to put the two invocations of
  502  * ACCESS_ONCE() in different C statements.
  503  *
  504  * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
  505  * on a union member will work as long as the size of the member matches the
  506  * size of the union and the size is smaller than word size.
  507  *
  508  * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
  509  * between process-level code and irq/NMI handlers, all running on the same CPU,
   510  * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
  511  * mutilate accesses that either do not require ordering or that interact
  512  * with an explicit memory barrier or atomic instruction that provides the
  513  * required ordering.
  514  *
  515  * If possible use READ_ONCE()/WRITE_ONCE() instead.
  516  */
  517 #define __ACCESS_ONCE(x) ({ \
  518 	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
  519 	(volatile typeof(x) *)&(x); })
  520 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
  521 
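/*
 * Illustrative sketch (hypothetical flag): ACCESS_ONCE() forces a fresh
 * load on every iteration, so the compiler cannot hoist the read out of
 * the loop:
 *
 *	static int stop_flag;
 *
 *	static void foo_wait_for_stop(void)
 *	{
 *		while (!ACCESS_ONCE(stop_flag))
 *			cpu_relax();
 *	}
 */
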
  522 /**
  523  * lockless_dereference() - safely load a pointer for later dereference
  524  * @p: The pointer to load
  525  *
  526  * Similar to rcu_dereference(), but for situations where the pointed-to
  527  * object's lifetime is managed by something other than RCU.  That
  528  * "something other" might be reference counting or simple immortality.
  529  *
  530  * The seemingly unused void * variable is to validate @p is indeed a pointer
  531  * type. All pointer types silently cast to void *.
  532  */
  533 #define lockless_dereference(p) \
  534 ({ \
  535 	typeof(p) _________p1 = READ_ONCE(p); \
  536 	__maybe_unused const void * const _________p2 = _________p1; \
  537 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
  538 	(_________p1); \
  539 })
  540 
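/*
 * Illustrative sketch (hypothetical immortal object): load a pointer
 * published by another CPU before reading through it:
 *
 *	struct foo_cfg { int level; };
 *	static struct foo_cfg *global_cfg;
 *
 *	static int foo_get_level(void)
 *	{
 *		struct foo_cfg *cfg = lockless_dereference(global_cfg);
 *
 *		return cfg ? cfg->level : 0;
 *	}
 */
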
  541 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
  542 #ifdef CONFIG_KPROBES
  543 # define __kprobes	__attribute__((__section__(".kprobes.text")))
  544 # define nokprobe_inline	__always_inline
  545 #else
  546 # define __kprobes
  547 # define nokprobe_inline	inline
  548 #endif
   549 #endif /* __LINUX_COMPILER_H */

     1 /*
    2  * device.h - generic, centralized driver model
    3  *
    4  * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
    5  * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
    6  * Copyright (c) 2008-2009 Novell Inc.
    7  *
    8  * This file is released under the GPLv2
    9  *
   10  * See Documentation/driver-model/ for more information.
   11  */
   12 
   13 #ifndef _DEVICE_H_
   14 #define _DEVICE_H_
   15 
   16 #include <linux/ioport.h>
   17 #include <linux/kobject.h>
   18 #include <linux/klist.h>
   19 #include <linux/list.h>
   20 #include <linux/lockdep.h>
   21 #include <linux/compiler.h>
   22 #include <linux/types.h>
   23 #include <linux/mutex.h>
   24 #include <linux/pinctrl/devinfo.h>
   25 #include <linux/pm.h>
   26 #include <linux/atomic.h>
   27 #include <linux/ratelimit.h>
   28 #include <linux/uidgid.h>
   29 #include <linux/gfp.h>
   30 #include <asm/device.h>
   31 
   32 struct device;
   33 struct device_private;
   34 struct device_driver;
   35 struct driver_private;
   36 struct module;
   37 struct class;
   38 struct subsys_private;
   39 struct bus_type;
   40 struct device_node;
   41 struct fwnode_handle;
   42 struct iommu_ops;
   43 struct iommu_group;
   44 
   45 struct bus_attribute {
   46 	struct attribute	attr;
   47 	ssize_t (*show)(struct bus_type *bus, char *buf);
   48 	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
   49 };
   50 
   51 #define BUS_ATTR(_name, _mode, _show, _store)	\
   52 	struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
   53 #define BUS_ATTR_RW(_name) \
   54 	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
   55 #define BUS_ATTR_RO(_name) \
   56 	struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
   57 
   58 extern int __must_check bus_create_file(struct bus_type *,
   59 					struct bus_attribute *);
   60 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
   61 
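/*
 * Illustrative sketch (hypothetical "foo" bus): BUS_ATTR_RO(_name) pairs
 * _name with a _name##_show() function and yields bus_attr_##_name for
 * bus_create_file():
 *
 *	static ssize_t version_show(struct bus_type *bus, char *buf)
 *	{
 *		return sprintf(buf, "1.0\n");
 *	}
 *	static BUS_ATTR_RO(version);
 *
 * At bus-init time the file is then created with
 * bus_create_file(&foo_bus_type, &bus_attr_version).
 */
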
   62 /**
   63  * struct bus_type - The bus type of the device
   64  *
   65  * @name:	The name of the bus.
   66  * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
   67  * @dev_root:	Default device to use as the parent.
   68  * @dev_attrs:	Default attributes of the devices on the bus.
   69  * @bus_groups:	Default attributes of the bus.
   70  * @dev_groups:	Default attributes of the devices on the bus.
   71  * @drv_groups: Default attributes of the device drivers on the bus.
   72  * @match:	Called, perhaps multiple times, whenever a new device or driver
   73  *		is added for this bus. It should return a positive value if the
   74  *		given device can be handled by the given driver and zero
   75  *		otherwise. It may also return error code if determining that
   76  *		the driver supports the device is not possible. In case of
   77  *		-EPROBE_DEFER it will queue the device for deferred probing.
   78  * @uevent:	Called when a device is added, removed, or a few other things
   79  *		that generate uevents to add the environment variables.
    80  * @probe:	Called when a new device or driver is added to this bus;
    81  *		calls the specific driver's probe to initialize the matched device.
    82  * @remove:	Called when a device is removed from this bus.
   83  * @shutdown:	Called at shut-down time to quiesce the device.
   84  *
   85  * @online:	Called to put the device back online (after offlining it).
   86  * @offline:	Called to put the device offline for hot-removal. May fail.
   87  *
   88  * @suspend:	Called when a device on this bus wants to go to sleep mode.
   89  * @resume:	Called to bring a device on this bus out of sleep mode.
   90  * @pm:		Power management operations of this bus, callback the specific
   91  *		device driver's pm-ops.
   92  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
   93  *              driver implementations to a bus and allow the driver to do
   94  *              bus-specific setup
   95  * @p:		The private data of the driver core, only the driver core can
   96  *		touch this.
   97  * @lock_key:	Lock class key for use by the lock validator
   98  *
   99  * A bus is a channel between the processor and one or more devices. For the
  100  * purposes of the device model, all devices are connected via a bus, even if
  101  * it is an internal, virtual, "platform" bus. Buses can plug into each other.
  102  * A USB controller is usually a PCI device, for example. The device model
  103  * represents the actual connections between buses and the devices they control.
  104  * A bus is represented by the bus_type structure. It contains the name, the
  105  * default attributes, the bus' methods, PM operations, and the driver core's
  106  * private data.
  107  */
  108 struct bus_type {
  109 	const char		*name;
  110 	const char		*dev_name;
  111 	struct device		*dev_root;
  112 	struct device_attribute	*dev_attrs;	/* use dev_groups instead */
  113 	const struct attribute_group **bus_groups;
  114 	const struct attribute_group **dev_groups;
  115 	const struct attribute_group **drv_groups;
  116 
  117 	int (*match)(struct device *dev, struct device_driver *drv);
  118 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  119 	int (*probe)(struct device *dev);
  120 	int (*remove)(struct device *dev);
  121 	void (*shutdown)(struct device *dev);
  122 
  123 	int (*online)(struct device *dev);
  124 	int (*offline)(struct device *dev);
  125 
  126 	int (*suspend)(struct device *dev, pm_message_t state);
  127 	int (*resume)(struct device *dev);
  128 
  129 	const struct dev_pm_ops *pm;
  130 
  131 	const struct iommu_ops *iommu_ops;
  132 
  133 	struct subsys_private *p;
  134 	struct lock_class_key lock_key;
  135 };
  136 
  137 extern int __must_check bus_register(struct bus_type *bus);
  138 
  139 extern void bus_unregister(struct bus_type *bus);
  140 
  141 extern int __must_check bus_rescan_devices(struct bus_type *bus);
  142 
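/*
 * Illustrative sketch (hypothetical "foo" bus): a minimal bus_type whose
 * match callback binds drivers to devices by name:
 *
 *	static int foo_match(struct device *dev, struct device_driver *drv)
 *	{
 *		return strcmp(dev_name(dev), drv->name) == 0;
 *	}
 *
 *	static struct bus_type foo_bus_type = {
 *		.name	= "foo",
 *		.match	= foo_match,
 *	};
 *
 * bus_register(&foo_bus_type) is then called once at init time and
 * bus_unregister(&foo_bus_type) at exit time.
 */
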
  143 /* iterator helpers for buses */
  144 struct subsys_dev_iter {
  145 	struct klist_iter		ki;
  146 	const struct device_type	*type;
  147 };
  148 void subsys_dev_iter_init(struct subsys_dev_iter *iter,
  149 			 struct bus_type *subsys,
  150 			 struct device *start,
  151 			 const struct device_type *type);
  152 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
  153 void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
  154 
  155 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
  156 		     int (*fn)(struct device *dev, void *data));
  157 struct device *bus_find_device(struct bus_type *bus, struct device *start,
  158 			       void *data,
  159 			       int (*match)(struct device *dev, void *data));
  160 struct device *bus_find_device_by_name(struct bus_type *bus,
  161 				       struct device *start,
  162 				       const char *name);
  163 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
  164 					struct device *hint);
  165 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
  166 		     void *data, int (*fn)(struct device_driver *, void *));
  167 void bus_sort_breadthfirst(struct bus_type *bus,
  168 			   int (*compare)(const struct device *a,
  169 					  const struct device *b));
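
/*
 * Illustrative sketch: bus_for_each_dev() walks every device on a bus;
 * iteration stops early if the callback returns non-zero:
 *
 *	static int foo_count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	static int foo_count_devices(struct bus_type *bus)
 *	{
 *		int count = 0;
 *
 *		bus_for_each_dev(bus, NULL, &count, foo_count_one);
 *		return count;
 *	}
 */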
  170 /*
  171  * Bus notifiers: Get notified of addition/removal of devices
  172  * and binding/unbinding of drivers to devices.
  173  * In the long run, it should be a replacement for the platform
  174  * notify hooks.
  175  */
  176 struct notifier_block;
  177 
  178 extern int bus_register_notifier(struct bus_type *bus,
  179 				 struct notifier_block *nb);
  180 extern int bus_unregister_notifier(struct bus_type *bus,
  181 				   struct notifier_block *nb);
  182 
   183 /* All notifiers below get called with the target struct device *
  184  * as an argument. Note that those functions are likely to be called
  185  * with the device lock held in the core, so be careful.
  186  */
  187 #define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
  188 #define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
  189 #define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
  190 #define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
  191 						      bound */
  192 #define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
  193 #define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
  194 						      unbound */
  195 #define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
  196 						      from the device */
  197 #define BUS_NOTIFY_DRIVER_NOT_BOUND	0x00000008 /* driver fails to be bound */
  198 
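/*
 * Illustrative sketch (hypothetical handler): a notifier receives one of
 * the BUS_NOTIFY_* actions above together with the affected device:
 *
 *	static int foo_bus_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == BUS_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call	= foo_bus_notify,
 *	};
 *
 * The block is attached with bus_register_notifier(&foo_bus_type, &foo_nb),
 * foo_bus_type being the hypothetical bus from the earlier sketch.
 */
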
  199 extern struct kset *bus_get_kset(struct bus_type *bus);
  200 extern struct klist *bus_get_device_klist(struct bus_type *bus);
  201 
  202 /**
  203  * enum probe_type - device driver probe type to try
  204  *	Device drivers may opt in for special handling of their
  205  *	respective probe routines. This tells the core what to
  206  *	expect and prefer.
  207  *
  208  * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
  209  *	whether probed synchronously or asynchronously.
   210  * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
  211  *	probing order is not essential for booting the system may
  212  *	opt into executing their probes asynchronously.
  213  * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
  214  *	their probe routines to run synchronously with driver and
  215  *	device registration (with the exception of -EPROBE_DEFER
  216  *	handling - re-probing always ends up being done asynchronously).
  217  *
  218  * Note that the end goal is to switch the kernel to use asynchronous
  219  * probing by default, so annotating drivers with
  220  * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
  221  * to speed up boot process while we are validating the rest of the
  222  * drivers.
  223  */
  224 enum probe_type {
  225 	PROBE_DEFAULT_STRATEGY,
  226 	PROBE_PREFER_ASYNCHRONOUS,
  227 	PROBE_FORCE_SYNCHRONOUS,
  228 };
  229 
  230 /**
  231  * struct device_driver - The basic device driver structure
  232  * @name:	Name of the device driver.
  233  * @bus:	The bus which the device of this driver belongs to.
  234  * @owner:	The module owner.
  235  * @mod_name:	Used for built-in modules.
  236  * @suppress_bind_attrs: Disables bind/unbind via sysfs.
  237  * @probe_type:	Type of the probe (synchronous or asynchronous) to use.
  238  * @of_match_table: The open firmware table.
  239  * @acpi_match_table: The ACPI match table.
  240  * @probe:	Called to query the existence of a specific device,
  241  *		whether this driver can work with it, and bind the driver
  242  *		to a specific device.
  243  * @remove:	Called when the device is removed from the system to
  244  *		unbind a device from this driver.
  245  * @shutdown:	Called at shut-down time to quiesce the device.
  246  * @suspend:	Called to put the device to sleep mode. Usually to a
  247  *		low power state.
  248  * @resume:	Called to bring a device from sleep mode.
  249  * @groups:	Default attributes that get created by the driver core
  250  *		automatically.
  251  * @pm:		Power management operations of the device which matched
  252  *		this driver.
  253  * @p:		Driver core's private data, no one other than the driver
  254  *		core can touch this.
  255  *
  256  * The device driver-model tracks all of the drivers known to the system.
  257  * The main reason for this tracking is to enable the driver core to match
  258  * up drivers with new devices. Once drivers are known objects within the
  259  * system, however, a number of other things become possible. Device drivers
  260  * can export information and configuration variables that are independent
  261  * of any specific device.
  262  */
  263 struct device_driver {
  264 	const char		*name;
  265 	struct bus_type		*bus;
  266 
  267 	struct module		*owner;
  268 	const char		*mod_name;	/* used for built-in modules */
  269 
  270 	bool suppress_bind_attrs;	/* disables bind/unbind via sysfs */
  271 	enum probe_type probe_type;
  272 
  273 	const struct of_device_id	*of_match_table;
  274 	const struct acpi_device_id	*acpi_match_table;
  275 
  276 	int (*probe) (struct device *dev);
  277 	int (*remove) (struct device *dev);
  278 	void (*shutdown) (struct device *dev);
  279 	int (*suspend) (struct device *dev, pm_message_t state);
  280 	int (*resume) (struct device *dev);
  281 	const struct attribute_group **groups;
  282 
  283 	const struct dev_pm_ops *pm;
  284 
  285 	struct driver_private *p;
  286 };
  287 
  288 
  289 extern int __must_check driver_register(struct device_driver *drv);
  290 extern void driver_unregister(struct device_driver *drv);
  291 
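/*
 * Illustrative sketch (hypothetical driver): a minimal device_driver for
 * the "foo" bus sketched above:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_info(dev, "bound\n");
 *		return 0;
 *	}
 *
 *	static struct device_driver foo_driver = {
 *		.name	= "foo-dev",
 *		.bus	= &foo_bus_type,
 *		.owner	= THIS_MODULE,
 *		.probe	= foo_probe,
 *	};
 *
 * driver_register(&foo_driver) is called at module init and
 * driver_unregister(&foo_driver) at module exit.
 */
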
  292 extern struct device_driver *driver_find(const char *name,
  293 					 struct bus_type *bus);
  294 extern int driver_probe_done(void);
  295 extern void wait_for_device_probe(void);
  296 
  297 
  298 /* sysfs interface for exporting driver attributes */
  299 
  300 struct driver_attribute {
  301 	struct attribute attr;
  302 	ssize_t (*show)(struct device_driver *driver, char *buf);
  303 	ssize_t (*store)(struct device_driver *driver, const char *buf,
  304 			 size_t count);
  305 };
  306 
  307 #define DRIVER_ATTR(_name, _mode, _show, _store) \
  308 	struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
  309 #define DRIVER_ATTR_RW(_name) \
  310 	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
  311 #define DRIVER_ATTR_RO(_name) \
  312 	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
  313 #define DRIVER_ATTR_WO(_name) \
  314 	struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
  315 
  316 extern int __must_check driver_create_file(struct device_driver *driver,
  317 					const struct driver_attribute *attr);
  318 extern void driver_remove_file(struct device_driver *driver,
  319 			       const struct driver_attribute *attr);
  320 
  321 extern int __must_check driver_for_each_device(struct device_driver *drv,
  322 					       struct device *start,
  323 					       void *data,
  324 					       int (*fn)(struct device *dev,
  325 							 void *));
  326 struct device *driver_find_device(struct device_driver *drv,
  327 				  struct device *start, void *data,
  328 				  int (*match)(struct device *dev, void *data));
  329 
  330 /**
  331  * struct subsys_interface - interfaces to device functions
  332  * @name:       name of the device function
   333  * @subsys:     subsystem of the devices to attach to
  334  * @node:       the list of functions registered at the subsystem
  335  * @add_dev:    device hookup to device function handler
  336  * @remove_dev: device hookup to device function handler
  337  *
  338  * Simple interfaces attached to a subsystem. Multiple interfaces can
  339  * attach to a subsystem and its devices. Unlike drivers, they do not
  340  * exclusively claim or control devices. Interfaces usually represent
  341  * a specific functionality of a subsystem/class of devices.
  342  */
  343 struct subsys_interface {
  344 	const char *name;
  345 	struct bus_type *subsys;
  346 	struct list_head node;
  347 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
  348 	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
  349 };
  350 
  351 int subsys_interface_register(struct subsys_interface *sif);
  352 void subsys_interface_unregister(struct subsys_interface *sif);
  353 
  354 int subsys_system_register(struct bus_type *subsys,
  355 			   const struct attribute_group **groups);
  356 int subsys_virtual_register(struct bus_type *subsys,
  357 			    const struct attribute_group **groups);
  358 
  359 /**
  360  * struct class - device classes
  361  * @name:	Name of the class.
  362  * @owner:	The module owner.
  363  * @class_attrs: Default attributes of this class.
  364  * @dev_groups:	Default attributes of the devices that belong to the class.
  365  * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
  366  * @dev_uevent:	Called when a device is added, removed from this class, or a
  367  *		few other things that generate uevents to add the environment
  368  *		variables.
   369  * @devnode:	Callback to provide a device node name for devtmpfs.
  370  * @class_release: Called to release this class.
  371  * @dev_release: Called to release the device.
  372  * @suspend:	Used to put the device to sleep mode, usually to a low power
  373  *		state.
  374  * @resume:	Used to bring the device from the sleep mode.
   375  * @ns_type:	Callbacks so sysfs can determine namespaces.
   376  * @namespace:	Namespace the devices of this class belong to.
  377  * @pm:		The default device power management operations of this class.
  378  * @p:		The private data of the driver core, no one other than the
  379  *		driver core can touch this.
  380  *
  381  * A class is a higher-level view of a device that abstracts out low-level
  382  * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
  383  * at the class level, they are all simply disks. Classes allow user space
  384  * to work with devices based on what they do, rather than how they are
  385  * connected or how they work.
  386  */
  387 struct class {
  388 	const char		*name;
  389 	struct module		*owner;
  390 
  391 	struct class_attribute		*class_attrs;
  392 	const struct attribute_group	**dev_groups;
  393 	struct kobject			*dev_kobj;
  394 
  395 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
  396 	char *(*devnode)(struct device *dev, umode_t *mode);
  397 
  398 	void (*class_release)(struct class *class);
  399 	void (*dev_release)(struct device *dev);
  400 
  401 	int (*suspend)(struct device *dev, pm_message_t state);
  402 	int (*resume)(struct device *dev);
  403 
  404 	const struct kobj_ns_type_operations *ns_type;
  405 	const void *(*namespace)(struct device *dev);
  406 
  407 	const struct dev_pm_ops *pm;
  408 
  409 	struct subsys_private *p;
  410 };
  411 
  412 struct class_dev_iter {
  413 	struct klist_iter		ki;
  414 	const struct device_type	*type;
  415 };
  416 
  417 extern struct kobject *sysfs_dev_block_kobj;
  418 extern struct kobject *sysfs_dev_char_kobj;
  419 extern int __must_check __class_register(struct class *class,
  420 					 struct lock_class_key *key);
  421 extern void class_unregister(struct class *class);
  422 
  423 /* This is a #define to keep the compiler from merging different
  424  * instances of the __key variable */
  425 #define class_register(class)			\
  426 ({						\
  427 	static struct lock_class_key __key;	\
  428 	__class_register(class, &__key);	\
  429 })
  430 
  431 struct class_compat;
  432 struct class_compat *class_compat_register(const char *name);
  433 void class_compat_unregister(struct class_compat *cls);
  434 int class_compat_create_link(struct class_compat *cls, struct device *dev,
  435 			     struct device *device_link);
  436 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
  437 			      struct device *device_link);
  438 
  439 extern void class_dev_iter_init(struct class_dev_iter *iter,
  440 				struct class *class,
  441 				struct device *start,
  442 				const struct device_type *type);
  443 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
  444 extern void class_dev_iter_exit(struct class_dev_iter *iter);
  445 
  446 extern int class_for_each_device(struct class *class, struct device *start,
  447 				 void *data,
  448 				 int (*fn)(struct device *dev, void *data));
  449 extern struct device *class_find_device(struct class *class,
  450 					struct device *start, const void *data,
  451 					int (*match)(struct device *, const void *));
  452 
  453 struct class_attribute {
  454 	struct attribute attr;
  455 	ssize_t (*show)(struct class *class, struct class_attribute *attr,
  456 			char *buf);
  457 	ssize_t (*store)(struct class *class, struct class_attribute *attr,
  458 			const char *buf, size_t count);
  459 };
  460 
  461 #define CLASS_ATTR(_name, _mode, _show, _store) \
  462 	struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
  463 #define CLASS_ATTR_RW(_name) \
  464 	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
  465 #define CLASS_ATTR_RO(_name) \
  466 	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
  467 
  468 extern int __must_check class_create_file_ns(struct class *class,
  469 					     const struct class_attribute *attr,
  470 					     const void *ns);
  471 extern void class_remove_file_ns(struct class *class,
  472 				 const struct class_attribute *attr,
  473 				 const void *ns);
  474 
  475 static inline int __must_check class_create_file(struct class *class,
  476 					const struct class_attribute *attr)
  477 {
  478 	return class_create_file_ns(class, attr, NULL);
  479 }
  480 
  481 static inline void class_remove_file(struct class *class,
  482 				     const struct class_attribute *attr)
  483 {
  484 	return class_remove_file_ns(class, attr, NULL);
  485 }
  486 
  487 /* Simple class attribute that is just a static string */
  488 struct class_attribute_string {
  489 	struct class_attribute attr;
  490 	char *str;
  491 };
  492 
  493 /* Currently read-only only */
  494 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
  495 	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
  496 #define CLASS_ATTR_STRING(_name, _mode, _str) \
  497 	struct class_attribute_string class_attr_##_name = \
  498 		_CLASS_ATTR_STRING(_name, _mode, _str)
  499 
  500 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
  501                         char *buf);
  502 
  503 struct class_interface {
  504 	struct list_head	node;
  505 	struct class		*class;
  506 
  507 	int (*add_dev)		(struct device *, struct class_interface *);
  508 	void (*remove_dev)	(struct device *, struct class_interface *);
  509 };
  510 
  511 extern int __must_check class_interface_register(struct class_interface *);
  512 extern void class_interface_unregister(struct class_interface *);
  513 
  514 extern struct class * __must_check __class_create(struct module *owner,
  515 						  const char *name,
  516 						  struct lock_class_key *key);
  517 extern void class_destroy(struct class *cls);
  518 
  519 /* This is a #define to keep the compiler from merging different
  520  * instances of the __key variable */
  521 #define class_create(owner, name)		\
  522 ({						\
  523 	static struct lock_class_key __key;	\
  524 	__class_create(owner, name, &__key);	\
  525 })
  526 
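/*
 * Illustrative sketch (hypothetical class and major number): create a
 * class and one device under it; with devtmpfs this also yields
 * /dev/foo0:
 *
 *	static struct class *foo_class;
 *
 *	foo_class = class_create(THIS_MODULE, "foo");
 *	if (IS_ERR(foo_class))
 *		return PTR_ERR(foo_class);
 *	device_create(foo_class, NULL, MKDEV(foo_major, 0), NULL, "foo%d", 0);
 *
 * Teardown mirrors creation: device_destroy(foo_class, MKDEV(foo_major, 0))
 * followed by class_destroy(foo_class).
 */
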
  527 /*
   528  * The type of device that "struct device" is embedded in. A class
  529  * or bus can contain devices of different types
  530  * like "partitions" and "disks", "mouse" and "event".
  531  * This identifies the device type and carries type-specific
  532  * information, equivalent to the kobj_type of a kobject.
  533  * If "name" is specified, the uevent will contain it in
  534  * the DEVTYPE variable.
  535  */
  536 struct device_type {
  537 	const char *name;
  538 	const struct attribute_group **groups;
  539 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
  540 	char *(*devnode)(struct device *dev, umode_t *mode,
  541 			 kuid_t *uid, kgid_t *gid);
  542 	void (*release)(struct device *dev);
  543 
  544 	const struct dev_pm_ops *pm;
  545 };
  546 
  547 /* interface for exporting device attributes */
  548 struct device_attribute {
  549 	struct attribute	attr;
  550 	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
  551 			char *buf);
  552 	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
  553 			 const char *buf, size_t count);
  554 };
  555 
  556 struct dev_ext_attribute {
  557 	struct device_attribute attr;
  558 	void *var;
  559 };
  560 
  561 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
  562 			  char *buf);
  563 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
  564 			   const char *buf, size_t count);
  565 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
  566 			char *buf);
  567 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
  568 			 const char *buf, size_t count);
  569 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
  570 			char *buf);
  571 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
  572 			 const char *buf, size_t count);
  573 
  574 #define DEVICE_ATTR(_name, _mode, _show, _store) \
  575 	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
  576 #define DEVICE_ATTR_RW(_name) \
  577 	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
  578 #define DEVICE_ATTR_RO(_name) \
  579 	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
  580 #define DEVICE_ATTR_WO(_name) \
  581 	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
  582 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
  583 	struct dev_ext_attribute dev_attr_##_name = \
  584 		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
  585 #define DEVICE_INT_ATTR(_name, _mode, _var) \
  586 	struct dev_ext_attribute dev_attr_##_name = \
  587 		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
  588 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
  589 	struct dev_ext_attribute dev_attr_##_name = \
  590 		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
  591 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
  592 	struct device_attribute dev_attr_##_name =		\
  593 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
  594 
  595 extern int device_create_file(struct device *device,
  596 			      const struct device_attribute *entry);
  597 extern void device_remove_file(struct device *dev,
  598 			       const struct device_attribute *attr);
  599 extern bool device_remove_file_self(struct device *dev,
  600 				    const struct device_attribute *attr);
  601 extern int __must_check device_create_bin_file(struct device *dev,
  602 					const struct bin_attribute *attr);
  603 extern void device_remove_bin_file(struct device *dev,
  604 				   const struct bin_attribute *attr);
  605 
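/*
 * Illustrative sketch (hypothetical attribute): DEVICE_ATTR_RW(_name)
 * expects _name##_show()/_name##_store() and yields dev_attr_##_name for
 * device_create_file():
 *
 *	static int foo_level;
 *
 *	static ssize_t level_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", foo_level);
 *	}
 *
 *	static ssize_t level_store(struct device *dev,
 *				   struct device_attribute *attr,
 *				   const char *buf, size_t count)
 *	{
 *		if (kstrtoint(buf, 0, &foo_level))
 *			return -EINVAL;
 *		return count;
 *	}
 *	static DEVICE_ATTR_RW(level);
 */
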
  606 /* device resource management */
  607 typedef void (*dr_release_t)(struct device *dev, void *res);
  608 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
  609 
  610 #ifdef CONFIG_DEBUG_DEVRES
  611 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
  612 				 int nid, const char *name) __malloc;
  613 #define devres_alloc(release, size, gfp) \
  614 	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
  615 #define devres_alloc_node(release, size, gfp, nid) \
  616 	__devres_alloc_node(release, size, gfp, nid, #release)
  617 #else
  618 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
  619 			       int nid) __malloc;
  620 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
  621 {
  622 	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
  623 }
  624 #endif
  625 
  626 extern void devres_for_each_res(struct device *dev, dr_release_t release,
  627 				dr_match_t match, void *match_data,
  628 				void (*fn)(struct device *, void *, void *),
  629 				void *data);
  630 extern void devres_free(void *res);
  631 extern void devres_add(struct device *dev, void *res);
  632 extern void *devres_find(struct device *dev, dr_release_t release,
  633 			 dr_match_t match, void *match_data);
  634 extern void *devres_get(struct device *dev, void *new_res,
  635 			dr_match_t match, void *match_data);
  636 extern void *devres_remove(struct device *dev, dr_release_t release,
  637 			   dr_match_t match, void *match_data);
  638 extern int devres_destroy(struct device *dev, dr_release_t release,
  639 			  dr_match_t match, void *match_data);
  640 extern int devres_release(struct device *dev, dr_release_t release,
  641 			  dr_match_t match, void *match_data);
  642 
  643 /* devres group */
  644 extern void * __must_check devres_open_group(struct device *dev, void *id,
  645 					     gfp_t gfp);
  646 extern void devres_close_group(struct device *dev, void *id);
  647 extern void devres_remove_group(struct device *dev, void *id);
  648 extern int devres_release_group(struct device *dev, void *id);
  649 
  650 /* managed devm_k.alloc/kfree for device drivers */
  651 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
  652 extern __printf(3, 0)
  653 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
  654 		      va_list ap) __malloc;
  655 extern __printf(3, 4)
  656 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
  657 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
  658 {
  659 	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
  660 }
  661 static inline void *devm_kmalloc_array(struct device *dev,
  662 				       size_t n, size_t size, gfp_t flags)
  663 {
  664 	if (size != 0 && n > SIZE_MAX / size)
  665 		return NULL;
  666 	return devm_kmalloc(dev, n * size, flags);
  667 }
  668 static inline void *devm_kcalloc(struct device *dev,
  669 				 size_t n, size_t size, gfp_t flags)
  670 {
  671 	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
  672 }
  673 extern void devm_kfree(struct device *dev, void *p);
  674 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
  675 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
  676 			  gfp_t gfp);
  677 
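/*
 * Illustrative sketch (hypothetical probe and private struct): memory
 * from devm_kzalloc() is released automatically when the device is
 * unbound, so error and remove paths need no kfree():
 *
 *	struct foo_priv { int irq; };
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		dev_set_drvdata(dev, priv);
 *		return 0;
 *	}
 */
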
  678 extern unsigned long devm_get_free_pages(struct device *dev,
  679 					 gfp_t gfp_mask, unsigned int order);
  680 extern void devm_free_pages(struct device *dev, unsigned long addr);
  681 
  682 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
  683 
  684 /* allows to add/remove a custom action to devres stack */
  685 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
  686 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
  687 
  688 static inline int devm_add_action_or_reset(struct device *dev,
  689 					   void (*action)(void *), void *data)
  690 {
  691 	int ret;
  692 
  693 	ret = devm_add_action(dev, action, data);
  694 	if (ret)
  695 		action(data);
  696 
  697 	return ret;
  698 }
  699 
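/*
 * Illustrative sketch (hypothetical clock): register a custom cleanup
 * with devres; if registration fails, the action runs immediately, so
 * the caller needs no separate error path:
 *
 *	static void foo_clk_off(void *clk)
 *	{
 *		clk_disable_unprepare(clk);
 *	}
 *
 * After clk_prepare_enable(clk), calling
 * devm_add_action_or_reset(dev, foo_clk_off, clk) guarantees the clock
 * is disabled again whether the probe fails or the device is unbound.
 */
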
  700 struct device_dma_parameters {
  701 	/*
  702 	 * a low level driver may set these to teach IOMMU code about
  703 	 * sg limitations.
  704 	 */
  705 	unsigned int max_segment_size;
  706 	unsigned long segment_boundary_mask;
  707 };
  708 
  709 /**
  710  * struct device - The basic device structure
  711  * @parent:	The device's "parent" device, the device to which it is attached.
  712  * 		In most cases, a parent device is some sort of bus or host
   713  * 		controller. If parent is NULL, the device is a top-level device,
  714  * 		which is not usually what you want.
  715  * @p:		Holds the private data of the driver core portions of the device.
  716  * 		See the comment of the struct device_private for detail.
  717  * @kobj:	A top-level, abstract class from which other classes are derived.
  718  * @init_name:	Initial name of the device.
  719  * @type:	The type of device.
  720  * 		This identifies the device type and carries type-specific
  721  * 		information.
  722  * @mutex:	Mutex to synchronize calls to its driver.
  723  * @bus:	Type of bus device is on.
   724  * @driver:	Which driver has allocated this device.
  725  * @platform_data: Platform data specific to the device.
  726  * 		Example: For devices on custom boards, as typical of embedded
  727  * 		and SOC based hardware, Linux often uses platform_data to point
  728  * 		to board-specific structures describing devices and how they
  729  * 		are wired.  That can include what ports are available, chip
  730  * 		variants, which GPIO pins act in what additional roles, and so
  731  * 		on.  This shrinks the "Board Support Packages" (BSPs) and
  732  * 		minimizes board-specific #ifdefs in drivers.
  733  * @driver_data: Private pointer for driver specific info.
  734  * @power:	For device power management.
  735  * 		See Documentation/power/devices.txt for details.
  736  * @pm_domain:	Provide callbacks that are executed during system suspend,
  737  * 		hibernation, system resume and during runtime PM transitions
  738  * 		along with subsystem-level and driver-level callbacks.
  739  * @pins:	For device pin management.
  740  *		See Documentation/pinctrl.txt for details.
  741  * @msi_list:	Hosts MSI descriptors
  742  * @msi_domain: The generic MSI domain this device is using.
  743  * @numa_node:	NUMA node this device is close to.
  744  * @dma_mask:	Dma mask (if dma'ble device).
   745  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as
   746  * 		not all hardware supports 64-bit addresses for consistent
   747  * 		allocations of such descriptors.
   748  * @dma_pfn_offset: offset of the DMA memory range relative to RAM
  749  * @dma_parms:	A low level driver may set these to teach IOMMU code about
  750  * 		segment limitations.
  751  * @dma_pools:	Dma pools (if dma'ble device).
  752  * @dma_mem:	Internal for coherent mem override.
  753  * @cma_area:	Contiguous memory area for dma allocations
  754  * @archdata:	For arch-specific additions.
  755  * @of_node:	Associated device tree node.
  756  * @fwnode:	Associated device node supplied by platform firmware.
  757  * @devt:	For creating the sysfs "dev".
  758  * @id:		device instance
  759  * @devres_lock: Spinlock to protect the resource of the device.
  760  * @devres_head: The resources list of the device.
  761  * @knode_class: The node used to add the device to the class list.
  762  * @class:	The class of the device.
  763  * @groups:	Optional attribute groups.
  764  * @release:	Callback to free the device after all references have
  765  * 		gone away. This should be set by the allocator of the
  766  * 		device (i.e. the bus driver that discovered the device).
  767  * @iommu_group: IOMMU group the device belongs to.
  768  *
  769  * @offline_disabled: If set, the device is permanently online.
  770  * @offline:	Set after successful invocation of bus type's .offline().
  771  *
  772  * At the lowest level, every device in a Linux system is represented by an
  773  * instance of struct device. The device structure contains the information
  774  * that the device model core needs to model the system. Most subsystems,
  775  * however, track additional information about the devices they host. As a
  776  * result, it is rare for devices to be represented by bare device structures;
  777  * instead, that structure, like kobject structures, is usually embedded within
  778  * a higher-level representation of the device.
  779  */
  780 struct device {
  781 	struct device		*parent;
  782 
  783 	struct device_private	*p;
  784 
  785 	struct kobject kobj;
  786 	const char		*init_name; /* initial name of the device */
  787 	const struct device_type *type;
  788 
  789 	struct mutex		mutex;	/* mutex to synchronize calls to
  790 					 * its driver.
  791 					 */
  792 
  793 	struct bus_type	*bus;		/* type of bus device is on */
  794 	struct device_driver *driver;	/* which driver has allocated this
  795 					   device */
  796 	void		*platform_data;	/* Platform specific data, device
  797 					   core doesn't touch it */
  798 	void		*driver_data;	/* Driver data, set and get with
  799 					   dev_set/get_drvdata */
  800 	struct dev_pm_info	power;
  801 	struct dev_pm_domain	*pm_domain;
  802 
  803 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  804 	struct irq_domain	*msi_domain;
  805 #endif
  806 #ifdef CONFIG_PINCTRL
  807 	struct dev_pin_info	*pins;
  808 #endif
  809 #ifdef CONFIG_GENERIC_MSI_IRQ
  810 	struct list_head	msi_list;
  811 #endif
  812 
  813 #ifdef CONFIG_NUMA
  814 	int		numa_node;	/* NUMA node this device is close to */
  815 #endif
  816 	u64		*dma_mask;	/* dma mask (if dma'able device) */
  817 	u64		coherent_dma_mask;/* Like dma_mask, but for
  818 					     alloc_coherent mappings as
  819 					     not all hardware supports
  820 					     64 bit addresses for consistent
   821 					     allocations of such descriptors. */
  822 	unsigned long	dma_pfn_offset;
  823 
  824 	struct device_dma_parameters *dma_parms;
  825 
  826 	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
  827 
  828 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
  829 					     override */
  830 #ifdef CONFIG_DMA_CMA
  831 	struct cma *cma_area;		/* contiguous memory area for dma
  832 					   allocations */
  833 #endif
  834 	/* arch specific additions */
  835 	struct dev_archdata	archdata;
  836 
  837 	struct device_node	*of_node; /* associated device tree node */
  838 	struct fwnode_handle	*fwnode; /* firmware device node */
  839 
  840 	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
  841 	u32			id;	/* device instance */
  842 
  843 	spinlock_t		devres_lock;
  844 	struct list_head	devres_head;
  845 
  846 	struct klist_node	knode_class;
  847 	struct class		*class;
  848 	const struct attribute_group **groups;	/* optional groups */
  849 
  850 	void	(*release)(struct device *dev);
  851 	struct iommu_group	*iommu_group;
  852 
  853 	bool			offline_disabled:1;
  854 	bool			offline:1;
  855 };
  856 
  857 static inline struct device *kobj_to_dev(struct kobject *kobj)
  858 {
  859 	return container_of(kobj, struct device, kobj);
  860 }
  861 
  862 /* Get the wakeup routines, which depend on struct device */
  863 #include <linux/pm_wakeup.h>
  864 
  865 static inline const char *dev_name(const struct device *dev)
  866 {
  867 	/* Use the init name until the kobject becomes available */
  868 	if (dev->init_name)
  869 		return dev->init_name;
  870 
  871 	return kobject_name(&dev->kobj);
  872 }
  873 
  874 extern __printf(2, 3)
  875 int dev_set_name(struct device *dev, const char *name, ...);
  876 
  877 #ifdef CONFIG_NUMA
  878 static inline int dev_to_node(struct device *dev)
  879 {
  880 	return dev->numa_node;
  881 }
  882 static inline void set_dev_node(struct device *dev, int node)
  883 {
  884 	dev->numa_node = node;
  885 }
  886 #else
  887 static inline int dev_to_node(struct device *dev)
  888 {
  889 	return -1;
  890 }
  891 static inline void set_dev_node(struct device *dev, int node)
  892 {
  893 }
  894 #endif
  895 
  896 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
  897 {
  898 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  899 	return dev->msi_domain;
  900 #else
  901 	return NULL;
  902 #endif
  903 }
  904 
  905 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
  906 {
  907 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  908 	dev->msi_domain = d;
  909 #endif
  910 }
  911 
  912 static inline void *dev_get_drvdata(const struct device *dev)
  913 {
  914 	return dev->driver_data;
  915 }
  916 
  917 static inline void dev_set_drvdata(struct device *dev, void *data)
  918 {
  919 	dev->driver_data = data;
  920 }
  921 
  922 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
  923 {
  924 	return dev ? dev->power.subsys_data : NULL;
  925 }
  926 
  927 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
  928 {
  929 	return dev->kobj.uevent_suppress;
  930 }
  931 
  932 static inline void dev_set_uevent_suppress(struct device *dev, int val)
  933 {
  934 	dev->kobj.uevent_suppress = val;
  935 }
  936 
  937 static inline int device_is_registered(struct device *dev)
  938 {
  939 	return dev->kobj.state_in_sysfs;
  940 }
  941 
  942 static inline void device_enable_async_suspend(struct device *dev)
  943 {
  944 	if (!dev->power.is_prepared)
  945 		dev->power.async_suspend = true;
  946 }
  947 
  948 static inline void device_disable_async_suspend(struct device *dev)
  949 {
  950 	if (!dev->power.is_prepared)
  951 		dev->power.async_suspend = false;
  952 }
  953 
  954 static inline bool device_async_suspend_enabled(struct device *dev)
  955 {
  956 	return !!dev->power.async_suspend;
  957 }
  958 
  959 static inline void dev_pm_syscore_device(struct device *dev, bool val)
  960 {
  961 #ifdef CONFIG_PM_SLEEP
  962 	dev->power.syscore = val;
  963 #endif
  964 }
  965 
  966 static inline void device_lock(struct device *dev)
  967 {
  968 	mutex_lock(&dev->mutex);
  969 }
  970 
  971 static inline int device_lock_interruptible(struct device *dev)
  972 {
  973 	return mutex_lock_interruptible(&dev->mutex);
  974 }
  975 
  976 static inline int device_trylock(struct device *dev)
  977 {
  978 	return mutex_trylock(&dev->mutex);
  979 }
  980 
  981 static inline void device_unlock(struct device *dev)
  982 {
  983 	mutex_unlock(&dev->mutex);
  984 }
  985 
  986 static inline void device_lock_assert(struct device *dev)
  987 {
  988 	lockdep_assert_held(&dev->mutex);
  989 }
  990 
  991 static inline struct device_node *dev_of_node(struct device *dev)
  992 {
  993 	if (!IS_ENABLED(CONFIG_OF))
  994 		return NULL;
  995 	return dev->of_node;
  996 }
  997 
  998 void driver_init(void);
  999 
 1000 /*
 1001  * High level routines for use by the bus drivers
 1002  */
 1003 extern int __must_check device_register(struct device *dev);
 1004 extern void device_unregister(struct device *dev);
 1005 extern void device_initialize(struct device *dev);
 1006 extern int __must_check device_add(struct device *dev);
 1007 extern void device_del(struct device *dev);
 1008 extern int device_for_each_child(struct device *dev, void *data,
 1009 		     int (*fn)(struct device *dev, void *data));
 1010 extern int device_for_each_child_reverse(struct device *dev, void *data,
 1011 		     int (*fn)(struct device *dev, void *data));
 1012 extern struct device *device_find_child(struct device *dev, void *data,
 1013 				int (*match)(struct device *dev, void *data));
 1014 extern int device_rename(struct device *dev, const char *new_name);
 1015 extern int device_move(struct device *dev, struct device *new_parent,
 1016 		       enum dpm_order dpm_order);
 1017 extern const char *device_get_devnode(struct device *dev,
 1018 				      umode_t *mode, kuid_t *uid, kgid_t *gid,
 1019 				      const char **tmp);
 1020 
 1021 static inline bool device_supports_offline(struct device *dev)
 1022 {
 1023 	return dev->bus && dev->bus->offline && dev->bus->online;
 1024 }
 1025 
 1026 extern void lock_device_hotplug(void);
 1027 extern void unlock_device_hotplug(void);
 1028 extern int lock_device_hotplug_sysfs(void);
 1029 extern int device_offline(struct device *dev);
 1030 extern int device_online(struct device *dev);
 1031 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1032 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 1033 
 1034 /*
 1035  * Root device objects for grouping under /sys/devices
 1036  */
 1037 extern struct device *__root_device_register(const char *name,
 1038 					     struct module *owner);
 1039 
 1040 /* This is a macro to avoid include problems with THIS_MODULE */
 1041 #define root_device_register(name) \
 1042 	__root_device_register(name, THIS_MODULE)
 1043 
 1044 extern void root_device_unregister(struct device *root);
 1045 
 1046 static inline void *dev_get_platdata(const struct device *dev)
 1047 {
 1048 	return dev->platform_data;
 1049 }
 1050 
 1051 /*
 1052  * Manual binding of a device to driver. See drivers/base/bus.c
 1053  * for information on use.
 1054  */
 1055 extern int __must_check device_bind_driver(struct device *dev);
 1056 extern void device_release_driver(struct device *dev);
 1057 extern int  __must_check device_attach(struct device *dev);
 1058 extern int __must_check driver_attach(struct device_driver *drv);
 1059 extern void device_initial_probe(struct device *dev);
 1060 extern int __must_check device_reprobe(struct device *dev);
 1061 
 1062 extern bool device_is_bound(struct device *dev);
 1063 
 1064 /*
 1065  * Easy functions for dynamically creating devices on the fly
 1066  */
 1067 extern __printf(5, 0)
 1068 struct device *device_create_vargs(struct class *cls, struct device *parent,
 1069 				   dev_t devt, void *drvdata,
 1070 				   const char *fmt, va_list vargs);
 1071 extern __printf(5, 6)
 1072 struct device *device_create(struct class *cls, struct device *parent,
 1073 			     dev_t devt, void *drvdata,
 1074 			     const char *fmt, ...);
 1075 extern __printf(6, 7)
 1076 struct device *device_create_with_groups(struct class *cls,
 1077 			     struct device *parent, dev_t devt, void *drvdata,
 1078 			     const struct attribute_group **groups,
 1079 			     const char *fmt, ...);
 1080 extern void device_destroy(struct class *cls, dev_t devt);
 1081 
 1082 /*
  1083  * Platform "fixup" functions - allow the platform to have its say
 1084  * about devices and actions that the general device layer doesn't
 1085  * know about.
 1086  */
 1087 /* Notify platform of device discovery */
 1088 extern int (*platform_notify)(struct device *dev);
 1089 
 1090 extern int (*platform_notify_remove)(struct device *dev);
 1091 
 1092 
 1093 /*
 1094  * get_device - atomically increment the reference count for the device.
 1095  *
 1096  */
 1097 extern struct device *get_device(struct device *dev);
 1098 extern void put_device(struct device *dev);
 1099 
 1100 #ifdef CONFIG_DEVTMPFS
 1101 extern int devtmpfs_create_node(struct device *dev);
 1102 extern int devtmpfs_delete_node(struct device *dev);
 1103 extern int devtmpfs_mount(const char *mntdir);
 1104 #else
 1105 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
 1106 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
 1107 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
 1108 #endif
 1109 
 1110 /* drivers/base/power/shutdown.c */
 1111 extern void device_shutdown(void);
 1112 
 1113 /* debugging and troubleshooting/diagnostic helpers. */
 1114 extern const char *dev_driver_string(const struct device *dev);
 1115 
 1116 
 1117 #ifdef CONFIG_PRINTK
 1118 
 1119 extern __printf(3, 0)
 1120 int dev_vprintk_emit(int level, const struct device *dev,
 1121 		     const char *fmt, va_list args);
 1122 extern __printf(3, 4)
 1123 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
 1124 
 1125 extern __printf(3, 4)
 1126 void dev_printk(const char *level, const struct device *dev,
 1127 		const char *fmt, ...);
 1128 extern __printf(2, 3)
 1129 void dev_emerg(const struct device *dev, const char *fmt, ...);
 1130 extern __printf(2, 3)
 1131 void dev_alert(const struct device *dev, const char *fmt, ...);
 1132 extern __printf(2, 3)
 1133 void dev_crit(const struct device *dev, const char *fmt, ...);
 1134 extern __printf(2, 3)
 1135 void dev_err(const struct device *dev, const char *fmt, ...);
 1136 extern __printf(2, 3)
 1137 void dev_warn(const struct device *dev, const char *fmt, ...);
 1138 extern __printf(2, 3)
 1139 void dev_notice(const struct device *dev, const char *fmt, ...);
 1140 extern __printf(2, 3)
 1141 void _dev_info(const struct device *dev, const char *fmt, ...);
 1142 
 1143 #else
 1144 
 1145 static inline __printf(3, 0)
 1146 int dev_vprintk_emit(int level, const struct device *dev,
 1147 		     const char *fmt, va_list args)
 1148 { return 0; }
 1149 static inline __printf(3, 4)
 1150 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
 1151 { return 0; }
 1152 
 1153 static inline void __dev_printk(const char *level, const struct device *dev,
 1154 				struct va_format *vaf)
 1155 {}
 1156 static inline __printf(3, 4)
 1157 void dev_printk(const char *level, const struct device *dev,
 1158 		const char *fmt, ...)
 1159 {}
 1160 
 1161 static inline __printf(2, 3)
 1162 void dev_emerg(const struct device *dev, const char *fmt, ...)
 1163 {}
 1164 static inline __printf(2, 3)
 1165 void dev_crit(const struct device *dev, const char *fmt, ...)
 1166 {}
 1167 static inline __printf(2, 3)
 1168 void dev_alert(const struct device *dev, const char *fmt, ...)
 1169 {}
 1170 static inline __printf(2, 3)
 1171 void dev_err(const struct device *dev, const char *fmt, ...)
 1172 {}
 1173 static inline __printf(2, 3)
 1174 void dev_warn(const struct device *dev, const char *fmt, ...)
 1175 {}
 1176 static inline __printf(2, 3)
 1177 void dev_notice(const struct device *dev, const char *fmt, ...)
 1178 {}
 1179 static inline __printf(2, 3)
 1180 void _dev_info(const struct device *dev, const char *fmt, ...)
 1181 {}
 1182 
 1183 #endif
 1184 
 1185 /*
  1186  * Stupid hackaround for existing non-printk uses of dev_info.
 1187  *
 1188  * Note that the definition of dev_info below is actually _dev_info
 1189  * and a macro is used to avoid redefining dev_info
 1190  */
 1191 
 1192 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 1193 
 1194 #if defined(CONFIG_DYNAMIC_DEBUG)
 1195 #define dev_dbg(dev, format, ...)		     \
 1196 do {						     \
 1197 	dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
 1198 } while (0)
 1199 #elif defined(DEBUG)
 1200 #define dev_dbg(dev, format, arg...)		\
 1201 	dev_printk(KERN_DEBUG, dev, format, ##arg)
 1202 #else
 1203 #define dev_dbg(dev, format, arg...)				\
 1204 ({								\
 1205 	if (0)							\
 1206 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1207 })
 1208 #endif
 1209 
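/*
 * Illustrative sketch (hypothetical ret/len values): the dev_*() helpers
 * prefix each message with the driver and device name, so plain printk()
 * format strings suffice:
 *
 *	dev_err(dev, "transfer failed: %d\n", ret);
 *	dev_dbg(dev, "queued %zu bytes\n", len);
 */
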
 1210 #ifdef CONFIG_PRINTK
 1211 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1212 do {									\
 1213 	static bool __print_once __read_mostly;				\
 1214 									\
 1215 	if (!__print_once) {						\
 1216 		__print_once = true;					\
 1217 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1218 	}								\
 1219 } while (0)
 1220 #else
 1221 #define dev_level_once(dev_level, dev, fmt, ...)			\
 1222 do {									\
 1223 	if (0)								\
 1224 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1225 } while (0)
 1226 #endif
 1227 
 1228 #define dev_emerg_once(dev, fmt, ...)					\
 1229 	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1230 #define dev_alert_once(dev, fmt, ...)					\
 1231 	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
 1232 #define dev_crit_once(dev, fmt, ...)					\
 1233 	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
 1234 #define dev_err_once(dev, fmt, ...)					\
 1235 	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
 1236 #define dev_warn_once(dev, fmt, ...)					\
 1237 	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
 1238 #define dev_notice_once(dev, fmt, ...)					\
 1239 	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
 1240 #define dev_info_once(dev, fmt, ...)					\
 1241 	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
 1242 #define dev_dbg_once(dev, fmt, ...)					\
 1243 	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
 1244 
 1245 #define dev_level_ratelimited(dev_level, dev, fmt, ...)			\
 1246 do {									\
 1247 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1248 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1249 				      DEFAULT_RATELIMIT_BURST);		\
 1250 	if (__ratelimit(&_rs))						\
 1251 		dev_level(dev, fmt, ##__VA_ARGS__);			\
 1252 } while (0)
 1253 
 1254 #define dev_emerg_ratelimited(dev, fmt, ...)				\
 1255 	dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
 1256 #define dev_alert_ratelimited(dev, fmt, ...)				\
 1257 	dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
 1258 #define dev_crit_ratelimited(dev, fmt, ...)				\
 1259 	dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
 1260 #define dev_err_ratelimited(dev, fmt, ...)				\
 1261 	dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
 1262 #define dev_warn_ratelimited(dev, fmt, ...)				\
 1263 	dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
 1264 #define dev_notice_ratelimited(dev, fmt, ...)				\
 1265 	dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
 1266 #define dev_info_ratelimited(dev, fmt, ...)				\
 1267 	dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
 1268 #if defined(CONFIG_DYNAMIC_DEBUG)
 1269 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
 1270 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1271 do {									\
 1272 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1273 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1274 				      DEFAULT_RATELIMIT_BURST);		\
 1275 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
 1276 	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
 1277 	    __ratelimit(&_rs))						\
 1278 		__dynamic_dev_dbg(&descriptor, dev, fmt,		\
 1279 				  ##__VA_ARGS__);			\
 1280 } while (0)
 1281 #elif defined(DEBUG)
 1282 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1283 do {									\
 1284 	static DEFINE_RATELIMIT_STATE(_rs,				\
 1285 				      DEFAULT_RATELIMIT_INTERVAL,	\
 1286 				      DEFAULT_RATELIMIT_BURST);		\
 1287 	if (__ratelimit(&_rs))						\
 1288 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1289 } while (0)
 1290 #else
 1291 #define dev_dbg_ratelimited(dev, fmt, ...)				\
 1292 do {									\
 1293 	if (0)								\
 1294 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 1295 } while (0)
 1296 #endif
 1297 
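/*
 * Illustrative sketch (not part of the original header): how a driver might
 * use the _once and _ratelimited helpers above.  The function, device and
 * counter names are hypothetical.
 */
static void example_handle_overrun(struct device *dev, unsigned int dropped)
{
	/* Warn exactly once per boot about a persistent condition. */
	dev_warn_once(dev, "legacy firmware detected, throughput reduced\n");

	/* Rate-limit a message that may fire on every interrupt. */
	dev_err_ratelimited(dev, "RX overrun, dropped %u frames\n", dropped);
}
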
 1298 #ifdef VERBOSE_DEBUG
 1299 #define dev_vdbg	dev_dbg
 1300 #else
 1301 #define dev_vdbg(dev, format, arg...)				\
 1302 ({								\
 1303 	if (0)							\
 1304 		dev_printk(KERN_DEBUG, dev, format, ##arg);	\
 1305 })
 1306 #endif
 1307 
 1308 /*
 1309  * dev_WARN*() acts like dev_printk(), but with the key difference of
 1310  * using WARN/WARN_ONCE to include file/line information and a backtrace.
 1311  */
 1312 #define dev_WARN(dev, format, arg...) \
 1313 	WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
 1314 
 1315 #define dev_WARN_ONCE(dev, condition, format, arg...) \
 1316 	WARN_ONCE(condition, "%s %s: " format, \
 1317 			dev_driver_string(dev), dev_name(dev), ## arg)
 1318 
 1319 /* Create alias, so the module can be autoloaded. */
 1320 #define MODULE_ALIAS_CHARDEV(major,minor) \
 1321 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
 1322 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
 1323 	MODULE_ALIAS("char-major-" __stringify(major) "-*")
 1324 
 1325 #ifdef CONFIG_SYSFS_DEPRECATED
 1326 extern long sysfs_deprecated;
 1327 #else
 1328 #define sysfs_deprecated 0
 1329 #endif
 1330 
 1331 /**
 1332  * module_driver() - Helper macro for drivers that don't do anything
 1333  * special in module init/exit. This eliminates a lot of boilerplate.
 1334  * Each module may only use this macro once, and calling it replaces
 1335  * module_init() and module_exit().
 1336  *
 1337  * @__driver: driver name
 1338  * @__register: register function for this driver type
 1339  * @__unregister: unregister function for this driver type
 1340  * @...: Additional arguments to be passed to __register and __unregister.
 1341  *
 1342  * Use this macro to construct bus specific macros for registering
 1343  * drivers, and do not use it on its own.
 1344  */
 1345 #define module_driver(__driver, __register, __unregister, ...) \
 1346 static int __init __driver##_init(void) \
 1347 { \
 1348 	return __register(&(__driver) , ##__VA_ARGS__); \
 1349 } \
 1350 module_init(__driver##_init); \
 1351 static void __exit __driver##_exit(void) \
 1352 { \
 1353 	__unregister(&(__driver) , ##__VA_ARGS__); \
 1354 } \
 1355 module_exit(__driver##_exit);
 1356 
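/*
 * Illustrative sketch (not part of the original header): module_driver() is
 * the building block behind bus-specific helpers.  For example, a PCI
 * wrapper can be spelled as below, which is the same pattern the in-tree
 * module_pci_driver() macro uses.
 */
#define example_module_pci_driver(__pci_driver) \
	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
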
 1357 /**
 1358  * builtin_driver() - Helper macro for drivers that don't do anything
 1359  * special in init and have no exit. This eliminates some boilerplate.
 1360  * Each driver may only use this macro once, and calling it replaces
 1361  * device_initcall (or in some cases, the legacy __initcall).  This is
 1362  * meant to be a direct parallel of module_driver() above but without
 1363  * the __exit stuff that is not used for builtin cases.
 1364  *
 1365  * @__driver: driver name
 1366  * @__register: register function for this driver type
 1367  * @...: Additional arguments to be passed to __register
 1368  *
 1369  * Use this macro to construct bus specific macros for registering
 1370  * drivers, and do not use it on its own.
 1371  */
 1372 #define builtin_driver(__driver, __register, ...) \
 1373 static int __init __driver##_init(void) \
 1374 { \
 1375 	return __register(&(__driver) , ##__VA_ARGS__); \
 1376 } \
 1377 device_initcall(__driver##_init);
 1378 
 1379 #endif /* _DEVICE_H_ */

    1 #ifndef _LINUX_DMA_MAPPING_H
    2 #define _LINUX_DMA_MAPPING_H
    3 
    4 #include <linux/sizes.h>
    5 #include <linux/string.h>
    6 #include <linux/device.h>
    7 #include <linux/err.h>
    8 #include <linux/dma-debug.h>
    9 #include <linux/dma-direction.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/kmemcheck.h>
   12 #include <linux/bug.h>
   13 
   14 /**
   15  * List of possible attributes associated with a DMA mapping. The semantics
   16  * of each attribute should be defined in Documentation/DMA-attributes.txt.
   17  *
   18  * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
   19  * forces all pending DMA writes to complete.
   20  */
   21 #define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
   22 /*
   23  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
    24  * may be weakly ordered, that is, reads and writes may pass each other.
   25  */
   26 #define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
   27 /*
   28  * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
   29  * buffered to improve performance.
   30  */
   31 #define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
   32 /*
    33  * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
    34  * consistent or non-consistent memory as it sees fit.
   35  */
   36 #define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
   37 /*
    38  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
   39  * virtual mapping for the allocated buffer.
   40  */
   41 #define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
   42 /*
   43  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
   44  * the CPU cache for the given buffer assuming that it has been already
   45  * transferred to 'device' domain.
   46  */
   47 #define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
   48 /*
   49  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
   50  * in physical memory.
   51  */
   52 #define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
   53 /*
   54  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
    55  * that it's probably not worth the time to try to allocate memory in a way
   56  * that gives better TLB efficiency.
   57  */
   58 #define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
   59 
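/*
 * Illustrative sketch (not part of the original header): the attribute bits
 * above are OR-ed together into the attrs argument of the *_attrs variants
 * of the DMA API.  The function name and buffer size are hypothetical.
 */
static void *example_alloc_wc_buffer(struct device *dev, dma_addr_t *handle)
{
	/* Request a write-combined, physically contiguous buffer. */
	return dma_alloc_attrs(dev, SZ_64K, handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE |
			       DMA_ATTR_FORCE_CONTIGUOUS);
}
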
   60 /*
   61  * A dma_addr_t can hold any valid DMA or bus address for the platform.
   62  * It can be given to a device to use as a DMA source or target.  A CPU cannot
   63  * reference a dma_addr_t directly because there may be translation between
   64  * its physical address space and the bus address space.
   65  */
   66 struct dma_map_ops {
   67 	void* (*alloc)(struct device *dev, size_t size,
   68 				dma_addr_t *dma_handle, gfp_t gfp,
   69 				unsigned long attrs);
   70 	void (*free)(struct device *dev, size_t size,
   71 			      void *vaddr, dma_addr_t dma_handle,
   72 			      unsigned long attrs);
   73 	int (*mmap)(struct device *, struct vm_area_struct *,
   74 			  void *, dma_addr_t, size_t,
   75 			  unsigned long attrs);
   76 
   77 	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
   78 			   dma_addr_t, size_t, unsigned long attrs);
   79 
   80 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
   81 			       unsigned long offset, size_t size,
   82 			       enum dma_data_direction dir,
   83 			       unsigned long attrs);
   84 	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
   85 			   size_t size, enum dma_data_direction dir,
   86 			   unsigned long attrs);
   87 	/*
   88 	 * map_sg returns 0 on error and a value > 0 on success.
   89 	 * It should never return a value < 0.
   90 	 */
   91 	int (*map_sg)(struct device *dev, struct scatterlist *sg,
   92 		      int nents, enum dma_data_direction dir,
   93 		      unsigned long attrs);
   94 	void (*unmap_sg)(struct device *dev,
   95 			 struct scatterlist *sg, int nents,
   96 			 enum dma_data_direction dir,
   97 			 unsigned long attrs);
   98 	void (*sync_single_for_cpu)(struct device *dev,
   99 				    dma_addr_t dma_handle, size_t size,
  100 				    enum dma_data_direction dir);
  101 	void (*sync_single_for_device)(struct device *dev,
  102 				       dma_addr_t dma_handle, size_t size,
  103 				       enum dma_data_direction dir);
  104 	void (*sync_sg_for_cpu)(struct device *dev,
  105 				struct scatterlist *sg, int nents,
  106 				enum dma_data_direction dir);
  107 	void (*sync_sg_for_device)(struct device *dev,
  108 				   struct scatterlist *sg, int nents,
  109 				   enum dma_data_direction dir);
  110 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
  111 	int (*dma_supported)(struct device *dev, u64 mask);
  112 	int (*set_dma_mask)(struct device *dev, u64 mask);
  113 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
  114 	u64 (*get_required_mask)(struct device *dev);
  115 #endif
  116 	int is_phys;
  117 };
  118 
  119 extern struct dma_map_ops dma_noop_ops;
  120 
  121 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
  122 
  123 #define DMA_MASK_NONE	0x0ULL
  124 
  125 static inline int valid_dma_direction(int dma_direction)
  126 {
  127 	return ((dma_direction == DMA_BIDIRECTIONAL) ||
  128 		(dma_direction == DMA_TO_DEVICE) ||
  129 		(dma_direction == DMA_FROM_DEVICE));
  130 }
  131 
  132 static inline int is_device_dma_capable(struct device *dev)
  133 {
  134 	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
  135 }
  136 
  137 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  138 /*
   139  * These three functions are only for the DMA allocator.
  140  * Don't use them in device drivers.
  141  */
  142 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
  143 				       dma_addr_t *dma_handle, void **ret);
  144 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
  145 
  146 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
  147 			    void *cpu_addr, size_t size, int *ret);
  148 #else
  149 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
  150 #define dma_release_from_coherent(dev, order, vaddr) (0)
  151 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
  152 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  153 
  154 #ifdef CONFIG_HAS_DMA
  155 #include <asm/dma-mapping.h>
  156 #else
  157 /*
   158  * Define the DMA API to allow compilation but not linking of
   159  * DMA-dependent code.  Code that depends on the dma-mapping
   160  * API needs to set 'depends on HAS_DMA' in its Kconfig.
  161  */
  162 extern struct dma_map_ops bad_dma_ops;
  163 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  164 {
  165 	return &bad_dma_ops;
  166 }
  167 #endif
  168 
  169 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
  170 					      size_t size,
  171 					      enum dma_data_direction dir,
  172 					      unsigned long attrs)
  173 {
  174 	struct dma_map_ops *ops = get_dma_ops(dev);
  175 	dma_addr_t addr;
  176 
  177 	kmemcheck_mark_initialized(ptr, size);
  178 	BUG_ON(!valid_dma_direction(dir));
  179 	addr = ops->map_page(dev, virt_to_page(ptr),
  180 			     offset_in_page(ptr), size,
  181 			     dir, attrs);
  182 	debug_dma_map_page(dev, virt_to_page(ptr),
  183 			   offset_in_page(ptr), size,
  184 			   dir, addr, true);
  185 	return addr;
  186 }
  187 
  188 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
  189 					  size_t size,
  190 					  enum dma_data_direction dir,
  191 					  unsigned long attrs)
  192 {
  193 	struct dma_map_ops *ops = get_dma_ops(dev);
  194 
  195 	BUG_ON(!valid_dma_direction(dir));
  196 	if (ops->unmap_page)
  197 		ops->unmap_page(dev, addr, size, dir, attrs);
  198 	debug_dma_unmap_page(dev, addr, size, dir, true);
  199 }
  200 
  201 /*
   202  * dma_map_sg_attrs returns 0 on error and > 0 on success.
  203  * It should never return a value < 0.
  204  */
  205 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
  206 				   int nents, enum dma_data_direction dir,
  207 				   unsigned long attrs)
  208 {
  209 	struct dma_map_ops *ops = get_dma_ops(dev);
  210 	int i, ents;
  211 	struct scatterlist *s;
  212 
  213 	for_each_sg(sg, s, nents, i)
  214 		kmemcheck_mark_initialized(sg_virt(s), s->length);
  215 	BUG_ON(!valid_dma_direction(dir));
  216 	ents = ops->map_sg(dev, sg, nents, dir, attrs);
  217 	BUG_ON(ents < 0);
  218 	debug_dma_map_sg(dev, sg, nents, ents, dir);
  219 
  220 	return ents;
  221 }
  222 
  223 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
  224 				      int nents, enum dma_data_direction dir,
  225 				      unsigned long attrs)
  226 {
  227 	struct dma_map_ops *ops = get_dma_ops(dev);
  228 
  229 	BUG_ON(!valid_dma_direction(dir));
  230 	debug_dma_unmap_sg(dev, sg, nents, dir);
  231 	if (ops->unmap_sg)
  232 		ops->unmap_sg(dev, sg, nents, dir, attrs);
  233 }
  234 
  235 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
  236 				      size_t offset, size_t size,
  237 				      enum dma_data_direction dir)
  238 {
  239 	struct dma_map_ops *ops = get_dma_ops(dev);
  240 	dma_addr_t addr;
  241 
  242 	kmemcheck_mark_initialized(page_address(page) + offset, size);
  243 	BUG_ON(!valid_dma_direction(dir));
  244 	addr = ops->map_page(dev, page, offset, size, dir, 0);
  245 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
  246 
  247 	return addr;
  248 }
  249 
  250 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
  251 				  size_t size, enum dma_data_direction dir)
  252 {
  253 	struct dma_map_ops *ops = get_dma_ops(dev);
  254 
  255 	BUG_ON(!valid_dma_direction(dir));
  256 	if (ops->unmap_page)
  257 		ops->unmap_page(dev, addr, size, dir, 0);
  258 	debug_dma_unmap_page(dev, addr, size, dir, false);
  259 }
  260 
  261 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
  262 					   size_t size,
  263 					   enum dma_data_direction dir)
  264 {
  265 	struct dma_map_ops *ops = get_dma_ops(dev);
  266 
  267 	BUG_ON(!valid_dma_direction(dir));
  268 	if (ops->sync_single_for_cpu)
  269 		ops->sync_single_for_cpu(dev, addr, size, dir);
  270 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
  271 }
  272 
  273 static inline void dma_sync_single_for_device(struct device *dev,
  274 					      dma_addr_t addr, size_t size,
  275 					      enum dma_data_direction dir)
  276 {
  277 	struct dma_map_ops *ops = get_dma_ops(dev);
  278 
  279 	BUG_ON(!valid_dma_direction(dir));
  280 	if (ops->sync_single_for_device)
  281 		ops->sync_single_for_device(dev, addr, size, dir);
  282 	debug_dma_sync_single_for_device(dev, addr, size, dir);
  283 }
  284 
  285 static inline void dma_sync_single_range_for_cpu(struct device *dev,
  286 						 dma_addr_t addr,
  287 						 unsigned long offset,
  288 						 size_t size,
  289 						 enum dma_data_direction dir)
  290 {
  291 	const struct dma_map_ops *ops = get_dma_ops(dev);
  292 
  293 	BUG_ON(!valid_dma_direction(dir));
  294 	if (ops->sync_single_for_cpu)
  295 		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
  296 	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
  297 }
  298 
  299 static inline void dma_sync_single_range_for_device(struct device *dev,
  300 						    dma_addr_t addr,
  301 						    unsigned long offset,
  302 						    size_t size,
  303 						    enum dma_data_direction dir)
  304 {
  305 	const struct dma_map_ops *ops = get_dma_ops(dev);
  306 
  307 	BUG_ON(!valid_dma_direction(dir));
  308 	if (ops->sync_single_for_device)
  309 		ops->sync_single_for_device(dev, addr + offset, size, dir);
  310 	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
  311 }
  312 
  313 static inline void
  314 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
  315 		    int nelems, enum dma_data_direction dir)
  316 {
  317 	struct dma_map_ops *ops = get_dma_ops(dev);
  318 
  319 	BUG_ON(!valid_dma_direction(dir));
  320 	if (ops->sync_sg_for_cpu)
  321 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
  322 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
  323 }
  324 
  325 static inline void
  326 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  327 		       int nelems, enum dma_data_direction dir)
  328 {
  329 	struct dma_map_ops *ops = get_dma_ops(dev);
  330 
  331 	BUG_ON(!valid_dma_direction(dir));
  332 	if (ops->sync_sg_for_device)
  333 		ops->sync_sg_for_device(dev, sg, nelems, dir);
  334 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
  335 
  336 }
  337 
  338 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
  339 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
  340 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
  341 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
  342 
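/*
 * Illustrative sketch (not part of the original header): the usual streaming
 * DMA life cycle built from the wrappers above.  The buffer, length and
 * function name are hypothetical; dma_mapping_error() is defined further
 * down in this header.
 */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand addr to the hardware and wait for completion ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
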
  343 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
  344 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
  345 
  346 void *dma_common_contiguous_remap(struct page *page, size_t size,
  347 			unsigned long vm_flags,
  348 			pgprot_t prot, const void *caller);
  349 
  350 void *dma_common_pages_remap(struct page **pages, size_t size,
  351 			unsigned long vm_flags, pgprot_t prot,
  352 			const void *caller);
  353 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
  354 
  355 /**
  356  * dma_mmap_attrs - map a coherent DMA allocation into user space
  357  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  358  * @vma: vm_area_struct describing requested user mapping
  359  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
  360  * @handle: device-view address returned from dma_alloc_attrs
  361  * @size: size of memory originally requested in dma_alloc_attrs
  362  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
  363  *
  364  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
  365  * into user space.  The coherent DMA buffer must not be freed by the
  366  * driver until the user space mapping has been released.
  367  */
  368 static inline int
  369 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
  370 	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
  371 {
  372 	struct dma_map_ops *ops = get_dma_ops(dev);
  373 	BUG_ON(!ops);
  374 	if (ops->mmap)
  375 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
  376 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
  377 }
  378 
  379 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
  380 
  381 int
  382 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
  383 		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
  384 
  385 static inline int
  386 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
  387 		      dma_addr_t dma_addr, size_t size,
  388 		      unsigned long attrs)
  389 {
  390 	struct dma_map_ops *ops = get_dma_ops(dev);
  391 	BUG_ON(!ops);
  392 	if (ops->get_sgtable)
  393 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
  394 					attrs);
  395 	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
  396 }
  397 
  398 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
  399 
  400 #ifndef arch_dma_alloc_attrs
  401 #define arch_dma_alloc_attrs(dev, flag)	(true)
  402 #endif
  403 
  404 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
  405 				       dma_addr_t *dma_handle, gfp_t flag,
  406 				       unsigned long attrs)
  407 {
  408 	struct dma_map_ops *ops = get_dma_ops(dev);
  409 	void *cpu_addr;
  410 
  411 	BUG_ON(!ops);
  412 
  413 	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
  414 		return cpu_addr;
  415 
  416 	if (!arch_dma_alloc_attrs(&dev, &flag))
  417 		return NULL;
  418 	if (!ops->alloc)
  419 		return NULL;
  420 
  421 	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
  422 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
  423 	return cpu_addr;
  424 }
  425 
  426 static inline void dma_free_attrs(struct device *dev, size_t size,
  427 				     void *cpu_addr, dma_addr_t dma_handle,
  428 				     unsigned long attrs)
  429 {
  430 	struct dma_map_ops *ops = get_dma_ops(dev);
  431 
  432 	BUG_ON(!ops);
  433 	WARN_ON(irqs_disabled());
  434 
  435 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
  436 		return;
  437 
  438 	if (!ops->free || !cpu_addr)
  439 		return;
  440 
  441 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
  442 	ops->free(dev, size, cpu_addr, dma_handle, attrs);
  443 }
  444 
  445 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  446 		dma_addr_t *dma_handle, gfp_t flag)
  447 {
  448 	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
  449 }
  450 
  451 static inline void dma_free_coherent(struct device *dev, size_t size,
  452 		void *cpu_addr, dma_addr_t dma_handle)
  453 {
  454 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
  455 }
  456 
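/*
 * Illustrative sketch (not part of the original header): a coherent
 * allocation shared between the CPU and the device for the lifetime of a
 * driver.  The ring size and names are hypothetical.
 */
static void *example_alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
	/* CPU and device see this memory consistently, no explicit sync. */
	return dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, void *ring,
			      dma_addr_t ring_dma)
{
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
}
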
  457 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
  458 		dma_addr_t *dma_handle, gfp_t gfp)
  459 {
  460 	return dma_alloc_attrs(dev, size, dma_handle, gfp,
  461 			       DMA_ATTR_NON_CONSISTENT);
  462 }
  463 
  464 static inline void dma_free_noncoherent(struct device *dev, size_t size,
  465 		void *cpu_addr, dma_addr_t dma_handle)
  466 {
  467 	dma_free_attrs(dev, size, cpu_addr, dma_handle,
  468 		       DMA_ATTR_NON_CONSISTENT);
  469 }
  470 
  471 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  472 {
  473 	debug_dma_mapping_error(dev, dma_addr);
  474 
  475 	if (get_dma_ops(dev)->mapping_error)
  476 		return get_dma_ops(dev)->mapping_error(dev, dma_addr);
  477 
  478 #ifdef DMA_ERROR_CODE
  479 	return dma_addr == DMA_ERROR_CODE;
  480 #else
  481 	return 0;
  482 #endif
  483 }
  484 
  485 #ifndef HAVE_ARCH_DMA_SUPPORTED
  486 static inline int dma_supported(struct device *dev, u64 mask)
  487 {
  488 	struct dma_map_ops *ops = get_dma_ops(dev);
  489 
  490 	if (!ops)
  491 		return 0;
  492 	if (!ops->dma_supported)
  493 		return 1;
  494 	return ops->dma_supported(dev, mask);
  495 }
  496 #endif
  497 
  498 #ifndef HAVE_ARCH_DMA_SET_MASK
  499 static inline int dma_set_mask(struct device *dev, u64 mask)
  500 {
  501 	struct dma_map_ops *ops = get_dma_ops(dev);
  502 
  503 	if (ops->set_dma_mask)
  504 		return ops->set_dma_mask(dev, mask);
  505 
  506 	if (!dev->dma_mask || !dma_supported(dev, mask))
  507 		return -EIO;
  508 	*dev->dma_mask = mask;
  509 	return 0;
  510 }
  511 #endif
  512 
  513 static inline u64 dma_get_mask(struct device *dev)
  514 {
  515 	if (dev && dev->dma_mask && *dev->dma_mask)
  516 		return *dev->dma_mask;
  517 	return DMA_BIT_MASK(32);
  518 }
  519 
  520 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
  521 int dma_set_coherent_mask(struct device *dev, u64 mask);
  522 #else
  523 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
  524 {
  525 	if (!dma_supported(dev, mask))
  526 		return -EIO;
  527 	dev->coherent_dma_mask = mask;
  528 	return 0;
  529 }
  530 #endif
  531 
  532 /*
  533  * Set both the DMA mask and the coherent DMA mask to the same thing.
  534  * Note that we don't check the return value from dma_set_coherent_mask()
  535  * as the DMA API guarantees that the coherent DMA mask can be set to
   536  * a value equal to or smaller than the streaming DMA mask.
  537  */
  538 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
  539 {
  540 	int rc = dma_set_mask(dev, mask);
  541 	if (rc == 0)
  542 		dma_set_coherent_mask(dev, mask);
  543 	return rc;
  544 }
  545 
  546 /*
  547  * Similar to the above, except it deals with the case where the device
   548  * does not have dev->dma_mask appropriately set up.
  549  */
  550 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
  551 {
  552 	dev->dma_mask = &dev->coherent_dma_mask;
  553 	return dma_set_mask_and_coherent(dev, mask);
  554 }
  555 
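/*
 * Illustrative sketch (not part of the original header): the common
 * probe-time pattern, trying a 64-bit mask first and falling back to
 * 32 bits.  The function name is hypothetical.
 */
static int example_setup_dma(struct device *dev)
{
	int rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (rc)
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return rc;
}
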
  556 extern u64 dma_get_required_mask(struct device *dev);
  557 
  558 #ifndef arch_setup_dma_ops
  559 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
  560 				      u64 size, const struct iommu_ops *iommu,
  561 				      bool coherent) { }
  562 #endif
  563 
  564 #ifndef arch_teardown_dma_ops
  565 static inline void arch_teardown_dma_ops(struct device *dev) { }
  566 #endif
  567 
  568 static inline unsigned int dma_get_max_seg_size(struct device *dev)
  569 {
  570 	if (dev->dma_parms && dev->dma_parms->max_segment_size)
  571 		return dev->dma_parms->max_segment_size;
  572 	return SZ_64K;
  573 }
  574 
  575 static inline unsigned int dma_set_max_seg_size(struct device *dev,
  576 						unsigned int size)
  577 {
  578 	if (dev->dma_parms) {
  579 		dev->dma_parms->max_segment_size = size;
  580 		return 0;
  581 	}
  582 	return -EIO;
  583 }
  584 
  585 static inline unsigned long dma_get_seg_boundary(struct device *dev)
  586 {
  587 	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
  588 		return dev->dma_parms->segment_boundary_mask;
  589 	return DMA_BIT_MASK(32);
  590 }
  591 
  592 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
  593 {
  594 	if (dev->dma_parms) {
  595 		dev->dma_parms->segment_boundary_mask = mask;
  596 		return 0;
  597 	}
  598 	return -EIO;
  599 }
  600 
  601 #ifndef dma_max_pfn
  602 static inline unsigned long dma_max_pfn(struct device *dev)
  603 {
  604 	return *dev->dma_mask >> PAGE_SHIFT;
  605 }
  606 #endif
  607 
  608 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
  609 					dma_addr_t *dma_handle, gfp_t flag)
  610 {
  611 	void *ret = dma_alloc_coherent(dev, size, dma_handle,
  612 				       flag | __GFP_ZERO);
  613 	return ret;
  614 }
  615 
  616 #ifdef CONFIG_HAS_DMA
  617 static inline int dma_get_cache_alignment(void)
  618 {
  619 #ifdef ARCH_DMA_MINALIGN
  620 	return ARCH_DMA_MINALIGN;
  621 #endif
  622 	return 1;
  623 }
  624 #endif
  625 
  626 /* flags for the coherent memory api */
  627 #define	DMA_MEMORY_MAP			0x01
  628 #define DMA_MEMORY_IO			0x02
  629 #define DMA_MEMORY_INCLUDES_CHILDREN	0x04
  630 #define DMA_MEMORY_EXCLUSIVE		0x08
  631 
  632 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  633 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  634 				dma_addr_t device_addr, size_t size, int flags);
  635 void dma_release_declared_memory(struct device *dev);
  636 void *dma_mark_declared_memory_occupied(struct device *dev,
  637 					dma_addr_t device_addr, size_t size);
  638 #else
  639 static inline int
  640 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  641 			    dma_addr_t device_addr, size_t size, int flags)
  642 {
  643 	return 0;
  644 }
  645 
  646 static inline void
  647 dma_release_declared_memory(struct device *dev)
  648 {
  649 }
  650 
  651 static inline void *
  652 dma_mark_declared_memory_occupied(struct device *dev,
  653 				  dma_addr_t device_addr, size_t size)
  654 {
  655 	return ERR_PTR(-EBUSY);
  656 }
  657 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  658 
  659 /*
  660  * Managed DMA API
  661  */
  662 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
  663 				 dma_addr_t *dma_handle, gfp_t gfp);
  664 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
  665 			       dma_addr_t dma_handle);
  666 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
  667 				    dma_addr_t *dma_handle, gfp_t gfp);
  668 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
  669 				  dma_addr_t dma_handle);
  670 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  671 extern int dmam_declare_coherent_memory(struct device *dev,
  672 					phys_addr_t phys_addr,
  673 					dma_addr_t device_addr, size_t size,
  674 					int flags);
  675 extern void dmam_release_declared_memory(struct device *dev);
  676 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  677 static inline int dmam_declare_coherent_memory(struct device *dev,
  678 				phys_addr_t phys_addr, dma_addr_t device_addr,
   679 				size_t size, int flags)
  680 {
  681 	return 0;
  682 }
  683 
  684 static inline void dmam_release_declared_memory(struct device *dev)
  685 {
  686 }
  687 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  688 
  689 static inline void *dma_alloc_wc(struct device *dev, size_t size,
  690 				 dma_addr_t *dma_addr, gfp_t gfp)
  691 {
  692 	return dma_alloc_attrs(dev, size, dma_addr, gfp,
  693 			       DMA_ATTR_WRITE_COMBINE);
  694 }
  695 #ifndef dma_alloc_writecombine
  696 #define dma_alloc_writecombine dma_alloc_wc
  697 #endif
  698 
  699 static inline void dma_free_wc(struct device *dev, size_t size,
  700 			       void *cpu_addr, dma_addr_t dma_addr)
  701 {
  702 	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
  703 			      DMA_ATTR_WRITE_COMBINE);
  704 }
  705 #ifndef dma_free_writecombine
  706 #define dma_free_writecombine dma_free_wc
  707 #endif
  708 
  709 static inline int dma_mmap_wc(struct device *dev,
  710 			      struct vm_area_struct *vma,
  711 			      void *cpu_addr, dma_addr_t dma_addr,
  712 			      size_t size)
  713 {
  714 	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
  715 			      DMA_ATTR_WRITE_COMBINE);
  716 }
  717 #ifndef dma_mmap_writecombine
  718 #define dma_mmap_writecombine dma_mmap_wc
  719 #endif
  720 
  721 #ifdef CONFIG_NEED_DMA_MAP_STATE
  722 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
  723 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
  724 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
  725 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
  726 #define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
  727 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
  728 #else
  729 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
  730 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
  731 #define dma_unmap_addr(PTR, ADDR_NAME)           (0)
  732 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
  733 #define dma_unmap_len(PTR, LEN_NAME)             (0)
  734 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
  735 #endif
  736 
  737 #endif

    1 /*
    2  * kref.h - library routines for handling generic reference counted objects
    3  *
    4  * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
    5  * Copyright (C) 2004 IBM Corp.
    6  *
    7  * based on kobject.h which was:
    8  * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
    9  * Copyright (C) 2002-2003 Open Source Development Labs
   10  *
   11  * This file is released under the GPLv2.
   12  *
   13  */
   14 
   15 #ifndef _KREF_H_
   16 #define _KREF_H_
   17 
   18 #include <linux/bug.h>
   19 #include <linux/atomic.h>
   20 #include <linux/kernel.h>
   21 #include <linux/mutex.h>
   22 
   23 struct kref {
   24 	atomic_t refcount;
   25 };
   26 
   27 /**
   28  * kref_init - initialize object.
   29  * @kref: object in question.
   30  */
   31 static inline void kref_init(struct kref *kref)
   32 {
   33 	atomic_set(&kref->refcount, 1);
   34 }
   35 
   36 /**
   37  * kref_get - increment refcount for object.
   38  * @kref: object.
   39  */
   40 static inline void kref_get(struct kref *kref)
   41 {
    42 	/* If the refcount was 0 before incrementing, we have a race:
    43 	 * some other thread may be freeing this kref right now.
    44 	 * In that case one should use kref_get_unless_zero() instead.
   45 	 */
   46 	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
   47 }
   48 
   49 /**
   50  * kref_sub - subtract a number of refcounts for object.
   51  * @kref: object.
    52  * @count: Number of refcounts to subtract.
   53  * @release: pointer to the function that will clean up the object when the
   54  *	     last reference to the object is released.
   55  *	     This pointer is required, and it is not acceptable to pass kfree
   56  *	     in as this function.  If the caller does pass kfree to this
   57  *	     function, you will be publicly mocked mercilessly by the kref
   58  *	     maintainer, and anyone else who happens to notice it.  You have
   59  *	     been warned.
   60  *
   61  * Subtract @count from the refcount, and if 0, call release().
   62  * Return 1 if the object was removed, otherwise return 0.  Beware, if this
    63  * function returns 0, you still cannot count on the kref remaining in
    64  * memory.  Only use the return value to check whether the kref is now
    65  * gone, never to conclude that it is still present.
   66  */
   67 static inline int kref_sub(struct kref *kref, unsigned int count,
   68 	     void (*release)(struct kref *kref))
   69 {
   70 	WARN_ON(release == NULL);
   71 
   72 	if (atomic_sub_and_test((int) count, &kref->refcount)) {
   73 		release(kref);
   74 		return 1;
   75 	}
   76 	return 0;
   77 }
   78 
   79 /**
   80  * kref_put - decrement refcount for object.
   81  * @kref: object.
   82  * @release: pointer to the function that will clean up the object when the
   83  *	     last reference to the object is released.
   84  *	     This pointer is required, and it is not acceptable to pass kfree
   85  *	     in as this function.  If the caller does pass kfree to this
   86  *	     function, you will be publicly mocked mercilessly by the kref
   87  *	     maintainer, and anyone else who happens to notice it.  You have
   88  *	     been warned.
   89  *
   90  * Decrement the refcount, and if 0, call release().
   91  * Return 1 if the object was removed, otherwise return 0.  Beware, if this
    92  * function returns 0, you still cannot count on the kref remaining in
    93  * memory.  Only use the return value to check whether the kref is now
    94  * gone, never to conclude that it is still present.
   95  */
   96 static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
   97 {
   98 	return kref_sub(kref, 1, release);
   99 }
  100 
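/*
 * Illustrative sketch (not part of the original header): the canonical
 * embedding pattern for a kref.  The structure and function names are
 * hypothetical; kfree() comes from <linux/slab.h>.
 */
struct example_obj {
	struct kref refcount;
	/* ... payload ... */
};

static void example_obj_release(struct kref *kref)
{
	struct example_obj *obj =
		container_of(kref, struct example_obj, refcount);

	kfree(obj);
}

/* Callers pair kref_get(&obj->refcount) with
 * kref_put(&obj->refcount, example_obj_release). */
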
  101 static inline int kref_put_mutex(struct kref *kref,
  102 				 void (*release)(struct kref *kref),
  103 				 struct mutex *lock)
  104 {
  105 	WARN_ON(release == NULL);
  106 	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
  107 		mutex_lock(lock);
  108 		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
  109 			mutex_unlock(lock);
  110 			return 0;
  111 		}
  112 		release(kref);
  113 		return 1;
  114 	}
  115 	return 0;
  116 }
  117 
  118 /**
  119  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  120  * @kref: object.
  121  *
  122  * Return non-zero if the increment succeeded. Otherwise return 0.
  123  *
  124  * This function is intended to simplify locking around refcounting for
  125  * objects that can be looked up from a lookup structure, and which are
  126  * removed from that lookup structure in the object destructor.
  127  * Operations on such objects require at least a read lock around
  128  * lookup + kref_get, and a write lock around kref_put + remove from lookup
  129  * structure. Furthermore, RCU implementations become extremely tricky.
   130  * With a lookup followed by a kref_get_unless_zero *with return value check*,
   131  * locking in the kref_put path can be deferred to the actual removal from
   132  * the lookup structure, and RCU lookups become trivial.
  133  */
  134 static inline int __must_check kref_get_unless_zero(struct kref *kref)
  135 {
  136 	return atomic_add_unless(&kref->refcount, 1, 0);
  137 }
  138 #endif /* _KREF_H_ */

    1 #ifndef _LINUX_LIST_H
    2 #define _LINUX_LIST_H
    3 
    4 #include <linux/types.h>
    5 #include <linux/stddef.h>
    6 #include <linux/poison.h>
    7 #include <linux/const.h>
    8 #include <linux/kernel.h>
    9 
   10 /*
   11  * Simple doubly linked list implementation.
   12  *
   13  * Some of the internal functions ("__xxx") are useful when
   14  * manipulating whole lists rather than single entries, as
   15  * sometimes we already know the next/prev entries and we can
   16  * generate better code by using them directly rather than
   17  * using the generic single-entry routines.
   18  */
   19 
   20 #define LIST_HEAD_INIT(name) { &(name), &(name) }
   21 
   22 #define LIST_HEAD(name) \
   23 	struct list_head name = LIST_HEAD_INIT(name)
   24 
   25 static inline void INIT_LIST_HEAD(struct list_head *list)
   26 {
   27 	WRITE_ONCE(list->next, list);
   28 	list->prev = list;
   29 }
   30 
   31 /*
   32  * Insert a new entry between two known consecutive entries.
   33  *
   34  * This is only for internal list manipulation where we know
   35  * the prev/next entries already!
   36  */
   37 #ifndef CONFIG_DEBUG_LIST
   38 static inline void __list_add(struct list_head *new,
   39 			      struct list_head *prev,
   40 			      struct list_head *next)
   41 {
   42 	next->prev = new;
   43 	new->next = next;
   44 	new->prev = prev;
   45 	WRITE_ONCE(prev->next, new);
   46 }
   47 #else
   48 extern void __list_add(struct list_head *new,
   49 			      struct list_head *prev,
   50 			      struct list_head *next);
   51 #endif
   52 
   53 /**
   54  * list_add - add a new entry
   55  * @new: new entry to be added
   56  * @head: list head to add it after
   57  *
   58  * Insert a new entry after the specified head.
   59  * This is good for implementing stacks.
   60  */
   61 static inline void list_add(struct list_head *new, struct list_head *head)
   62 {
   63 	__list_add(new, head, head->next);
   64 }
   65 
   66 
   67 /**
   68  * list_add_tail - add a new entry
   69  * @new: new entry to be added
   70  * @head: list head to add it before
   71  *
   72  * Insert a new entry before the specified head.
   73  * This is useful for implementing queues.
   74  */
   75 static inline void list_add_tail(struct list_head *new, struct list_head *head)
   76 {
   77 	__list_add(new, head->prev, head);
   78 }
   79 
   80 /*
   81  * Delete a list entry by making the prev/next entries
   82  * point to each other.
   83  *
   84  * This is only for internal list manipulation where we know
   85  * the prev/next entries already!
   86  */
   87 static inline void __list_del(struct list_head * prev, struct list_head * next)
   88 {
   89 	next->prev = prev;
   90 	WRITE_ONCE(prev->next, next);
   91 }
   92 
   93 /**
   94  * list_del - deletes entry from list.
   95  * @entry: the element to delete from the list.
    96  * Note: list_empty() on entry does not return true after this; the entry is
   97  * in an undefined state.
   98  */
   99 #ifndef CONFIG_DEBUG_LIST
  100 static inline void __list_del_entry(struct list_head *entry)
  101 {
  102 	__list_del(entry->prev, entry->next);
  103 }
  104 
  105 static inline void list_del(struct list_head *entry)
  106 {
  107 	__list_del(entry->prev, entry->next);
  108 	entry->next = LIST_POISON1;
  109 	entry->prev = LIST_POISON2;
  110 }
  111 #else
  112 extern void __list_del_entry(struct list_head *entry);
  113 extern void list_del(struct list_head *entry);
  114 #endif
  115 
  116 /**
  117  * list_replace - replace old entry by new one
  118  * @old : the element to be replaced
  119  * @new : the new element to insert
  120  *
  121  * If @old was empty, it will be overwritten.
  122  */
  123 static inline void list_replace(struct list_head *old,
  124 				struct list_head *new)
  125 {
  126 	new->next = old->next;
  127 	new->next->prev = new;
  128 	new->prev = old->prev;
  129 	new->prev->next = new;
  130 }
  131 
  132 static inline void list_replace_init(struct list_head *old,
  133 					struct list_head *new)
  134 {
  135 	list_replace(old, new);
  136 	INIT_LIST_HEAD(old);
  137 }
  138 
  139 /**
  140  * list_del_init - deletes entry from list and reinitialize it.
  141  * @entry: the element to delete from the list.
  142  */
  143 static inline void list_del_init(struct list_head *entry)
  144 {
  145 	__list_del_entry(entry);
  146 	INIT_LIST_HEAD(entry);
  147 }
  148 
  149 /**
  150  * list_move - delete from one list and add as another's head
  151  * @list: the entry to move
  152  * @head: the head that will precede our entry
  153  */
  154 static inline void list_move(struct list_head *list, struct list_head *head)
  155 {
  156 	__list_del_entry(list);
  157 	list_add(list, head);
  158 }
  159 
  160 /**
  161  * list_move_tail - delete from one list and add as another's tail
  162  * @list: the entry to move
  163  * @head: the head that will follow our entry
  164  */
  165 static inline void list_move_tail(struct list_head *list,
  166 				  struct list_head *head)
  167 {
  168 	__list_del_entry(list);
  169 	list_add_tail(list, head);
  170 }
  171 
  172 /**
  173  * list_is_last - tests whether @list is the last entry in list @head
  174  * @list: the entry to test
  175  * @head: the head of the list
  176  */
  177 static inline int list_is_last(const struct list_head *list,
  178 				const struct list_head *head)
  179 {
  180 	return list->next == head;
  181 }
  182 
  183 /**
  184  * list_empty - tests whether a list is empty
  185  * @head: the list to test.
  186  */
  187 static inline int list_empty(const struct list_head *head)
  188 {
  189 	return READ_ONCE(head->next) == head;
  190 }
  191 
  192 /**
  193  * list_empty_careful - tests whether a list is empty and not being modified
  194  * @head: the list to test
  195  *
  196  * Description:
  197  * tests whether a list is empty _and_ checks that no other CPU might be
  198  * in the process of modifying either member (next or prev)
  199  *
  200  * NOTE: using list_empty_careful() without synchronization
  201  * can only be safe if the only activity that can happen
   202  * to the list entry is list_del_init().  E.g., it cannot be used
  203  * if another CPU could re-list_add() it.
  204  */
  205 static inline int list_empty_careful(const struct list_head *head)
  206 {
  207 	struct list_head *next = head->next;
  208 	return (next == head) && (next == head->prev);
  209 }
  210 
  211 /**
  212  * list_rotate_left - rotate the list to the left
  213  * @head: the head of the list
  214  */
  215 static inline void list_rotate_left(struct list_head *head)
  216 {
  217 	struct list_head *first;
  218 
  219 	if (!list_empty(head)) {
  220 		first = head->next;
  221 		list_move_tail(first, head);
  222 	}
  223 }
  224 
  225 /**
  226  * list_is_singular - tests whether a list has just one entry.
  227  * @head: the list to test.
  228  */
  229 static inline int list_is_singular(const struct list_head *head)
  230 {
  231 	return !list_empty(head) && (head->next == head->prev);
  232 }
  233 
  234 static inline void __list_cut_position(struct list_head *list,
  235 		struct list_head *head, struct list_head *entry)
  236 {
  237 	struct list_head *new_first = entry->next;
  238 	list->next = head->next;
  239 	list->next->prev = list;
  240 	list->prev = entry;
  241 	entry->next = list;
  242 	head->next = new_first;
  243 	new_first->prev = head;
  244 }
  245 
  246 /**
  247  * list_cut_position - cut a list into two
  248  * @list: a new list to add all removed entries
  249  * @head: a list with entries
   250  * @entry: an entry within head; it may be the head itself,
   251  *	in which case we won't cut the list
  252  *
  253  * This helper moves the initial part of @head, up to and
  254  * including @entry, from @head to @list. You should
  255  * pass on @entry an element you know is on @head. @list
  256  * should be an empty list or a list you do not care about
  257  * losing its data.
  258  *
  259  */
  260 static inline void list_cut_position(struct list_head *list,
  261 		struct list_head *head, struct list_head *entry)
  262 {
  263 	if (list_empty(head))
  264 		return;
  265 	if (list_is_singular(head) &&
  266 		(head->next != entry && head != entry))
  267 		return;
  268 	if (entry == head)
  269 		INIT_LIST_HEAD(list);
  270 	else
  271 		__list_cut_position(list, head, entry);
  272 }
  273 
  274 static inline void __list_splice(const struct list_head *list,
  275 				 struct list_head *prev,
  276 				 struct list_head *next)
  277 {
  278 	struct list_head *first = list->next;
  279 	struct list_head *last = list->prev;
  280 
  281 	first->prev = prev;
  282 	prev->next = first;
  283 
  284 	last->next = next;
  285 	next->prev = last;
  286 }
  287 
  288 /**
  289  * list_splice - join two lists, this is designed for stacks
  290  * @list: the new list to add.
  291  * @head: the place to add it in the first list.
  292  */
  293 static inline void list_splice(const struct list_head *list,
  294 				struct list_head *head)
  295 {
  296 	if (!list_empty(list))
  297 		__list_splice(list, head, head->next);
  298 }
  299 
  300 /**
  301  * list_splice_tail - join two lists, each list being a queue
  302  * @list: the new list to add.
  303  * @head: the place to add it in the first list.
  304  */
  305 static inline void list_splice_tail(struct list_head *list,
  306 				struct list_head *head)
  307 {
  308 	if (!list_empty(list))
  309 		__list_splice(list, head->prev, head);
  310 }
  311 
  312 /**
  313  * list_splice_init - join two lists and reinitialise the emptied list.
  314  * @list: the new list to add.
  315  * @head: the place to add it in the first list.
  316  *
   317  * The list at @list is reinitialised.
  318  */
  319 static inline void list_splice_init(struct list_head *list,
  320 				    struct list_head *head)
  321 {
  322 	if (!list_empty(list)) {
  323 		__list_splice(list, head, head->next);
  324 		INIT_LIST_HEAD(list);
  325 	}
  326 }
  327 
  328 /**
  329  * list_splice_tail_init - join two lists and reinitialise the emptied list
  330  * @list: the new list to add.
  331  * @head: the place to add it in the first list.
  332  *
  333  * Each of the lists is a queue.
   334  * The list at @list is reinitialised.
  335  */
  336 static inline void list_splice_tail_init(struct list_head *list,
  337 					 struct list_head *head)
  338 {
  339 	if (!list_empty(list)) {
  340 		__list_splice(list, head->prev, head);
  341 		INIT_LIST_HEAD(list);
  342 	}
  343 }
  344 
  345 /**
  346  * list_entry - get the struct for this entry
  347  * @ptr:	the &struct list_head pointer.
  348  * @type:	the type of the struct this is embedded in.
  349  * @member:	the name of the list_head within the struct.
  350  */
  351 #define list_entry(ptr, type, member) \
  352 	container_of(ptr, type, member)
  353 
  354 /**
  355  * list_first_entry - get the first element from a list
  356  * @ptr:	the list head to take the element from.
  357  * @type:	the type of the struct this is embedded in.
  358  * @member:	the name of the list_head within the struct.
  359  *
   360  * Note that the list is expected to be non-empty.
  361  */
  362 #define list_first_entry(ptr, type, member) \
  363 	list_entry((ptr)->next, type, member)
  364 
  365 /**
  366  * list_last_entry - get the last element from a list
  367  * @ptr:	the list head to take the element from.
  368  * @type:	the type of the struct this is embedded in.
  369  * @member:	the name of the list_head within the struct.
  370  *
   371  * Note that the list is expected to be non-empty.
  372  */
  373 #define list_last_entry(ptr, type, member) \
  374 	list_entry((ptr)->prev, type, member)
  375 
  376 /**
  377  * list_first_entry_or_null - get the first element from a list
  378  * @ptr:	the list head to take the element from.
  379  * @type:	the type of the struct this is embedded in.
  380  * @member:	the name of the list_head within the struct.
  381  *
  382  * Note that if the list is empty, it returns NULL.
  383  */
  384 #define list_first_entry_or_null(ptr, type, member) \
  385 	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
  386 
  387 /**
  388  * list_next_entry - get the next element in list
  389  * @pos:	the type * to cursor
  390  * @member:	the name of the list_head within the struct.
  391  */
  392 #define list_next_entry(pos, member) \
  393 	list_entry((pos)->member.next, typeof(*(pos)), member)
  394 
  395 /**
  396  * list_prev_entry - get the prev element in list
  397  * @pos:	the type * to cursor
  398  * @member:	the name of the list_head within the struct.
  399  */
  400 #define list_prev_entry(pos, member) \
  401 	list_entry((pos)->member.prev, typeof(*(pos)), member)
  402 
  403 /**
  404  * list_for_each	-	iterate over a list
  405  * @pos:	the &struct list_head to use as a loop cursor.
  406  * @head:	the head for your list.
  407  */
  408 #define list_for_each(pos, head) \
  409 	for (pos = (head)->next; pos != (head); pos = pos->next)
  410 
  411 /**
  412  * list_for_each_prev	-	iterate over a list backwards
  413  * @pos:	the &struct list_head to use as a loop cursor.
  414  * @head:	the head for your list.
  415  */
  416 #define list_for_each_prev(pos, head) \
  417 	for (pos = (head)->prev; pos != (head); pos = pos->prev)
  418 
  419 /**
  420  * list_for_each_safe - iterate over a list safe against removal of list entry
  421  * @pos:	the &struct list_head to use as a loop cursor.
  422  * @n:		another &struct list_head to use as temporary storage
  423  * @head:	the head for your list.
  424  */
  425 #define list_for_each_safe(pos, n, head) \
  426 	for (pos = (head)->next, n = pos->next; pos != (head); \
  427 		pos = n, n = pos->next)
  428 
  429 /**
  430  * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
  431  * @pos:	the &struct list_head to use as a loop cursor.
  432  * @n:		another &struct list_head to use as temporary storage
  433  * @head:	the head for your list.
  434  */
  435 #define list_for_each_prev_safe(pos, n, head) \
  436 	for (pos = (head)->prev, n = pos->prev; \
  437 	     pos != (head); \
  438 	     pos = n, n = pos->prev)
  439 
  440 /**
  441  * list_for_each_entry	-	iterate over list of given type
  442  * @pos:	the type * to use as a loop cursor.
  443  * @head:	the head for your list.
  444  * @member:	the name of the list_head within the struct.
  445  */
  446 #define list_for_each_entry(pos, head, member)				\
  447 	for (pos = list_first_entry(head, typeof(*pos), member);	\
  448 	     &pos->member != (head);					\
  449 	     pos = list_next_entry(pos, member))
  450 
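/*
 * Illustrative sketch (not part of the original header): building and
 * walking a list with the helpers above.  The item structure and names
 * are hypothetical.
 */
struct example_item {
	int value;
	struct list_head node;
};

static LIST_HEAD(example_items);

static void example_enqueue(struct example_item *item)
{
	list_add_tail(&item->node, &example_items);	/* FIFO order */
}

static int example_sum(void)
{
	struct example_item *item;
	int sum = 0;

	list_for_each_entry(item, &example_items, node)
		sum += item->value;
	return sum;
}
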
  451 /**
  452  * list_for_each_entry_reverse - iterate backwards over list of given type.
  453  * @pos:	the type * to use as a loop cursor.
  454  * @head:	the head for your list.
  455  * @member:	the name of the list_head within the struct.
  456  */
  457 #define list_for_each_entry_reverse(pos, head, member)			\
  458 	for (pos = list_last_entry(head, typeof(*pos), member);		\
  459 	     &pos->member != (head); 					\
  460 	     pos = list_prev_entry(pos, member))
  461 
  462 /**
  463  * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
  464  * @pos:	the type * to use as a start point
  465  * @head:	the head of the list
  466  * @member:	the name of the list_head within the struct.
  467  *
  468  * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
  469  */
  470 #define list_prepare_entry(pos, head, member) \
  471 	((pos) ? : list_entry(head, typeof(*pos), member))
  472 
  473 /**
  474  * list_for_each_entry_continue - continue iteration over list of given type
  475  * @pos:	the type * to use as a loop cursor.
  476  * @head:	the head for your list.
  477  * @member:	the name of the list_head within the struct.
  478  *
  479  * Continue to iterate over list of given type, continuing after
  480  * the current position.
  481  */
  482 #define list_for_each_entry_continue(pos, head, member) 		\
  483 	for (pos = list_next_entry(pos, member);			\
  484 	     &pos->member != (head);					\
  485 	     pos = list_next_entry(pos, member))
  486 
  487 /**
  488  * list_for_each_entry_continue_reverse - iterate backwards from the given point
  489  * @pos:	the type * to use as a loop cursor.
  490  * @head:	the head for your list.
  491  * @member:	the name of the list_head within the struct.
  492  *
  493  * Start to iterate over list of given type backwards, continuing after
  494  * the current position.
  495  */
  496 #define list_for_each_entry_continue_reverse(pos, head, member)		\
  497 	for (pos = list_prev_entry(pos, member);			\
  498 	     &pos->member != (head);					\
  499 	     pos = list_prev_entry(pos, member))
  500 
  501 /**
  502  * list_for_each_entry_from - iterate over list of given type from the current point
  503  * @pos:	the type * to use as a loop cursor.
  504  * @head:	the head for your list.
  505  * @member:	the name of the list_head within the struct.
  506  *
  507  * Iterate over list of given type, continuing from current position.
  508  */
  509 #define list_for_each_entry_from(pos, head, member) 			\
  510 	for (; &pos->member != (head);					\
  511 	     pos = list_next_entry(pos, member))
  512 
  513 /**
  514  * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  515  * @pos:	the type * to use as a loop cursor.
  516  * @n:		another type * to use as temporary storage
  517  * @head:	the head for your list.
  518  * @member:	the name of the list_head within the struct.
  519  */
  520 #define list_for_each_entry_safe(pos, n, head, member)			\
  521 	for (pos = list_first_entry(head, typeof(*pos), member),	\
  522 		n = list_next_entry(pos, member);			\
  523 	     &pos->member != (head); 					\
  524 	     pos = n, n = list_next_entry(n, member))
  525 
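/*
 * Illustrative sketch (not part of the original header): the _safe variant
 * is what makes it legal to delete the current entry while iterating.
 * Reuses the hypothetical struct example_item from the sketch above;
 * kfree() comes from <linux/slab.h>.
 */
static void example_drain(struct list_head *head)
{
	struct example_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, head, node) {
		list_del(&item->node);	/* safe: tmp already points past us */
		kfree(item);
	}
}
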
  526 /**
  527  * list_for_each_entry_safe_continue - continue list iteration safe against removal
  528  * @pos:	the type * to use as a loop cursor.
  529  * @n:		another type * to use as temporary storage
  530  * @head:	the head for your list.
  531  * @member:	the name of the list_head within the struct.
  532  *
  533  * Iterate over list of given type, continuing after current point,
  534  * safe against removal of list entry.
  535  */
  536 #define list_for_each_entry_safe_continue(pos, n, head, member) 		\
  537 	for (pos = list_next_entry(pos, member), 				\
  538 		n = list_next_entry(pos, member);				\
  539 	     &pos->member != (head);						\
  540 	     pos = n, n = list_next_entry(n, member))
  541 
  542 /**
  543  * list_for_each_entry_safe_from - iterate over list from current point safe against removal
  544  * @pos:	the type * to use as a loop cursor.
  545  * @n:		another type * to use as temporary storage
  546  * @head:	the head for your list.
  547  * @member:	the name of the list_head within the struct.
  548  *
  549  * Iterate over list of given type from current point, safe against
  550  * removal of list entry.
  551  */
  552 #define list_for_each_entry_safe_from(pos, n, head, member) 			\
  553 	for (n = list_next_entry(pos, member);					\
  554 	     &pos->member != (head);						\
  555 	     pos = n, n = list_next_entry(n, member))
  556 
  557 /**
  558  * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
  559  * @pos:	the type * to use as a loop cursor.
  560  * @n:		another type * to use as temporary storage
  561  * @head:	the head for your list.
  562  * @member:	the name of the list_head within the struct.
  563  *
  564  * Iterate backwards over list of given type, safe against removal
  565  * of list entry.
  566  */
  567 #define list_for_each_entry_safe_reverse(pos, n, head, member)		\
  568 	for (pos = list_last_entry(head, typeof(*pos), member),		\
  569 		n = list_prev_entry(pos, member);			\
  570 	     &pos->member != (head); 					\
  571 	     pos = n, n = list_prev_entry(n, member))
  572 
  573 /**
  574  * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
  575  * @pos:	the loop cursor used in the list_for_each_entry_safe loop
  576  * @n:		temporary storage used in list_for_each_entry_safe
  577  * @member:	the name of the list_head within the struct.
  578  *
  579  * list_safe_reset_next is not safe to use in general if the list may be
  580  * modified concurrently (eg. the lock is dropped in the loop body). An
  581  * exception to this is if the cursor element (pos) is pinned in the list,
  582  * and list_safe_reset_next is called after re-taking the lock and before
  583  * completing the current iteration of the loop body.
  584  */
  585 #define list_safe_reset_next(pos, n, member)				\
  586 	n = list_next_entry(pos, member)
  587 
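/*
 * Sketch of the pinned-cursor pattern described above; 'struct job',
 * 'job_lock', 'job_list', get_job()/put_job() and process_job() are all
 * hypothetical.  The cursor entry stays pinned while the lock is dropped,
 * and the possibly-stale 'n' is refreshed before the iteration completes.
 */
static void drain_jobs(void)
{
	struct job *pos, *n;

	spin_lock(&job_lock);
	list_for_each_entry_safe(pos, n, &job_list, link) {
		get_job(pos);			/* pin the cursor entry */
		spin_unlock(&job_lock);
		process_job(pos);		/* may sleep or remove entries */
		spin_lock(&job_lock);
		list_safe_reset_next(pos, n, link);	/* refresh 'n' */
		put_job(pos);
	}
	spin_unlock(&job_lock);
}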
  588 /*
  589  * Double linked lists with a single pointer list head.
  590  * Mostly useful for hash tables where the two pointer list head is
  591  * too wasteful.
  592  * You lose the ability to access the tail in O(1).
  593  */
  594 
  595 #define HLIST_HEAD_INIT { .first = NULL }
  596 #define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
  597 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
  598 static inline void INIT_HLIST_NODE(struct hlist_node *h)
  599 {
  600 	h->next = NULL;
  601 	h->pprev = NULL;
  602 }
  603 
  604 static inline int hlist_unhashed(const struct hlist_node *h)
  605 {
  606 	return !h->pprev;
  607 }
  608 
  609 static inline int hlist_empty(const struct hlist_head *h)
  610 {
  611 	return !READ_ONCE(h->first);
  612 }
  613 
  614 static inline void __hlist_del(struct hlist_node *n)
  615 {
  616 	struct hlist_node *next = n->next;
  617 	struct hlist_node **pprev = n->pprev;
  618 
  619 	WRITE_ONCE(*pprev, next);
  620 	if (next)
  621 		next->pprev = pprev;
  622 }
  623 
  624 static inline void hlist_del(struct hlist_node *n)
  625 {
  626 	__hlist_del(n);
  627 	n->next = LIST_POISON1;
  628 	n->pprev = LIST_POISON2;
  629 }
  630 
  631 static inline void hlist_del_init(struct hlist_node *n)
  632 {
  633 	if (!hlist_unhashed(n)) {
  634 		__hlist_del(n);
  635 		INIT_HLIST_NODE(n);
  636 	}
  637 }
  638 
  639 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
  640 {
  641 	struct hlist_node *first = h->first;
  642 	n->next = first;
  643 	if (first)
  644 		first->pprev = &n->next;
  645 	WRITE_ONCE(h->first, n);
  646 	n->pprev = &h->first;
  647 }
  648 
  649 /* next must be != NULL */
  650 static inline void hlist_add_before(struct hlist_node *n,
  651 					struct hlist_node *next)
  652 {
  653 	n->pprev = next->pprev;
  654 	n->next = next;
  655 	next->pprev = &n->next;
  656 	WRITE_ONCE(*(n->pprev), n);
  657 }
  658 
  659 static inline void hlist_add_behind(struct hlist_node *n,
  660 				    struct hlist_node *prev)
  661 {
  662 	n->next = prev->next;
  663 	WRITE_ONCE(prev->next, n);
  664 	n->pprev = &prev->next;
  665 
  666 	if (n->next)
  667 		n->next->pprev  = &n->next;
  668 }
  669 
  670 /* after that we'll appear to be on some hlist and hlist_del will work */
  671 static inline void hlist_add_fake(struct hlist_node *n)
  672 {
  673 	n->pprev = &n->next;
  674 }
  675 
  676 static inline bool hlist_fake(struct hlist_node *h)
  677 {
  678 	return h->pprev == &h->next;
  679 }
  680 
  681 /*
  682  * Check whether the node is the only node of the head without
  683  * accessing head:
  684  */
  685 static inline bool
  686 hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
  687 {
  688 	return !n->next && n->pprev == &h->first;
  689 }
  690 
  691 /*
  692  * Move a list from one list head to another. Fixup the pprev
  693  * reference of the first entry if it exists.
  694  */
  695 static inline void hlist_move_list(struct hlist_head *old,
  696 				   struct hlist_head *new)
  697 {
  698 	new->first = old->first;
  699 	if (new->first)
  700 		new->first->pprev = &new->first;
  701 	old->first = NULL;
  702 }
  703 
  704 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
  705 
  706 #define hlist_for_each(pos, head) \
  707 	for (pos = (head)->first; pos ; pos = pos->next)
  708 
  709 #define hlist_for_each_safe(pos, n, head) \
  710 	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
  711 	     pos = n)
  712 
  713 #define hlist_entry_safe(ptr, type, member) \
  714 	({ typeof(ptr) ____ptr = (ptr); \
  715 	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
  716 	})
  717 
  718 /**
  719  * hlist_for_each_entry	- iterate over list of given type
  720  * @pos:	the type * to use as a loop cursor.
  721  * @head:	the head for your list.
  722  * @member:	the name of the hlist_node within the struct.
  723  */
  724 #define hlist_for_each_entry(pos, head, member)				\
  725 	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
  726 	     pos;							\
  727 	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
  728 
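/*
 * Usage sketch (not part of the original header): a toy fixed-size hash
 * table built on hlist.  'struct session' and the modulo hash are
 * hypothetical illustrations.
 */
#define SESSION_HASH_SIZE 64

struct session {
	u32 id;
	struct hlist_node node;
};

static struct hlist_head session_hash[SESSION_HASH_SIZE];

static void session_insert(struct session *s)
{
	hlist_add_head(&s->node, &session_hash[s->id % SESSION_HASH_SIZE]);
}

static struct session *session_find(u32 id)
{
	struct session *s;

	hlist_for_each_entry(s, &session_hash[id % SESSION_HASH_SIZE], node)
		if (s->id == id)
			return s;
	return NULL;
}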
  729 /**
  730  * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
  731  * @pos:	the type * to use as a loop cursor.
  732  * @member:	the name of the hlist_node within the struct.
  733  */
  734 #define hlist_for_each_entry_continue(pos, member)			\
  735 	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
  736 	     pos;							\
  737 	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
  738 
  739 /**
  740  * hlist_for_each_entry_from - iterate over a hlist continuing from current point
  741  * @pos:	the type * to use as a loop cursor.
  742  * @member:	the name of the hlist_node within the struct.
  743  */
  744 #define hlist_for_each_entry_from(pos, member)				\
  745 	for (; pos;							\
  746 	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
  747 
  748 /**
  749  * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  750  * @pos:	the type * to use as a loop cursor.
  751  * @n:		another &struct hlist_node to use as temporary storage
  752  * @head:	the head for your list.
  753  * @member:	the name of the hlist_node within the struct.
  754  */
  755 #define hlist_for_each_entry_safe(pos, n, head, member) 		\
  756 	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
  757 	     pos && ({ n = pos->member.next; 1; });			\
  758 	     pos = hlist_entry_safe(n, typeof(*pos), member))
  759 
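/*
 * Companion sketch to the toy hash table above: emptying one bucket
 * safely with hlist_for_each_entry_safe(); kfree() assumes <linux/slab.h>.
 */
static void session_flush_bucket(struct hlist_head *bucket)
{
	struct session *s;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(s, tmp, bucket, node) {
		hlist_del(&s->node);
		kfree(s);
	}
}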
   760 #endif

     1 /* include this file if the platform implements the dma_ DMA Mapping API
    2  * and wants to provide the pci_ DMA Mapping API in terms of it */
    3 
    4 #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
    5 #define _ASM_GENERIC_PCI_DMA_COMPAT_H
    6 
    7 #include <linux/dma-mapping.h>
    8 
    9 /* This defines the direction arg to the DMA mapping routines. */
   10 #define PCI_DMA_BIDIRECTIONAL	0
   11 #define PCI_DMA_TODEVICE	1
   12 #define PCI_DMA_FROMDEVICE	2
   13 #define PCI_DMA_NONE		3
   14 
   15 static inline void *
   16 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
   17 		     dma_addr_t *dma_handle)
   18 {
   19 	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
   20 }
   21 
   22 static inline void *
   23 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
   24 		      dma_addr_t *dma_handle)
   25 {
   26 	return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
   27 				   size, dma_handle, GFP_ATOMIC);
   28 }
   29 
   30 static inline void
   31 pci_free_consistent(struct pci_dev *hwdev, size_t size,
   32 		    void *vaddr, dma_addr_t dma_handle)
   33 {
   34 	dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
   35 }
   36 
   37 static inline dma_addr_t
   38 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
   39 {
   40 	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
   41 }
   42 
   43 static inline void
   44 pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
   45 		 size_t size, int direction)
   46 {
   47 	dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
   48 }
   49 
   50 static inline dma_addr_t
   51 pci_map_page(struct pci_dev *hwdev, struct page *page,
   52 	     unsigned long offset, size_t size, int direction)
   53 {
   54 	return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
   55 }
   56 
   57 static inline void
   58 pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
   59 	       size_t size, int direction)
   60 {
   61 	dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
   62 }
   63 
   64 static inline int
   65 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
   66 	   int nents, int direction)
   67 {
   68 	return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
   69 }
   70 
   71 static inline void
   72 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
   73 	     int nents, int direction)
   74 {
   75 	dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
   76 }
   77 
   78 static inline void
   79 pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
   80 		    size_t size, int direction)
   81 {
   82 	dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
   83 }
   84 
   85 static inline void
   86 pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
   87 		    size_t size, int direction)
   88 {
   89 	dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
   90 }
   91 
   92 static inline void
   93 pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
   94 		int nelems, int direction)
   95 {
   96 	dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
   97 }
   98 
   99 static inline void
  100 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
  101 		int nelems, int direction)
  102 {
  103 	dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
  104 }
  105 
  106 static inline int
  107 pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
  108 {
  109 	return dma_mapping_error(&pdev->dev, dma_addr);
  110 }
  111 
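/*
 * Usage sketch (not part of this header): a one-shot streaming mapping
 * through the pci_ compat wrappers above.  'buf' and 'len' come from a
 * hypothetical caller; the device programming step is elided.
 */
static int example_send_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for completion ... */

	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}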
  112 #ifdef CONFIG_PCI
  113 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
  114 {
  115 	return dma_set_mask(&dev->dev, mask);
  116 }
  117 
  118 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
  119 {
  120 	return dma_set_coherent_mask(&dev->dev, mask);
  121 }
  122 
  123 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
  124 					   unsigned int size)
  125 {
  126 	return dma_set_max_seg_size(&dev->dev, size);
  127 }
  128 
  129 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
  130 					   unsigned long mask)
  131 {
  132 	return dma_set_seg_boundary(&dev->dev, mask);
  133 }
  134 #else
  135 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
  136 { return -EIO; }
  137 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
  138 { return -EIO; }
  139 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
  140 					   unsigned int size)
  141 { return -EIO; }
  142 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
  143 					   unsigned long mask)
  144 { return -EIO; }
  145 #endif
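/*
 * Sketch (hypothetical): requesting 64-bit DMA and falling back to 32-bit.
 * DMA_BIT_MASK() comes from <linux/dma-mapping.h>, included above.
 */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
		return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	return -EIO;
}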
  146 
   147 #endif

     1 /*
    2  *	pci.h
    3  *
    4  *	PCI defines and function prototypes
    5  *	Copyright 1994, Drew Eckhardt
    6  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
    7  *
    8  *	For more information, please consult the following manuals (look at
    9  *	http://www.pcisig.com/ for how to get them):
   10  *
   11  *	PCI BIOS Specification
   12  *	PCI Local Bus Specification
   13  *	PCI to PCI Bridge Specification
   14  *	PCI System Design Guide
   15  */
   16 #ifndef LINUX_PCI_H
   17 #define LINUX_PCI_H
   18 
   19 
   20 #include <linux/mod_devicetable.h>
   21 
   22 #include <linux/types.h>
   23 #include <linux/init.h>
   24 #include <linux/ioport.h>
   25 #include <linux/list.h>
   26 #include <linux/compiler.h>
   27 #include <linux/errno.h>
   28 #include <linux/kobject.h>
   29 #include <linux/atomic.h>
   30 #include <linux/device.h>
   31 #include <linux/io.h>
   32 #include <linux/resource_ext.h>
   33 #include <uapi/linux/pci.h>
   34 
   35 #include <linux/pci_ids.h>
   36 
   37 /*
   38  * The PCI interface treats multi-function devices as independent
   39  * devices.  The slot/function address of each device is encoded
   40  * in a single byte as follows:
   41  *
   42  *	7:3 = slot
   43  *	2:0 = function
   44  *
   45  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
   46  * In the interest of not exposing interfaces to user-space unnecessarily,
   47  * the following kernel-only defines are being added here.
   48  */
   49 #define PCI_DEVID(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))
    50 /* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
   51 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
   52 
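/*
 * Illustration (not in the original header): round-tripping a device ID.
 * The bus/slot/function numbers are arbitrary; PCI_DEVFN() comes from
 * uapi/linux/pci.h, included above.
 */
static inline void pci_devid_example(void)
{
	u16 devid = PCI_DEVID(0x3a, PCI_DEVFN(2, 1)); /* bus 0x3a, slot 2, fn 1 */
	u8 bus = PCI_BUS_NUM(devid);		      /* yields 0x3a again */

	(void)bus;
}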
   53 /* pci_slot represents a physical slot */
   54 struct pci_slot {
   55 	struct pci_bus *bus;		/* The bus this slot is on */
   56 	struct list_head list;		/* node in list of slots on this bus */
   57 	struct hotplug_slot *hotplug;	/* Hotplug info (migrate over time) */
   58 	unsigned char number;		/* PCI_SLOT(pci_dev->devfn) */
   59 	struct kobject kobj;
   60 };
   61 
   62 static inline const char *pci_slot_name(const struct pci_slot *slot)
   63 {
   64 	return kobject_name(&slot->kobj);
   65 }
   66 
   67 /* File state for mmap()s on /proc/bus/pci/X/Y */
   68 enum pci_mmap_state {
   69 	pci_mmap_io,
   70 	pci_mmap_mem
   71 };
   72 
   73 /*
   74  *  For PCI devices, the region numbers are assigned this way:
   75  */
   76 enum {
   77 	/* #0-5: standard PCI resources */
   78 	PCI_STD_RESOURCES,
   79 	PCI_STD_RESOURCE_END = 5,
   80 
   81 	/* #6: expansion ROM resource */
   82 	PCI_ROM_RESOURCE,
   83 
   84 	/* device specific resources */
   85 #ifdef CONFIG_PCI_IOV
   86 	PCI_IOV_RESOURCES,
   87 	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
   88 #endif
   89 
   90 	/* resources assigned to buses behind the bridge */
   91 #define PCI_BRIDGE_RESOURCE_NUM 4
   92 
   93 	PCI_BRIDGE_RESOURCES,
   94 	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
   95 				  PCI_BRIDGE_RESOURCE_NUM - 1,
   96 
   97 	/* total resources associated with a PCI device */
   98 	PCI_NUM_RESOURCES,
   99 
  100 	/* preserve this for compatibility */
  101 	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
  102 };
  103 
  104 /*
  105  * pci_power_t values must match the bits in the Capabilities PME_Support
  106  * and Control/Status PowerState fields in the Power Management capability.
  107  */
  108 typedef int __bitwise pci_power_t;
  109 
  110 #define PCI_D0		((pci_power_t __force) 0)
  111 #define PCI_D1		((pci_power_t __force) 1)
  112 #define PCI_D2		((pci_power_t __force) 2)
  113 #define PCI_D3hot	((pci_power_t __force) 3)
  114 #define PCI_D3cold	((pci_power_t __force) 4)
  115 #define PCI_UNKNOWN	((pci_power_t __force) 5)
  116 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
  117 
  118 /* Remember to update this when the list above changes! */
  119 extern const char *pci_power_names[];
  120 
  121 static inline const char *pci_power_name(pci_power_t state)
  122 {
  123 	return pci_power_names[1 + (__force int) state];
  124 }
  125 
  126 #define PCI_PM_D2_DELAY		200
  127 #define PCI_PM_D3_WAIT		10
  128 #define PCI_PM_D3COLD_WAIT	100
  129 #define PCI_PM_BUS_WAIT		50
  130 
  131 /** The pci_channel state describes connectivity between the CPU and
  132  *  the pci device.  If some PCI bus between here and the pci device
  133  *  has crashed or locked up, this info is reflected here.
  134  */
  135 typedef unsigned int __bitwise pci_channel_state_t;
  136 
  137 enum pci_channel_state {
  138 	/* I/O channel is in normal state */
  139 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
  140 
  141 	/* I/O to channel is blocked */
  142 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
  143 
  144 	/* PCI card is dead */
  145 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
  146 };
  147 
  148 typedef unsigned int __bitwise pcie_reset_state_t;
  149 
  150 enum pcie_reset_state {
  151 	/* Reset is NOT asserted (Use to deassert reset) */
  152 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
  153 
  154 	/* Use #PERST to reset PCIe device */
  155 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
  156 
  157 	/* Use PCIe Hot Reset to reset device */
  158 	pcie_hot_reset = (__force pcie_reset_state_t) 3
  159 };
  160 
  161 typedef unsigned short __bitwise pci_dev_flags_t;
  162 enum pci_dev_flags {
  163 	/* INTX_DISABLE in PCI_COMMAND register disables MSI
  164 	 * generation too.
  165 	 */
  166 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
  167 	/* Device configuration is irrevocably lost if disabled into D3 */
  168 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
  169 	/* Provide indication device is assigned by a Virtual Machine Manager */
  170 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
  171 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
  172 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
  173 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
  174 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
  175 	/* Do not use bus resets for device */
  176 	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
  177 	/* Do not use PM reset even if device advertises NoSoftRst- */
  178 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
  179 	/* Get VPD from function 0 VPD */
  180 	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
  181 };
  182 
  183 enum pci_irq_reroute_variant {
  184 	INTEL_IRQ_REROUTE_VARIANT = 1,
  185 	MAX_IRQ_REROUTE_VARIANTS = 3
  186 };
  187 
  188 typedef unsigned short __bitwise pci_bus_flags_t;
  189 enum pci_bus_flags {
  190 	PCI_BUS_FLAGS_NO_MSI   = (__force pci_bus_flags_t) 1,
  191 	PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
  192 };
  193 
  194 /* These values come from the PCI Express Spec */
  195 enum pcie_link_width {
  196 	PCIE_LNK_WIDTH_RESRV	= 0x00,
  197 	PCIE_LNK_X1		= 0x01,
  198 	PCIE_LNK_X2		= 0x02,
  199 	PCIE_LNK_X4		= 0x04,
  200 	PCIE_LNK_X8		= 0x08,
  201 	PCIE_LNK_X12		= 0x0C,
  202 	PCIE_LNK_X16		= 0x10,
  203 	PCIE_LNK_X32		= 0x20,
  204 	PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
  205 };
  206 
  207 /* Based on the PCI Hotplug Spec, but some values are made up by us */
  208 enum pci_bus_speed {
  209 	PCI_SPEED_33MHz			= 0x00,
  210 	PCI_SPEED_66MHz			= 0x01,
  211 	PCI_SPEED_66MHz_PCIX		= 0x02,
  212 	PCI_SPEED_100MHz_PCIX		= 0x03,
  213 	PCI_SPEED_133MHz_PCIX		= 0x04,
  214 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
  215 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
  216 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
  217 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
  218 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
  219 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
  220 	AGP_UNKNOWN			= 0x0c,
  221 	AGP_1X				= 0x0d,
  222 	AGP_2X				= 0x0e,
  223 	AGP_4X				= 0x0f,
  224 	AGP_8X				= 0x10,
  225 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
  226 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
  227 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
  228 	PCIE_SPEED_2_5GT		= 0x14,
  229 	PCIE_SPEED_5_0GT		= 0x15,
  230 	PCIE_SPEED_8_0GT		= 0x16,
  231 	PCI_SPEED_UNKNOWN		= 0xff,
  232 };
  233 
  234 struct pci_cap_saved_data {
  235 	u16 cap_nr;
  236 	bool cap_extended;
  237 	unsigned int size;
  238 	u32 data[0];
  239 };
  240 
  241 struct pci_cap_saved_state {
  242 	struct hlist_node next;
  243 	struct pci_cap_saved_data cap;
  244 };
  245 
  246 struct pcie_link_state;
  247 struct pci_vpd;
  248 struct pci_sriov;
  249 struct pci_ats;
  250 
  251 /*
  252  * The pci_dev structure is used to describe PCI devices.
  253  */
  254 struct pci_dev {
  255 	struct list_head bus_list;	/* node in per-bus list */
  256 	struct pci_bus	*bus;		/* bus this device is on */
  257 	struct pci_bus	*subordinate;	/* bus this device bridges to */
  258 
  259 	void		*sysdata;	/* hook for sys-specific extension */
  260 	struct proc_dir_entry *procent;	/* device entry in /proc/bus/pci */
  261 	struct pci_slot	*slot;		/* Physical slot this device is in */
  262 
  263 	unsigned int	devfn;		/* encoded device & function index */
  264 	unsigned short	vendor;
  265 	unsigned short	device;
  266 	unsigned short	subsystem_vendor;
  267 	unsigned short	subsystem_device;
  268 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
  269 	u8		revision;	/* PCI revision, low byte of class word */
  270 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
  271 	u8		pcie_cap;	/* PCIe capability offset */
  272 	u8		msi_cap;	/* MSI capability offset */
  273 	u8		msix_cap;	/* MSI-X capability offset */
  274 	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
  275 	u8		rom_base_reg;	/* which config register controls the ROM */
  276 	u8		pin;		/* which interrupt pin this device uses */
  277 	u16		pcie_flags_reg;	/* cached PCIe Capabilities Register */
  278 	unsigned long	*dma_alias_mask;/* mask of enabled devfn aliases */
  279 
  280 	struct pci_driver *driver;	/* which driver has allocated this device */
  281 	u64		dma_mask;	/* Mask of the bits of bus address this
  282 					   device implements.  Normally this is
  283 					   0xffffffff.  You only need to change
  284 					   this if your device has broken DMA
  285 					   or supports 64-bit transfers.  */
  286 
  287 	struct device_dma_parameters dma_parms;
  288 
  289 	pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
  290 					   this is D0-D3, D0 being fully functional,
  291 					   and D3 being off. */
  292 	u8		pm_cap;		/* PM capability offset */
  293 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
  294 					   can be generated */
  295 	unsigned int	pme_interrupt:1;
  296 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
  297 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
  298 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
  299 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
  300 	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
  301 	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
  302 	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
  303 	unsigned int	mmio_always_on:1;	/* disallow turning off io/mem
  304 						   decoding during bar sizing */
  305 	unsigned int	wakeup_prepared:1;
   306 	unsigned int	runtime_d3cold:1;	/* whether to go through runtime
  307 						   D3cold, not set for devices
  308 						   powered on/off by the
  309 						   corresponding bridge */
  310 	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
  311 	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
  312 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
  313 
  314 #ifdef CONFIG_PCIEASPM
  315 	struct pcie_link_state	*link_state;	/* ASPM link state */
  316 #endif
  317 
  318 	pci_channel_state_t error_state;	/* current connectivity state */
  319 	struct	device	dev;		/* Generic device interface */
  320 
  321 	int		cfg_size;	/* Size of configuration space */
  322 
  323 	/*
  324 	 * Instead of touching interrupt line and base address registers
  325 	 * directly, use the values stored here. They might be different!
  326 	 */
  327 	unsigned int	irq;
  328 	struct cpumask	*irq_affinity;
  329 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
  330 
  331 	bool match_driver;		/* Skip attaching driver */
  332 	/* These fields are used by common fixups */
  333 	unsigned int	transparent:1;	/* Subtractive decode PCI bridge */
  334 	unsigned int	multifunction:1;/* Part of multi-function device */
  335 	/* keep track of device state */
  336 	unsigned int	is_added:1;
  337 	unsigned int	is_busmaster:1; /* device is busmaster */
  338 	unsigned int	no_msi:1;	/* device may not use msi */
  339 	unsigned int	no_64bit_msi:1; /* device may only use 32-bit MSIs */
  340 	unsigned int	block_cfg_access:1;	/* config space access is blocked */
  341 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
  342 	unsigned int	irq_reroute_variant:2;	/* device needs IRQ rerouting variant */
  343 	unsigned int	msi_enabled:1;
  344 	unsigned int	msix_enabled:1;
  345 	unsigned int	ari_enabled:1;	/* ARI forwarding */
  346 	unsigned int	ats_enabled:1;	/* Address Translation Service */
  347 	unsigned int	is_managed:1;
  348 	unsigned int    needs_freset:1; /* Dev requires fundamental reset */
  349 	unsigned int	state_saved:1;
  350 	unsigned int	is_physfn:1;
  351 	unsigned int	is_virtfn:1;
  352 	unsigned int	reset_fn:1;
  353 	unsigned int    is_hotplug_bridge:1;
  354 	unsigned int    __aer_firmware_first_valid:1;
  355 	unsigned int	__aer_firmware_first:1;
  356 	unsigned int	broken_intx_masking:1;
  357 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
  358 	unsigned int	irq_managed:1;
  359 	unsigned int	has_secondary_link:1;
  360 	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
  361 	pci_dev_flags_t dev_flags;
  362 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
  363 
  364 	u32		saved_config_space[16]; /* config space saved at suspend time */
  365 	struct hlist_head saved_cap_space;
  366 	struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
  367 	int rom_attr_enabled;		/* has display of the rom attribute been enabled? */
  368 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
  369 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
  370 #ifdef CONFIG_PCI_MSI
  371 	const struct attribute_group **msi_irq_groups;
  372 #endif
  373 	struct pci_vpd *vpd;
  374 #ifdef CONFIG_PCI_ATS
  375 	union {
  376 		struct pci_sriov *sriov;	/* SR-IOV capability related */
  377 		struct pci_dev *physfn;	/* the PF this VF is associated with */
  378 	};
  379 	u16		ats_cap;	/* ATS Capability offset */
  380 	u8		ats_stu;	/* ATS Smallest Translation Unit */
  381 	atomic_t	ats_ref_cnt;	/* number of VFs with ATS enabled */
  382 #endif
  383 	phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
  384 	size_t romlen; /* Length of ROM if it's not from the BAR */
  385 	char *driver_override; /* Driver name to force a match */
  386 };
  387 
  388 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
  389 {
  390 #ifdef CONFIG_PCI_IOV
  391 	if (dev->is_virtfn)
  392 		dev = dev->physfn;
  393 #endif
  394 	return dev;
  395 }
  396 
  397 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
  398 
  399 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
  400 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
  401 
  402 static inline int pci_channel_offline(struct pci_dev *pdev)
  403 {
  404 	return (pdev->error_state != pci_channel_io_normal);
  405 }
  406 
  407 struct pci_host_bridge {
  408 	struct device dev;
  409 	struct pci_bus *bus;		/* root bus */
  410 	struct list_head windows;	/* resource_entry */
  411 	void (*release_fn)(struct pci_host_bridge *);
  412 	void *release_data;
  413 	unsigned int ignore_reset_delay:1;	/* for entire hierarchy */
  414 	/* Resource alignment requirements */
  415 	resource_size_t (*align_resource)(struct pci_dev *dev,
  416 			const struct resource *res,
  417 			resource_size_t start,
  418 			resource_size_t size,
  419 			resource_size_t align);
  420 };
  421 
  422 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
  423 
  424 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
  425 
  426 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
  427 		     void (*release_fn)(struct pci_host_bridge *),
  428 		     void *release_data);
  429 
  430 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
  431 
  432 /*
  433  * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
  434  * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
  435  * buses below host bridges or subtractive decode bridges) go in the list.
  436  * Use pci_bus_for_each_resource() to iterate through all the resources.
  437  */
  438 
  439 /*
  440  * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
  441  * and there's no way to program the bridge with the details of the window.
  442  * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
  443  * decode bit set, because they are explicit and can be programmed with _SRS.
  444  */
  445 #define PCI_SUBTRACTIVE_DECODE	0x1
  446 
  447 struct pci_bus_resource {
  448 	struct list_head list;
  449 	struct resource *res;
  450 	unsigned int flags;
  451 };
  452 
  453 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
  454 
  455 struct pci_bus {
  456 	struct list_head node;		/* node in list of buses */
  457 	struct pci_bus	*parent;	/* parent bus this bridge is on */
  458 	struct list_head children;	/* list of child buses */
  459 	struct list_head devices;	/* list of devices on this bus */
  460 	struct pci_dev	*self;		/* bridge device as seen by parent */
  461 	struct list_head slots;		/* list of slots on this bus;
  462 					   protected by pci_slot_mutex */
  463 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
  464 	struct list_head resources;	/* address space routed to this bus */
  465 	struct resource busn_res;	/* bus numbers routed to this bus */
  466 
  467 	struct pci_ops	*ops;		/* configuration access functions */
  468 	struct msi_controller *msi;	/* MSI controller */
  469 	void		*sysdata;	/* hook for sys-specific extension */
  470 	struct proc_dir_entry *procdir;	/* directory entry in /proc/bus/pci */
  471 
  472 	unsigned char	number;		/* bus number */
  473 	unsigned char	primary;	/* number of primary bridge */
  474 	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
  475 	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
  476 #ifdef CONFIG_PCI_DOMAINS_GENERIC
  477 	int		domain_nr;
  478 #endif
  479 
  480 	char		name[48];
  481 
  482 	unsigned short  bridge_ctl;	/* manage NO_ISA/FBB/et al behaviors */
  483 	pci_bus_flags_t bus_flags;	/* inherited by child buses */
  484 	struct device		*bridge;
  485 	struct device		dev;
  486 	struct bin_attribute	*legacy_io; /* legacy I/O for this bus */
  487 	struct bin_attribute	*legacy_mem; /* legacy mem */
  488 	unsigned int		is_added:1;
  489 };
  490 
  491 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
  492 
  493 /*
  494  * Returns true if the PCI bus is root (behind host-PCI bridge),
  495  * false otherwise
  496  *
  497  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
  498  * This is incorrect because "virtual" buses added for SR-IOV (via
  499  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
  500  */
  501 static inline bool pci_is_root_bus(struct pci_bus *pbus)
  502 {
  503 	return !(pbus->parent);
  504 }
  505 
  506 /**
  507  * pci_is_bridge - check if the PCI device is a bridge
  508  * @dev: PCI device
  509  *
   510  * Return true if the PCI device is a bridge, whether or not it has a
   511  * subordinate bus.
  512  */
  513 static inline bool pci_is_bridge(struct pci_dev *dev)
  514 {
  515 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
  516 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
  517 }
  518 
  519 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
  520 {
  521 	dev = pci_physfn(dev);
  522 	if (pci_is_root_bus(dev->bus))
  523 		return NULL;
  524 
  525 	return dev->bus->self;
  526 }
  527 
  528 struct device *pci_get_host_bridge_device(struct pci_dev *dev);
  529 void pci_put_host_bridge_device(struct device *dev);
  530 
  531 #ifdef CONFIG_PCI_MSI
  532 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
  533 {
  534 	return pci_dev->msi_enabled || pci_dev->msix_enabled;
  535 }
  536 #else
  537 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
  538 #endif
  539 
  540 /*
  541  * Error values that may be returned by PCI functions.
  542  */
  543 #define PCIBIOS_SUCCESSFUL		0x00
  544 #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
  545 #define PCIBIOS_BAD_VENDOR_ID		0x83
  546 #define PCIBIOS_DEVICE_NOT_FOUND	0x86
  547 #define PCIBIOS_BAD_REGISTER_NUMBER	0x87
  548 #define PCIBIOS_SET_FAILED		0x88
  549 #define PCIBIOS_BUFFER_TOO_SMALL	0x89
  550 
  551 /*
  552  * Translate above to generic errno for passing back through non-PCI code.
  553  */
  554 static inline int pcibios_err_to_errno(int err)
  555 {
  556 	if (err <= PCIBIOS_SUCCESSFUL)
  557 		return err; /* Assume already errno */
  558 
  559 	switch (err) {
  560 	case PCIBIOS_FUNC_NOT_SUPPORTED:
  561 		return -ENOENT;
  562 	case PCIBIOS_BAD_VENDOR_ID:
  563 		return -ENOTTY;
  564 	case PCIBIOS_DEVICE_NOT_FOUND:
  565 		return -ENODEV;
  566 	case PCIBIOS_BAD_REGISTER_NUMBER:
  567 		return -EFAULT;
  568 	case PCIBIOS_SET_FAILED:
  569 		return -EIO;
  570 	case PCIBIOS_BUFFER_TOO_SMALL:
  571 		return -ENOSPC;
  572 	}
  573 
  574 	return -ERANGE;
  575 }
  576 
  577 /* Low-level architecture-dependent routines */
  578 
  579 struct pci_ops {
  580 	int (*add_bus)(struct pci_bus *bus);
  581 	void (*remove_bus)(struct pci_bus *bus);
  582 	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
  583 	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
  584 	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
  585 };
  586 
  587 /*
  588  * ACPI needs to be able to access PCI config space before we've done a
  589  * PCI bus scan and created pci_bus structures.
  590  */
  591 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
  592 		 int reg, int len, u32 *val);
  593 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
  594 		  int reg, int len, u32 val);
  595 
  596 #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
  597 typedef u64 pci_bus_addr_t;
  598 #else
  599 typedef u32 pci_bus_addr_t;
  600 #endif
  601 
  602 struct pci_bus_region {
  603 	pci_bus_addr_t start;
  604 	pci_bus_addr_t end;
  605 };
  606 
  607 struct pci_dynids {
  608 	spinlock_t lock;            /* protects list, index */
  609 	struct list_head list;      /* for IDs added at runtime */
  610 };
  611 
  612 
  613 /*
  614  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
  615  * a set of callbacks in struct pci_error_handlers, that device driver
  616  * will be notified of PCI bus errors, and will be driven to recovery
  617  * when an error occurs.
  618  */
  619 
  620 typedef unsigned int __bitwise pci_ers_result_t;
  621 
  622 enum pci_ers_result {
  623 	/* no result/none/not supported in device driver */
  624 	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
  625 
  626 	/* Device driver can recover without slot reset */
  627 	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
  628 
  629 	/* Device driver wants slot to be reset. */
  630 	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
  631 
  632 	/* Device has completely failed, is unrecoverable */
  633 	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
  634 
  635 	/* Device driver is fully recovered and operational */
  636 	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
  637 
  638 	/* No AER capabilities registered for the driver */
  639 	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
  640 };
  641 
  642 /* PCI bus error event callbacks */
  643 struct pci_error_handlers {
  644 	/* PCI bus error detected on this device */
  645 	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
  646 					   enum pci_channel_state error);
  647 
  648 	/* MMIO has been re-enabled, but not DMA */
  649 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
  650 
  651 	/* PCI Express link has been reset */
  652 	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
  653 
  654 	/* PCI slot has been reset */
  655 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
  656 
   657 	/* PCI function reset is about to happen or has completed */
  658 	void (*reset_notify)(struct pci_dev *dev, bool prepare);
  659 
  660 	/* Device driver may resume normal operations */
  661 	void (*resume)(struct pci_dev *dev);
  662 };
  663 
  664 
  665 struct module;
  666 struct pci_driver {
  667 	struct list_head node;
  668 	const char *name;
  669 	const struct pci_device_id *id_table;	/* must be non-NULL for probe to be called */
  670 	int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
  671 	void (*remove) (struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
  672 	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
  673 	int  (*suspend_late) (struct pci_dev *dev, pm_message_t state);
  674 	int  (*resume_early) (struct pci_dev *dev);
  675 	int  (*resume) (struct pci_dev *dev);	                /* Device woken up */
  676 	void (*shutdown) (struct pci_dev *dev);
  677 	int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
  678 	const struct pci_error_handlers *err_handler;
  679 	struct device_driver	driver;
  680 	struct pci_dynids dynids;
  681 };
  682 
  683 #define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
  684 
  685 /**
  686  * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
  687  * @_table: device table name
  688  *
  689  * This macro is deprecated and should not be used in new code.
  690  */
  691 #define DEFINE_PCI_DEVICE_TABLE(_table) \
  692 	const struct pci_device_id _table[]
  693 
  694 /**
  695  * PCI_DEVICE - macro used to describe a specific pci device
  696  * @vend: the 16 bit PCI Vendor ID
  697  * @dev: the 16 bit PCI Device ID
  698  *
  699  * This macro is used to create a struct pci_device_id that matches a
  700  * specific device.  The subvendor and subdevice fields will be set to
  701  * PCI_ANY_ID.
  702  */
  703 #define PCI_DEVICE(vend,dev) \
  704 	.vendor = (vend), .device = (dev), \
  705 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  706 
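/*
 * Example (not part of the original header): a match table using
 * PCI_DEVICE(); the vendor/device numbers are hypothetical, and the table
 * must end with an all-zero sentinel entry.
 */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ 0, }
};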
  707 /**
  708  * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
  709  * @vend: the 16 bit PCI Vendor ID
  710  * @dev: the 16 bit PCI Device ID
  711  * @subvend: the 16 bit PCI Subvendor ID
  712  * @subdev: the 16 bit PCI Subdevice ID
  713  *
  714  * This macro is used to create a struct pci_device_id that matches a
  715  * specific device with subsystem information.
  716  */
  717 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
  718 	.vendor = (vend), .device = (dev), \
  719 	.subvendor = (subvend), .subdevice = (subdev)
  720 
  721 /**
  722  * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
  723  * @dev_class: the class, subclass, prog-if triple for this device
  724  * @dev_class_mask: the class mask for this device
  725  *
  726  * This macro is used to create a struct pci_device_id that matches a
  727  * specific PCI class.  The vendor, device, subvendor, and subdevice
  728  * fields will be set to PCI_ANY_ID.
  729  */
  730 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
  731 	.class = (dev_class), .class_mask = (dev_class_mask), \
  732 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
  733 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  734 
  735 /**
  736  * PCI_VDEVICE - macro used to describe a specific pci device in short form
  737  * @vend: the vendor name
  738  * @dev: the 16 bit PCI Device ID
  739  *
  740  * This macro is used to create a struct pci_device_id that matches a
   741  * specific PCI device.  The subvendor and subdevice fields will be set
  742  * to PCI_ANY_ID. The macro allows the next field to follow as the device
  743  * private data.
  744  */
  745 
  746 #define PCI_VDEVICE(vend, dev) \
  747 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
  748 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
  749 
  750 enum {
  751 	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* ignore firmware setup */
  752 	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* reassign all bus numbers */
  753 	PCI_PROBE_ONLY		= 0x00000004,	/* use existing setup */
  754 	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* don't do ISA alignment */
  755 	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* enable domains in /proc */
  756 	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
  757 	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* scan all, not just dev 0 */
  758 };
  759 
  760 /* these external functions are only available when PCI support is enabled */
  761 #ifdef CONFIG_PCI
  762 
  763 extern unsigned int pci_flags;
  764 
  765 static inline void pci_set_flags(int flags) { pci_flags = flags; }
  766 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
  767 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
  768 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
  769 
  770 void pcie_bus_configure_settings(struct pci_bus *bus);
  771 
  772 enum pcie_bus_config_types {
  773 	PCIE_BUS_TUNE_OFF,	/* don't touch MPS at all */
  774 	PCIE_BUS_DEFAULT,	/* ensure MPS matches upstream bridge */
   775 	PCIE_BUS_SAFE,		/* use largest MPS that boot-time devices support */
  776 	PCIE_BUS_PERFORMANCE,	/* use MPS and MRRS for best performance */
  777 	PCIE_BUS_PEER2PEER,	/* set MPS = 128 for all devices */
  778 };
  779 
  780 extern enum pcie_bus_config_types pcie_bus_config;
  781 
  782 extern struct bus_type pci_bus_type;
  783 
   784 /* Do NOT directly access this variable unless you are arch-specific PCI
   785  * code or PCI core code. */
  786 extern struct list_head pci_root_buses;	/* list of all known PCI buses */
   787 /* Some device drivers need to know if PCI is initialized */
  788 int no_pci_devices(void);
  789 
  790 void pcibios_resource_survey_bus(struct pci_bus *bus);
  791 void pcibios_bus_add_device(struct pci_dev *pdev);
  792 void pcibios_add_bus(struct pci_bus *bus);
  793 void pcibios_remove_bus(struct pci_bus *bus);
  794 void pcibios_fixup_bus(struct pci_bus *);
  795 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
  796 /* Architecture-specific versions may override this (weak) */
  797 char *pcibios_setup(char *str);
  798 
  799 /* Used only when drivers/pci/setup.c is used */
  800 resource_size_t pcibios_align_resource(void *, const struct resource *,
  801 				resource_size_t,
  802 				resource_size_t);
  803 void pcibios_update_irq(struct pci_dev *, int irq);
  804 
   805 /* Weak but can be overridden by arch */
  806 void pci_fixup_cardbus(struct pci_bus *);
  807 
  808 /* Generic PCI functions used internally */
  809 
  810 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
  811 			     struct resource *res);
  812 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
  813 			     struct pci_bus_region *region);
  814 void pcibios_scan_specific_bus(int busn);
  815 struct pci_bus *pci_find_bus(int domain, int busnr);
  816 void pci_bus_add_devices(const struct pci_bus *bus);
  817 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
  818 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
  819 				    struct pci_ops *ops, void *sysdata,
  820 				    struct list_head *resources);
  821 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
  822 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
  823 void pci_bus_release_busn_res(struct pci_bus *b);
  824 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
  825 				      struct pci_ops *ops, void *sysdata,
  826 				      struct list_head *resources,
  827 				      struct msi_controller *msi);
  828 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
  829 					     struct pci_ops *ops, void *sysdata,
  830 					     struct list_head *resources);
  831 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
  832 				int busnr);
  833 void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
  834 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
  835 				 const char *name,
  836 				 struct hotplug_slot *hotplug);
  837 void pci_destroy_slot(struct pci_slot *slot);
  838 #ifdef CONFIG_SYSFS
  839 void pci_dev_assign_slot(struct pci_dev *dev);
  840 #else
  841 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
  842 #endif
  843 int pci_scan_slot(struct pci_bus *bus, int devfn);
  844 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
  845 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
  846 unsigned int pci_scan_child_bus(struct pci_bus *bus);
  847 void pci_bus_add_device(struct pci_dev *dev);
  848 void pci_read_bridge_bases(struct pci_bus *child);
  849 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
  850 					  struct resource *res);
  851 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
  852 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
  853 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
  854 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
  855 struct pci_dev *pci_dev_get(struct pci_dev *dev);
  856 void pci_dev_put(struct pci_dev *dev);
  857 void pci_remove_bus(struct pci_bus *b);
  858 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
  859 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
  860 void pci_stop_root_bus(struct pci_bus *bus);
  861 void pci_remove_root_bus(struct pci_bus *bus);
  862 void pci_setup_cardbus(struct pci_bus *bus);
  863 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
  864 void pci_sort_breadthfirst(void);
  865 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
  866 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
  867 #define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
  868 
  869 /* Generic PCI functions exported to card drivers */
  870 
  871 enum pci_lost_interrupt_reason {
  872 	PCI_LOST_IRQ_NO_INFORMATION = 0,
  873 	PCI_LOST_IRQ_DISABLE_MSI,
  874 	PCI_LOST_IRQ_DISABLE_MSIX,
  875 	PCI_LOST_IRQ_DISABLE_ACPI,
  876 };
  877 enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
  878 int pci_find_capability(struct pci_dev *dev, int cap);
  879 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
  880 int pci_find_ext_capability(struct pci_dev *dev, int cap);
  881 int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
  882 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
  883 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
  884 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
  885 
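/*
 * Sketch (hypothetical) of the capability finders above: locating the
 * power-management capability.  PCI_CAP_ID_PM is from uapi/linux/pci_regs.h.
 */
static int example_find_pm_cap(struct pci_dev *dev)
{
	int pos = pci_find_capability(dev, PCI_CAP_ID_PM);

	return pos ? pos : -ENODEV;	/* 0 means the capability is absent */
}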
  886 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
  887 				struct pci_dev *from);
  888 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
  889 				unsigned int ss_vendor, unsigned int ss_device,
  890 				struct pci_dev *from);
  891 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
  892 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
  893 					    unsigned int devfn);
  894 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
  895 						   unsigned int devfn)
  896 {
  897 	return pci_get_domain_bus_and_slot(0, bus, devfn);
  898 }
  899 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
  900 int pci_dev_present(const struct pci_device_id *ids);
  901 
  902 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
  903 			     int where, u8 *val);
  904 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
  905 			     int where, u16 *val);
  906 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
  907 			      int where, u32 *val);
  908 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
  909 			      int where, u8 val);
  910 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
  911 			      int where, u16 val);
  912 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
  913 			       int where, u32 val);
  914 
  915 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
  916 			    int where, int size, u32 *val);
  917 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
  918 			    int where, int size, u32 val);
  919 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
  920 			      int where, int size, u32 *val);
  921 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
  922 			       int where, int size, u32 val);
  923 
  924 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
  925 
  926 static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
  927 {
  928 	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
  929 }
  930 static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
  931 {
  932 	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
  933 }
  934 static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
  935 					u32 *val)
  936 {
  937 	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
  938 }
  939 static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
  940 {
  941 	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
  942 }
  943 static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
  944 {
  945 	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
  946 }
  947 static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
  948 					 u32 val)
  949 {
  950 	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
  951 }
  952 
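/*
 * Usage sketch (hypothetical): reading the vendor ID through the inline
 * accessors above and converting the PCIBIOS_* result to a normal errno
 * with pcibios_err_to_errno().  PCI_VENDOR_ID is from uapi/linux/pci_regs.h.
 */
static int example_read_vendor(struct pci_dev *pdev, u16 *vendor)
{
	int rc = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);

	return pcibios_err_to_errno(rc);
}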
  953 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
  954 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
  955 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
  956 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
  957 int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
  958 				       u16 clear, u16 set);
  959 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
  960 					u32 clear, u32 set);
  961 
  962 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
  963 					   u16 set)
  964 {
  965 	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
  966 }
  967 
  968 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
  969 					    u32 set)
  970 {
  971 	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
  972 }
  973 
  974 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
  975 					     u16 clear)
  976 {
  977 	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
  978 }
  979 
  980 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
  981 					      u32 clear)
  982 {
  983 	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
  984 }
  985 
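/*
 * Sketch (hypothetical): disabling ASPM via the read-modify-write helper
 * above.  PCI_EXP_LNKCTL and PCI_EXP_LNKCTL_ASPMC are from
 * uapi/linux/pci_regs.h.
 */
static int example_disable_aspm(struct pci_dev *dev)
{
	return pcie_capability_clear_word(dev, PCI_EXP_LNKCTL,
					  PCI_EXP_LNKCTL_ASPMC);
}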
  986 /* user-space driven config access */
  987 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
  988 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
  989 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
  990 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
  991 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
  992 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
  993 
  994 int __must_check pci_enable_device(struct pci_dev *dev);
  995 int __must_check pci_enable_device_io(struct pci_dev *dev);
  996 int __must_check pci_enable_device_mem(struct pci_dev *dev);
  997 int __must_check pci_reenable_device(struct pci_dev *);
  998 int __must_check pcim_enable_device(struct pci_dev *pdev);
  999 void pcim_pin_device(struct pci_dev *pdev);
 1000 
 1001 static inline int pci_is_enabled(struct pci_dev *pdev)
 1002 {
 1003 	return (atomic_read(&pdev->enable_cnt) > 0);
 1004 }
 1005 
 1006 static inline int pci_is_managed(struct pci_dev *pdev)
 1007 {
 1008 	return pdev->is_managed;
 1009 }
 1010 
 1011 void pci_disable_device(struct pci_dev *dev);
 1012 
 1013 extern unsigned int pcibios_max_latency;
 1014 void pci_set_master(struct pci_dev *dev);
 1015 void pci_clear_master(struct pci_dev *dev);
 1016 
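/*
 * Probe sketch (hypothetical driver): the common enable/bus-master
 * sequence using the routines declared above.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	pci_set_master(pdev);	/* allow the device to initiate DMA */
	return 0;
}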
 1017 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
 1018 int pci_set_cacheline_size(struct pci_dev *dev);
 1019 #define HAVE_PCI_SET_MWI
 1020 int __must_check pci_set_mwi(struct pci_dev *dev);
 1021 int pci_try_set_mwi(struct pci_dev *dev);
 1022 void pci_clear_mwi(struct pci_dev *dev);
 1023 void pci_intx(struct pci_dev *dev, int enable);
 1024 bool pci_intx_mask_supported(struct pci_dev *dev);
 1025 bool pci_check_and_mask_intx(struct pci_dev *dev);
 1026 bool pci_check_and_unmask_intx(struct pci_dev *dev);
 1027 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
 1028 int pci_wait_for_pending_transaction(struct pci_dev *dev);
 1029 int pcix_get_max_mmrbc(struct pci_dev *dev);
 1030 int pcix_get_mmrbc(struct pci_dev *dev);
 1031 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
 1032 int pcie_get_readrq(struct pci_dev *dev);
 1033 int pcie_set_readrq(struct pci_dev *dev, int rq);
 1034 int pcie_get_mps(struct pci_dev *dev);
 1035 int pcie_set_mps(struct pci_dev *dev, int mps);
 1036 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
 1037 			  enum pcie_link_width *width);
 1038 int __pci_reset_function(struct pci_dev *dev);
 1039 int __pci_reset_function_locked(struct pci_dev *dev);
 1040 int pci_reset_function(struct pci_dev *dev);
 1041 int pci_try_reset_function(struct pci_dev *dev);
 1042 int pci_probe_reset_slot(struct pci_slot *slot);
 1043 int pci_reset_slot(struct pci_slot *slot);
 1044 int pci_try_reset_slot(struct pci_slot *slot);
 1045 int pci_probe_reset_bus(struct pci_bus *bus);
 1046 int pci_reset_bus(struct pci_bus *bus);
 1047 int pci_try_reset_bus(struct pci_bus *bus);
 1048 void pci_reset_secondary_bus(struct pci_dev *dev);
 1049 void pcibios_reset_secondary_bus(struct pci_dev *dev);
 1050 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
 1051 void pci_update_resource(struct pci_dev *dev, int resno);
 1052 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
 1053 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
 1054 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
 1055 bool pci_device_is_present(struct pci_dev *pdev);
 1056 void pci_ignore_hotplug(struct pci_dev *dev);
 1057 
 1058 /* ROM control related routines */
 1059 int pci_enable_rom(struct pci_dev *pdev);
 1060 void pci_disable_rom(struct pci_dev *pdev);
 1061 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
 1062 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
 1063 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
 1064 void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
 1065 
 1066 /* Power management related routines */
 1067 int pci_save_state(struct pci_dev *dev);
 1068 void pci_restore_state(struct pci_dev *dev);
 1069 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
 1070 int pci_load_saved_state(struct pci_dev *dev,
 1071 			 struct pci_saved_state *state);
 1072 int pci_load_and_free_saved_state(struct pci_dev *dev,
 1073 				  struct pci_saved_state **state);
 1074 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
 1075 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
 1076 						   u16 cap);
 1077 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
 1078 int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
 1079 				u16 cap, unsigned int size);
 1080 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
 1081 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 1082 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 1083 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
 1084 void pci_pme_active(struct pci_dev *dev, bool enable);
 1085 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1086 		      bool runtime, bool enable);
 1087 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
 1088 int pci_prepare_to_sleep(struct pci_dev *dev);
 1089 int pci_back_from_sleep(struct pci_dev *dev);
 1090 bool pci_dev_run_wake(struct pci_dev *dev);
 1091 bool pci_check_pme_status(struct pci_dev *dev);
 1092 void pci_pme_wakeup_bus(struct pci_bus *bus);
 1093 void pci_d3cold_enable(struct pci_dev *dev);
 1094 void pci_d3cold_disable(struct pci_dev *dev);
 1095 
 1096 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1097 				  bool enable)
 1098 {
 1099 	return __pci_enable_wake(dev, state, false, enable);
 1100 }
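
/*
 * Example (editorial sketch, not part of the original header): the power
 * management routines above typically combine in a driver's legacy suspend
 * path; the callback name is hypothetical and error handling is elided.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot, true);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *	}
 */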
 1101 
 1102 /* PCI Virtual Channel */
 1103 int pci_save_vc_state(struct pci_dev *dev);
 1104 void pci_restore_vc_state(struct pci_dev *dev);
 1105 void pci_allocate_vc_save_buffers(struct pci_dev *dev);
 1106 
 1107 /* For use by arch with custom probe code */
 1108 void set_pcie_port_type(struct pci_dev *pdev);
 1109 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
 1110 
 1111 /* Functions for PCI Hotplug drivers to use */
 1112 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
 1113 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
 1114 unsigned int pci_rescan_bus(struct pci_bus *bus);
 1115 void pci_lock_rescan_remove(void);
 1116 void pci_unlock_rescan_remove(void);
 1117 
 1118 /* Vital product data routines */
 1119 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 1120 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
 1121 int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 1122 
 1123 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 1124 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
 1125 void pci_bus_assign_resources(const struct pci_bus *bus);
 1126 void pci_bus_claim_resources(struct pci_bus *bus);
 1127 void pci_bus_size_bridges(struct pci_bus *bus);
 1128 int pci_claim_resource(struct pci_dev *, int);
 1129 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
 1130 void pci_assign_unassigned_resources(void);
 1131 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
 1132 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
 1133 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
 1134 void pdev_enable_device(struct pci_dev *);
 1135 int pci_enable_resources(struct pci_dev *, int mask);
 1136 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
 1137 		    int (*)(const struct pci_dev *, u8, u8));
 1138 #define HAVE_PCI_REQ_REGIONS	2
 1139 int __must_check pci_request_regions(struct pci_dev *, const char *);
 1140 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
 1141 void pci_release_regions(struct pci_dev *);
 1142 int __must_check pci_request_region(struct pci_dev *, int, const char *);
 1143 int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
 1144 void pci_release_region(struct pci_dev *, int);
 1145 int pci_request_selected_regions(struct pci_dev *, int, const char *);
 1146 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
 1147 void pci_release_selected_regions(struct pci_dev *, int);
 1148 
 1149 /* drivers/pci/bus.c */
 1150 struct pci_bus *pci_bus_get(struct pci_bus *bus);
 1151 void pci_bus_put(struct pci_bus *bus);
 1152 void pci_add_resource(struct list_head *resources, struct resource *res);
 1153 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
 1154 			     resource_size_t offset);
 1155 void pci_free_resource_list(struct list_head *resources);
 1156 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
 1157 			  unsigned int flags);
 1158 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
 1159 void pci_bus_remove_resources(struct pci_bus *bus);
 1160 int devm_request_pci_bus_resources(struct device *dev,
 1161 				   struct list_head *resources);
 1162 
 1163 #define pci_bus_for_each_resource(bus, res, i)				\
 1164 	for (i = 0;							\
 1165 	    (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
 1166 	     i++)
 1167 
 1168 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
 1169 			struct resource *res, resource_size_t size,
 1170 			resource_size_t align, resource_size_t min,
 1171 			unsigned long type_mask,
 1172 			resource_size_t (*alignf)(void *,
 1173 						  const struct resource *,
 1174 						  resource_size_t,
 1175 						  resource_size_t),
 1176 			void *alignf_data);
 1177 
 1178 
 1179 int pci_register_io_range(phys_addr_t addr, resource_size_t size);
 1180 unsigned long pci_address_to_pio(phys_addr_t addr);
 1181 phys_addr_t pci_pio_to_address(unsigned long pio);
 1182 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
 1183 void pci_unmap_iospace(struct resource *res);
 1184 
 1185 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
 1186 {
 1187 	struct pci_bus_region region;
 1188 
 1189 	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
 1190 	return region.start;
 1191 }
 1192 
 1193 /* Proper probing supporting hot-pluggable devices */
 1194 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
 1195 				       const char *mod_name);
 1196 
 1197 /*
 1198  * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
 1199  */
 1200 #define pci_register_driver(driver)		\
 1201 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
 1202 
 1203 void pci_unregister_driver(struct pci_driver *dev);
 1204 
 1205 /**
 1206  * module_pci_driver() - Helper macro for registering a PCI driver
 1207  * @__pci_driver: pci_driver struct
 1208  *
 1209  * Helper macro for PCI drivers which do not do anything special in module
 1210  * init/exit. This eliminates a lot of boilerplate. Each module may only
 1211  * use this macro once, and calling it replaces module_init() and module_exit()
 1212  */
 1213 #define module_pci_driver(__pci_driver) \
 1214 	module_driver(__pci_driver, pci_register_driver, \
 1215 		       pci_unregister_driver)
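
/*
 * Example (editorial sketch): a hypothetical driver with no special module
 * init/exit code reduces to a structure definition plus one macro call.
 *
 *	static struct pci_driver foo_driver = {
 *		.name     = "foo",
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 */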
 1216 
 1217 /**
 1218  * builtin_pci_driver() - Helper macro for registering a PCI driver
 1219  * @__pci_driver: pci_driver struct
 1220  *
 1221  * Helper macro for PCI drivers which do not do anything special in their
 1222  * init code. This eliminates a lot of boilerplate. Each driver may only
 1223  * use this macro once, and calling it replaces device_initcall(...)
 1224  */
 1225 #define builtin_pci_driver(__pci_driver) \
 1226 	builtin_driver(__pci_driver, pci_register_driver)
 1227 
 1228 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
 1229 int pci_add_dynid(struct pci_driver *drv,
 1230 		  unsigned int vendor, unsigned int device,
 1231 		  unsigned int subvendor, unsigned int subdevice,
 1232 		  unsigned int class, unsigned int class_mask,
 1233 		  unsigned long driver_data);
 1234 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
 1235 					 struct pci_dev *dev);
 1236 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
 1237 		    int pass);
 1238 
 1239 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
 1240 		  void *userdata);
 1241 int pci_cfg_space_size(struct pci_dev *dev);
 1242 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
 1243 void pci_setup_bridge(struct pci_bus *bus);
 1244 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
 1245 					 unsigned long type);
 1246 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
 1247 
 1248 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
 1249 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
 1250 
 1251 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
 1252 		      unsigned int command_bits, u32 flags);
 1253 
 1254 #define PCI_IRQ_NOLEGACY	(1 << 0) /* don't use legacy interrupts */
 1255 #define PCI_IRQ_NOMSI		(1 << 1) /* don't use MSI interrupts */
 1256 #define PCI_IRQ_NOMSIX		(1 << 2) /* don't use MSI-X interrupts */
 1257 #define PCI_IRQ_NOAFFINITY	(1 << 3) /* don't auto-assign affinity */
 1258 
 1259 /* kmem_cache style wrapper around pci_alloc_consistent() */
 1260 
 1261 #include <linux/pci-dma.h>
 1262 #include <linux/dmapool.h>
 1263 
 1264 #define	pci_pool dma_pool
 1265 #define pci_pool_create(name, pdev, size, align, allocation) \
 1266 		dma_pool_create(name, &pdev->dev, size, align, allocation)
 1267 #define	pci_pool_destroy(pool) dma_pool_destroy(pool)
 1268 #define	pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
 1269 #define	pci_pool_zalloc(pool, flags, handle) \
 1270 		dma_pool_zalloc(pool, flags, handle)
 1271 #define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
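
/*
 * Example (editorial sketch): since the pci_pool names simply forward to the
 * dma_pool API, typical use in a hypothetical driver looks like this (the
 * pool name and sizes are illustrative, error handling is elided).
 *
 *	struct pci_pool *pool;
 *	dma_addr_t handle;
 *	void *vaddr;
 *
 *	pool  = pci_pool_create("foo-desc", pdev, 64, 64, 0);
 *	vaddr = pci_pool_alloc(pool, GFP_KERNEL, &handle);
 *	...
 *	pci_pool_free(pool, vaddr, handle);
 *	pci_pool_destroy(pool);
 */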
 1272 
 1273 struct msix_entry {
 1274 	u32	vector;	/* kernel uses to write allocated vector */
 1275 	u16	entry;	/* driver uses to specify entry, OS writes */
 1276 };
 1277 
 1278 #ifdef CONFIG_PCI_MSI
 1279 int pci_msi_vec_count(struct pci_dev *dev);
 1280 void pci_msi_shutdown(struct pci_dev *dev);
 1281 void pci_disable_msi(struct pci_dev *dev);
 1282 int pci_msix_vec_count(struct pci_dev *dev);
 1283 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 1284 void pci_msix_shutdown(struct pci_dev *dev);
 1285 void pci_disable_msix(struct pci_dev *dev);
 1286 void pci_restore_msi_state(struct pci_dev *dev);
 1287 int pci_msi_enabled(void);
 1288 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
 1289 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
 1290 {
 1291 	int rc = pci_enable_msi_range(dev, nvec, nvec);
 1292 	if (rc < 0)
 1293 		return rc;
 1294 	return 0;
 1295 }
 1296 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 1297 			  int minvec, int maxvec);
 1298 static inline int pci_enable_msix_exact(struct pci_dev *dev,
 1299 					struct msix_entry *entries, int nvec)
 1300 {
 1301 	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
 1302 	if (rc < 0)
 1303 		return rc;
 1304 	return 0;
 1305 }
 1306 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
 1307 		unsigned int max_vecs, unsigned int flags);
 1308 void pci_free_irq_vectors(struct pci_dev *dev);
 1309 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
 1310 
 1311 #else
 1312 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 1313 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
 1314 static inline void pci_disable_msi(struct pci_dev *dev) { }
 1315 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 1316 static inline int pci_enable_msix(struct pci_dev *dev,
 1317 				  struct msix_entry *entries, int nvec)
 1318 { return -ENOSYS; }
 1319 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
 1320 static inline void pci_disable_msix(struct pci_dev *dev) { }
 1321 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
 1322 static inline int pci_msi_enabled(void) { return 0; }
 1323 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
 1324 				       int maxvec)
 1325 { return -ENOSYS; }
 1326 static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
 1327 { return -ENOSYS; }
 1328 static inline int pci_enable_msix_range(struct pci_dev *dev,
 1329 		      struct msix_entry *entries, int minvec, int maxvec)
 1330 { return -ENOSYS; }
 1331 static inline int pci_enable_msix_exact(struct pci_dev *dev,
 1332 		      struct msix_entry *entries, int nvec)
 1333 { return -ENOSYS; }
 1334 static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
 1335 		unsigned int min_vecs, unsigned int max_vecs,
 1336 		unsigned int flags)
 1337 {
 1338 	if (min_vecs > 1)
 1339 		return -EINVAL;
 1340 	return 1;
 1341 }
 1342 static inline void pci_free_irq_vectors(struct pci_dev *dev)
 1343 {
 1344 }
 1345 
 1346 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
 1347 {
 1348 	if (WARN_ON_ONCE(nr > 0))
 1349 		return -EINVAL;
 1350 	return dev->irq;
 1351 }
 1352 #endif
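
/*
 * Example (editorial sketch): with pci_alloc_irq_vectors() above, a
 * hypothetical driver asks for 1..8 vectors and lets the core fall back
 * from MSI-X to MSI to legacy INTx (flags == 0 permits all three); the
 * handler and cookie names are assumptions and error paths are elided.
 *
 *	int i, nvec = pci_alloc_irq_vectors(pdev, 1, 8, 0);
 *	if (nvec < 0)
 *		return nvec;
 *	for (i = 0; i < nvec; i++)
 *		request_irq(pci_irq_vector(pdev, i), foo_irq, 0, "foo", foo);
 *	...
 *	pci_free_irq_vectors(pdev);
 */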
 1353 
 1354 #ifdef CONFIG_PCIEPORTBUS
 1355 extern bool pcie_ports_disabled;
 1356 extern bool pcie_ports_auto;
 1357 #else
 1358 #define pcie_ports_disabled	true
 1359 #define pcie_ports_auto		false
 1360 #endif
 1361 
 1362 #ifdef CONFIG_PCIEASPM
 1363 bool pcie_aspm_support_enabled(void);
 1364 #else
 1365 static inline bool pcie_aspm_support_enabled(void) { return false; }
 1366 #endif
 1367 
 1368 #ifdef CONFIG_PCIEAER
 1369 void pci_no_aer(void);
 1370 bool pci_aer_available(void);
 1371 #else
 1372 static inline void pci_no_aer(void) { }
 1373 static inline bool pci_aer_available(void) { return false; }
 1374 #endif
 1375 
 1376 #ifdef CONFIG_PCIE_ECRC
 1377 void pcie_set_ecrc_checking(struct pci_dev *dev);
 1378 void pcie_ecrc_get_policy(char *str);
 1379 #else
 1380 static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
 1381 static inline void pcie_ecrc_get_policy(char *str) { }
 1382 #endif
 1383 
 1384 #define pci_enable_msi(pdev)	pci_enable_msi_exact(pdev, 1)
 1385 
 1386 #ifdef CONFIG_HT_IRQ
 1387 /* The functions a driver should call */
 1388 int  ht_create_irq(struct pci_dev *dev, int idx);
 1389 void ht_destroy_irq(unsigned int irq);
 1390 #endif /* CONFIG_HT_IRQ */
 1391 
 1392 #ifdef CONFIG_PCI_ATS
 1393 /* Address Translation Service */
 1394 void pci_ats_init(struct pci_dev *dev);
 1395 int pci_enable_ats(struct pci_dev *dev, int ps);
 1396 void pci_disable_ats(struct pci_dev *dev);
 1397 int pci_ats_queue_depth(struct pci_dev *dev);
 1398 #else
 1399 static inline void pci_ats_init(struct pci_dev *d) { }
 1400 static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
 1401 static inline void pci_disable_ats(struct pci_dev *d) { }
 1402 static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
 1403 #endif
 1404 
 1405 void pci_cfg_access_lock(struct pci_dev *dev);
 1406 bool pci_cfg_access_trylock(struct pci_dev *dev);
 1407 void pci_cfg_access_unlock(struct pci_dev *dev);
 1408 
 1409 /*
 1410  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
 1411  * a PCI domain is defined to be a set of PCI buses which share
 1412  * configuration space.
 1413  */
 1414 #ifdef CONFIG_PCI_DOMAINS
 1415 extern int pci_domains_supported;
 1416 int pci_get_new_domain_nr(void);
 1417 #else
 1418 enum { pci_domains_supported = 0 };
 1419 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 1420 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
 1421 static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
 1422 #endif /* CONFIG_PCI_DOMAINS */
 1423 
 1424 /*
 1425  * Generic implementation for PCI domain support. If your
 1426  * architecture does not need custom management of PCI
 1427  * domains then this implementation will be used
 1428  */
 1429 #ifdef CONFIG_PCI_DOMAINS_GENERIC
 1430 static inline int pci_domain_nr(struct pci_bus *bus)
 1431 {
 1432 	return bus->domain_nr;
 1433 }
 1434 #ifdef CONFIG_ACPI
 1435 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
 1436 #else
 1437 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
 1438 { return 0; }
 1439 #endif
 1440 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
 1441 #endif
 1442 
 1443 /* some architectures require additional setup to direct VGA traffic */
 1444 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
 1445 		      unsigned int command_bits, u32 flags);
 1446 void pci_register_set_vga_state(arch_set_vga_state_t func);
 1447 
 1448 static inline int
 1449 pci_request_io_regions(struct pci_dev *pdev, const char *name)
 1450 {
 1451 	return pci_request_selected_regions(pdev,
 1452 			    pci_select_bars(pdev, IORESOURCE_IO), name);
 1453 }
 1454 
 1455 static inline void
 1456 pci_release_io_regions(struct pci_dev *pdev)
 1457 {
 1458 	return pci_release_selected_regions(pdev,
 1459 			    pci_select_bars(pdev, IORESOURCE_IO));
 1460 }
 1461 
 1462 static inline int
 1463 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
 1464 {
 1465 	return pci_request_selected_regions(pdev,
 1466 			    pci_select_bars(pdev, IORESOURCE_MEM), name);
 1467 }
 1468 
 1469 static inline void
 1470 pci_release_mem_regions(struct pci_dev *pdev)
 1471 {
 1472 	return pci_release_selected_regions(pdev,
 1473 			    pci_select_bars(pdev, IORESOURCE_MEM));
 1474 }
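
/*
 * Example (editorial sketch): the request/release helpers above pair
 * naturally in a hypothetical probe/remove path.
 *
 *	err = pci_request_mem_regions(pdev, "foo");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_mem_regions(pdev);
 */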
 1475 
 1476 #else /* CONFIG_PCI is not enabled */
 1477 
 1478 static inline void pci_set_flags(int flags) { }
 1479 static inline void pci_add_flags(int flags) { }
 1480 static inline void pci_clear_flags(int flags) { }
 1481 static inline int pci_has_flag(int flag) { return 0; }
 1482 
 1483 /*
 1484  *  If the system does not have PCI, clearly these return errors.  Define
 1485  *  these as simple inline functions to avoid hair in drivers.
 1486  */
 1487 
 1488 #define _PCI_NOP(o, s, t) \
 1489 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
 1490 						int where, t val) \
 1491 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
 1492 
 1493 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
 1494 				_PCI_NOP(o, word, u16 x) \
 1495 				_PCI_NOP(o, dword, u32 x)
 1496 _PCI_NOP_ALL(read, *)
 1497 _PCI_NOP_ALL(write,)
 1498 
 1499 static inline struct pci_dev *pci_get_device(unsigned int vendor,
 1500 					     unsigned int device,
 1501 					     struct pci_dev *from)
 1502 { return NULL; }
 1503 
 1504 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
 1505 					     unsigned int device,
 1506 					     unsigned int ss_vendor,
 1507 					     unsigned int ss_device,
 1508 					     struct pci_dev *from)
 1509 { return NULL; }
 1510 
 1511 static inline struct pci_dev *pci_get_class(unsigned int class,
 1512 					    struct pci_dev *from)
 1513 { return NULL; }
 1514 
 1515 #define pci_dev_present(ids)	(0)
 1516 #define no_pci_devices()	(1)
 1517 #define pci_dev_put(dev)	do { } while (0)
 1518 
 1519 static inline void pci_set_master(struct pci_dev *dev) { }
 1520 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
 1521 static inline void pci_disable_device(struct pci_dev *dev) { }
 1522 static inline int pci_assign_resource(struct pci_dev *dev, int i)
 1523 { return -EBUSY; }
 1524 static inline int __pci_register_driver(struct pci_driver *drv,
 1525 					struct module *owner)
 1526 { return 0; }
 1527 static inline int pci_register_driver(struct pci_driver *drv)
 1528 { return 0; }
 1529 static inline void pci_unregister_driver(struct pci_driver *drv) { }
 1530 static inline int pci_find_capability(struct pci_dev *dev, int cap)
 1531 { return 0; }
 1532 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
 1533 					   int cap)
 1534 { return 0; }
 1535 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
 1536 { return 0; }
 1537 
 1538 /* Power management related routines */
 1539 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
 1540 static inline void pci_restore_state(struct pci_dev *dev) { }
 1541 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 1542 { return 0; }
 1543 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
 1544 { return 0; }
 1545 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
 1546 					   pm_message_t state)
 1547 { return PCI_D0; }
 1548 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 1549 				  bool enable)
 1550 { return 0; }
 1551 
 1552 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
 1553 { return -EIO; }
 1554 static inline void pci_release_regions(struct pci_dev *dev) { }
 1555 
 1556 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
 1557 
 1558 static inline void pci_block_cfg_access(struct pci_dev *dev) { }
 1559 static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
 1560 { return 0; }
 1561 static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
 1562 
 1563 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
 1564 { return NULL; }
 1565 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
 1566 						unsigned int devfn)
 1567 { return NULL; }
 1568 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
 1569 						unsigned int devfn)
 1570 { return NULL; }
 1571 
 1572 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
 1573 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
 1574 static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
 1575 
 1576 #define dev_is_pci(d) (false)
 1577 #define dev_is_pf(d) (false)
 1578 #define dev_num_vf(d) (0)
 1579 #endif /* CONFIG_PCI */
 1580 
 1581 /* Include architecture-dependent settings and functions */
 1582 
 1583 #include <asm/pci.h>
 1584 
 1585 #ifndef pci_root_bus_fwnode
 1586 #define pci_root_bus_fwnode(bus)	NULL
 1587 #endif
 1588 
 1589 /* these helpers provide future and backwards compatibility
 1590  * for accessing popular PCI BAR info */
 1591 #define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
 1592 #define pci_resource_end(dev, bar)	((dev)->resource[(bar)].end)
 1593 #define pci_resource_flags(dev, bar)	((dev)->resource[(bar)].flags)
 1594 #define pci_resource_len(dev,bar) \
 1595 	((pci_resource_start((dev), (bar)) == 0 &&	\
 1596 	  pci_resource_end((dev), (bar)) ==		\
 1597 	  pci_resource_start((dev), (bar))) ? 0 :	\
 1598 							\
 1599 	 (pci_resource_end((dev), (bar)) -		\
 1600 	  pci_resource_start((dev), (bar)) + 1))
 1601 
 1602 /* Similar to the helpers above, these manipulate per-pci_dev
 1603  * driver-specific data.  They are really just a wrapper around
 1604  * the generic device structure functions of these calls.
 1605  */
 1606 static inline void *pci_get_drvdata(struct pci_dev *pdev)
 1607 {
 1608 	return dev_get_drvdata(&pdev->dev);
 1609 }
 1610 
 1611 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
 1612 {
 1613 	dev_set_drvdata(&pdev->dev, data);
 1614 }
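
/*
 * Example (editorial sketch): a hypothetical driver stores its private
 * state at probe time and fetches it back in other callbacks.
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	pci_set_drvdata(pdev, foo);
 *	...
 *	struct foo *foo = pci_get_drvdata(pdev);
 *	kfree(foo);
 */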
 1615 
 1616 /* If you want to know what to call your pci_dev, ask this function.
 1617  * Again, it's a wrapper around the generic device.
 1618  */
 1619 static inline const char *pci_name(const struct pci_dev *pdev)
 1620 {
 1621 	return dev_name(&pdev->dev);
 1622 }
 1623 
 1624 
 1625 /* Some archs don't want to expose struct resource to userland as-is
 1626  * in sysfs and /proc
 1627  */
 1628 #ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
 1629 void pci_resource_to_user(const struct pci_dev *dev, int bar,
 1630 			  const struct resource *rsrc,
 1631 			  resource_size_t *start, resource_size_t *end);
 1632 #else
 1633 static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
 1634 		const struct resource *rsrc, resource_size_t *start,
 1635 		resource_size_t *end)
 1636 {
 1637 	*start = rsrc->start;
 1638 	*end = rsrc->end;
 1639 }
 1640 #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
 1641 
 1642 
 1643 /*
 1644  *  The world is not perfect and supplies us with broken PCI devices.
 1645  *  For at least a part of these bugs we need a work-around, so both
 1646  *  generic (drivers/pci/quirks.c) and per-architecture code can define
 1647  *  fixup hooks to be called for particular buggy devices.
 1648  */
 1649 
 1650 struct pci_fixup {
 1651 	u16 vendor;		/* You can use PCI_ANY_ID here of course */
 1652 	u16 device;		/* You can use PCI_ANY_ID here of course */
 1653 	u32 class;		/* You can use PCI_ANY_ID here too */
 1654 	unsigned int class_shift;	/* should be 0, 8, 16 */
 1655 	void (*hook)(struct pci_dev *dev);
 1656 };
 1657 
 1658 enum pci_fixup_pass {
 1659 	pci_fixup_early,	/* Before probing BARs */
 1660 	pci_fixup_header,	/* After reading configuration header */
 1661 	pci_fixup_final,	/* Final phase of device fixups */
 1662 	pci_fixup_enable,	/* pci_enable_device() time */
 1663 	pci_fixup_resume,	/* pci_device_resume() */
 1664 	pci_fixup_suspend,	/* pci_device_suspend() */
 1665 	pci_fixup_resume_early, /* pci_device_resume_early() */
 1666 	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
 1667 };
 1668 
 1669 /* Anonymous variables would be nice... */
 1670 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
 1671 				  class_shift, hook)			\
 1672 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
 1673 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
 1674 		= { vendor, device, class, class_shift, hook };
 1675 
 1676 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
 1677 					 class_shift, hook)		\
 1678 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
 1679 		hook, vendor, device, class, class_shift, hook)
 1680 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
 1681 					 class_shift, hook)		\
 1682 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
 1683 		hook, vendor, device, class, class_shift, hook)
 1684 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
 1685 					 class_shift, hook)		\
 1686 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
 1687 		hook, vendor, device, class, class_shift, hook)
 1688 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
 1689 					 class_shift, hook)		\
 1690 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
 1691 		hook, vendor, device, class, class_shift, hook)
 1692 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
 1693 					 class_shift, hook)		\
 1694 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 1695 		resume##hook, vendor, device, class,	\
 1696 		class_shift, hook)
 1697 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
 1698 					 class_shift, hook)		\
 1699 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 1700 		resume_early##hook, vendor, device,	\
 1701 		class, class_shift, hook)
 1702 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
 1703 					 class_shift, hook)		\
 1704 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 1705 		suspend##hook, vendor, device, class,	\
 1706 		class_shift, hook)
 1707 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
 1708 					 class_shift, hook)		\
 1709 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
 1710 		suspend_late##hook, vendor, device,	\
 1711 		class, class_shift, hook)
 1712 
 1713 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
 1714 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
 1715 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1716 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
 1717 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
 1718 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1719 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
 1720 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
 1721 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1722 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
 1723 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
 1724 		hook, vendor, device, PCI_ANY_ID, 0, hook)
 1725 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
 1726 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 1727 		resume##hook, vendor, device,		\
 1728 		PCI_ANY_ID, 0, hook)
 1729 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
 1730 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 1731 		resume_early##hook, vendor, device,	\
 1732 		PCI_ANY_ID, 0, hook)
 1733 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
 1734 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 1735 		suspend##hook, vendor, device,		\
 1736 		PCI_ANY_ID, 0, hook)
 1737 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
 1738 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
 1739 		suspend_late##hook, vendor, device,	\
 1740 		PCI_ANY_ID, 0, hook)
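
/*
 * Example (editorial sketch): a hypothetical quirk for vendor 0x1234,
 * device 0x5678, run during the final fixup pass; the workaround shown
 * (disabling MSI) is illustrative only.
 *
 *	static void foo_quirk(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, foo_quirk);
 */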
 1741 
 1742 #ifdef CONFIG_PCI_QUIRKS
 1743 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
 1744 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
 1745 int pci_dev_specific_enable_acs(struct pci_dev *dev);
 1746 #else
 1747 static inline void pci_fixup_device(enum pci_fixup_pass pass,
 1748 				    struct pci_dev *dev) { }
 1749 static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
 1750 					       u16 acs_flags)
 1751 {
 1752 	return -ENOTTY;
 1753 }
 1754 static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
 1755 {
 1756 	return -ENOTTY;
 1757 }
 1758 #endif
 1759 
 1760 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
 1761 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
 1762 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
 1763 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
 1764 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
 1765 				   const char *name);
 1766 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
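
/*
 * Example (editorial sketch): managed iomap of BAR 0 in a hypothetical
 * probe; the mapping is torn down automatically on driver detach.
 *
 *	err = pcim_iomap_regions(pdev, BIT(0), "foo");
 *	if (err)
 *		return err;
 *	foo->regs = pcim_iomap_table(pdev)[0];
 */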
 1767 
 1768 extern int pci_pci_problems;
 1769 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
 1770 #define PCIPCI_TRITON		2
 1771 #define PCIPCI_NATOMA		4
 1772 #define PCIPCI_VIAETBF		8
 1773 #define PCIPCI_VSFX		16
 1774 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
 1775 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
 1776 
 1777 extern unsigned long pci_cardbus_io_size;
 1778 extern unsigned long pci_cardbus_mem_size;
 1779 extern u8 pci_dfl_cache_line_size;
 1780 extern u8 pci_cache_line_size;
 1781 
 1782 extern unsigned long pci_hotplug_io_size;
 1783 extern unsigned long pci_hotplug_mem_size;
 1784 extern unsigned long pci_hotplug_bus_size;
 1785 
 1786 /* Architecture-specific versions may override these (weak) */
 1787 void pcibios_disable_device(struct pci_dev *dev);
 1788 void pcibios_set_master(struct pci_dev *dev);
 1789 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
 1790 				 enum pcie_reset_state state);
 1791 int pcibios_add_device(struct pci_dev *dev);
 1792 void pcibios_release_device(struct pci_dev *dev);
 1793 void pcibios_penalize_isa_irq(int irq, int active);
 1794 int pcibios_alloc_irq(struct pci_dev *dev);
 1795 void pcibios_free_irq(struct pci_dev *dev);
 1796 
 1797 #ifdef CONFIG_HIBERNATE_CALLBACKS
 1798 extern struct dev_pm_ops pcibios_pm_ops;
 1799 #endif
 1800 
 1801 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
 1802 void __init pci_mmcfg_early_init(void);
 1803 void __init pci_mmcfg_late_init(void);
 1804 #else
 1805 static inline void pci_mmcfg_early_init(void) { }
 1806 static inline void pci_mmcfg_late_init(void) { }
 1807 #endif
 1808 
 1809 int pci_ext_cfg_avail(void);
 1810 
 1811 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
 1812 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
 1813 
 1814 #ifdef CONFIG_PCI_IOV
 1815 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
 1816 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
 1817 
 1818 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
 1819 void pci_disable_sriov(struct pci_dev *dev);
 1820 int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
 1821 void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
 1822 int pci_num_vf(struct pci_dev *dev);
 1823 int pci_vfs_assigned(struct pci_dev *dev);
 1824 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
 1825 int pci_sriov_get_totalvfs(struct pci_dev *dev);
 1826 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
 1827 #else
 1828 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
 1829 {
 1830 	return -ENOSYS;
 1831 }
 1832 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
 1833 {
 1834 	return -ENOSYS;
 1835 }
 1836 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
 1837 { return -ENODEV; }
 1838 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
 1839 {
 1840 	return -ENOSYS;
 1841 }
 1842 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
 1843 					 int id, int reset) { }
 1844 static inline void pci_disable_sriov(struct pci_dev *dev) { }
 1845 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
 1846 static inline int pci_vfs_assigned(struct pci_dev *dev)
 1847 { return 0; }
 1848 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
 1849 { return 0; }
 1850 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
 1851 { return 0; }
 1852 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
 1853 { return 0; }
 1854 #endif
 1855 
 1856 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
 1857 void pci_hp_create_module_link(struct pci_slot *pci_slot);
 1858 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
 1859 #endif
 1860 
 1861 /**
 1862  * pci_pcie_cap - get the saved PCIe capability offset
 1863  * @dev: PCI device
 1864  *
 1865  * PCIe capability offset is calculated at PCI device initialization
 1866  * time and saved in the data structure. This function returns saved
 1867  * PCIe capability offset. Using this instead of pci_find_capability()
 1868  * avoids an unnecessary search of the PCI configuration space. If you
 1869  * need to calculate the PCIe capability offset from the raw device for
 1870  * some reason, please use pci_find_capability() instead.
 1871  */
 1872 static inline int pci_pcie_cap(struct pci_dev *dev)
 1873 {
 1874 	return dev->pcie_cap;
 1875 }
 1876 
 1877 /**
 1878  * pci_is_pcie - check if the PCI device is PCI Express capable
 1879  * @dev: PCI device
 1880  *
 1881  * Returns: true if the PCI device is PCI Express capable, false otherwise.
 1882  */
 1883 static inline bool pci_is_pcie(struct pci_dev *dev)
 1884 {
 1885 	return pci_pcie_cap(dev);
 1886 }
 1887 
 1888 /**
 1889  * pcie_caps_reg - get the PCIe Capabilities Register
 1890  * @dev: PCI device
 1891  */
 1892 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
 1893 {
 1894 	return dev->pcie_flags_reg;
 1895 }
 1896 
 1897 /**
 1898  * pci_pcie_type - get the PCIe device/port type
 1899  * @dev: PCI device
 1900  */
 1901 static inline int pci_pcie_type(const struct pci_dev *dev)
 1902 {
 1903 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
 1904 }
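
/*
 * Example (editorial sketch): callers typically gate port-specific logic on
 * these accessors, e.g.
 *
 *	if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
 *		... root-port specific handling ...
 */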
 1905 
 1906 void pci_request_acs(void);
 1907 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
 1908 bool pci_acs_path_enabled(struct pci_dev *start,
 1909 			  struct pci_dev *end, u16 acs_flags);
 1910 
 1911 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
 1912 #define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)
 1913 
 1914 /* Large Resource Data Type Tag Item Names */
 1915 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
 1916 #define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
 1917 #define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
 1918 
 1919 #define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
 1920 #define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
 1921 #define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
 1922 
 1923 /* Small Resource Data Type Tag Item Names */
 1924 #define PCI_VPD_STIN_END		0x0f	/* End */
 1925 
 1926 #define PCI_VPD_SRDT_END		(PCI_VPD_STIN_END << 3)
 1927 
 1928 #define PCI_VPD_SRDT_TIN_MASK		0x78
 1929 #define PCI_VPD_SRDT_LEN_MASK		0x07
 1930 #define PCI_VPD_LRDT_TIN_MASK		0x7f
 1931 
 1932 #define PCI_VPD_LRDT_TAG_SIZE		3
 1933 #define PCI_VPD_SRDT_TAG_SIZE		1
 1934 
 1935 #define PCI_VPD_INFO_FLD_HDR_SIZE	3
 1936 
 1937 #define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
 1938 #define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
 1939 #define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
 1940 #define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
 1941 
 1942 /**
 1943  * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
 1944  * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
 1945  *
 1946  * Returns the extracted Large Resource Data Type length.
 1947  */
 1948 static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
 1949 {
 1950 	return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
 1951 }
 1952 
 1953 /**
 1954  * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
 1955  * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
 1956  *
 1957  * Returns the extracted Large Resource Data Type Tag item.
 1958  */
 1959 static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
 1960 {
 1961 	return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
 1962 }
 1963 
 1964 /**
 1965  * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
 1966  * @srdt: Pointer to the beginning of the Small Resource Data Type tag
 1967  *
 1968  * Returns the extracted Small Resource Data Type length.
 1969  */
 1970 static inline u8 pci_vpd_srdt_size(const u8 *srdt)
 1971 {
 1972 	return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
 1973 }
 1974 
 1975 /**
 1976  * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
 1977  * @srdt: Pointer to the beginning of the Small Resource Data Type tag
 1978  *
 1979  * Returns the extracted Small Resource Data Type Tag Item.
 1980  */
 1981 static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
 1982 {
 1983 	return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
 1984 }
 1985 
 1986 /**
 1987  * pci_vpd_info_field_size - Extracts the information field length
 1988  * @info_field: Pointer to the beginning of an information field header
 1989  *
 1990  * Returns the extracted information field length.
 1991  */
 1992 static inline u8 pci_vpd_info_field_size(const u8 *info_field)
 1993 {
 1994 	return info_field[2];
 1995 }
 1996 
 1997 /**
 1998  * pci_vpd_find_tag - Locates the Resource Data Type tag provided
 1999  * @buf: Pointer to buffered vpd data
 2000  * @off: The offset into the buffer at which to begin the search
 2001  * @len: The length of the vpd buffer
 2002  * @rdt: The Resource Data Type to search for
 2003  *
 2004  * Returns the index where the Resource Data Type was found or
 2005  * -ENOENT otherwise.
 2006  */
 2007 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
 2008 
 2009 /**
 2010  * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
 2011  * @buf: Pointer to buffered vpd data
 2012  * @off: The offset into the buffer at which to begin the search
 2013  * @len: The length of the buffer area, relative to off, in which to search
 2014  * @kw: The keyword to search for
 2015  *
 2016  * Returns the index where the information field keyword was found or
 2017  * -ENOENT otherwise.
 2018  */
 2019 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 2020 			      unsigned int len, const char *kw);
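
/*
 * Example (editorial sketch): locating the part number in a VPD buffer
 * previously filled via pci_read_vpd(); buf/len are hypothetical and all
 * error handling beyond the lookups is elided.
 *
 *	int ro = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
 *	if (ro >= 0) {
 *		int ro_len = pci_vpd_lrdt_size(&buf[ro]);
 *		int pn = pci_vpd_find_info_keyword(buf,
 *				ro + PCI_VPD_LRDT_TAG_SIZE, ro_len,
 *				PCI_VPD_RO_KEYWORD_PARTNO);
 *		if (pn >= 0)
 *			pn_len = pci_vpd_info_field_size(&buf[pn]);
 *	}
 */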
 2021 
 2022 /* PCI <-> OF binding helpers */
 2023 #ifdef CONFIG_OF
 2024 struct device_node;
 2025 struct irq_domain;
 2026 void pci_set_of_node(struct pci_dev *dev);
 2027 void pci_release_of_node(struct pci_dev *dev);
 2028 void pci_set_bus_of_node(struct pci_bus *bus);
 2029 void pci_release_bus_of_node(struct pci_bus *bus);
 2030 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
 2031 
 2032 /* Arch may override this (weak) */
 2033 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 2034 
 2035 static inline struct device_node *
 2036 pci_device_to_OF_node(const struct pci_dev *pdev)
 2037 {
 2038 	return pdev ? pdev->dev.of_node : NULL;
 2039 }
 2040 
 2041 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
 2042 {
 2043 	return bus ? bus->dev.of_node : NULL;
 2044 }
 2045 
 2046 #else /* CONFIG_OF */
 2047 static inline void pci_set_of_node(struct pci_dev *dev) { }
 2048 static inline void pci_release_of_node(struct pci_dev *dev) { }
 2049 static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
 2050 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
 2051 static inline struct device_node *
 2052 pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
 2053 static inline struct irq_domain *
 2054 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
 2055 #endif  /* CONFIG_OF */
 2056 
 2057 #ifdef CONFIG_ACPI
 2058 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
 2059 
 2060 void
 2061 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
 2062 #else
 2063 static inline struct irq_domain *
 2064 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
 2065 #endif
 2066 
 2067 #ifdef CONFIG_EEH
 2068 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
 2069 {
 2070 	return pdev->dev.archdata.edev;
 2071 }
 2072 #endif
 2073 
 2074 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
 2075 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
 2076 int pci_for_each_dma_alias(struct pci_dev *pdev,
 2077 			   int (*fn)(struct pci_dev *pdev,
 2078 				     u16 alias, void *data), void *data);
 2079 
 2080 /* helper functions for operation of device flag */
 2081 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
 2082 {
 2083 	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
 2084 }
 2085 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
 2086 {
 2087 	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
 2088 }
 2089 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
 2090 {
 2091 	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
 2092 }
 2093 
 2094 /**
 2095  * pci_ari_enabled - query ARI forwarding status
 2096  * @bus: the PCI bus
 2097  *
 2098  * Returns true if ARI forwarding is enabled.
 2099  */
 2100 static inline bool pci_ari_enabled(struct pci_bus *bus)
 2101 {
 2102 	return bus->self && bus->self->ari_enabled;
 2103 }
 2104 
 2105 /* provide the legacy pci_dma_* API */
 2106 #include <linux/pci-dma-compat.h>
 2107 
  2108 #endif /* LINUX_PCI_H */
     1 /*
    2  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
    3  *
    4  * (C) SGI 2006, Christoph Lameter
    5  * 	Cleaned up and restructured to ease the addition of alternative
    6  * 	implementations of SLAB allocators.
    7  * (C) Linux Foundation 2008-2013
    8  *      Unified interface for all slab allocators
    9  */
   10 
   11 #ifndef _LINUX_SLAB_H
   12 #define	_LINUX_SLAB_H
   13 
   14 #include <linux/gfp.h>
   15 #include <linux/types.h>
   16 #include <linux/workqueue.h>
   17 
   18 
   19 /*
   20  * Flags to pass to kmem_cache_create().
   21  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
   22  */
   23 #define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
   24 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
   25 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
   26 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
   27 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
   28 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
   29 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
   30 /*
   31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
   32  *
   33  * This delays freeing the SLAB page by a grace period, it does _NOT_
   34  * delay object freeing. This means that if you do kmem_cache_free()
   35  * that memory location is free to be reused at any time. Thus it may
   36  * be possible to see another object there in the same RCU grace period.
   37  *
   38  * This feature only ensures the memory location backing the object
   39  * stays valid, the trick to using this is relying on an independent
   40  * object validation pass. Something like:
   41  *
   42  *  rcu_read_lock()
   43  * again:
   44  *  obj = lockless_lookup(key);
   45  *  if (obj) {
   46  *    if (!try_get_ref(obj)) // might fail for free objects
   47  *      goto again;
   48  *
   49  *    if (obj->key != key) { // not the object we expected
   50  *      put_ref(obj);
   51  *      goto again;
   52  *    }
   53  *  }
   54  *  rcu_read_unlock();
   55  *
   56  * This is useful if we need to approach a kernel structure obliquely,
   57  * from its address obtained without the usual locking. We can lock
   58  * the structure to stabilize it and check it's still at the given address,
   59  * only if we can be sure that the memory has not been meanwhile reused
   60  * for some other kind of object (which our subsystem's lock might corrupt).
   61  *
   62  * rcu_read_lock before reading the address, then rcu_read_unlock after
   63  * taking the spinlock within the structure expected at that address.
   64  */
   65 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
   66 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
   67 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
   68 
   69 /* Flag to prevent checks on free */
   70 #ifdef CONFIG_DEBUG_OBJECTS
   71 # define SLAB_DEBUG_OBJECTS	0x00400000UL
   72 #else
   73 # define SLAB_DEBUG_OBJECTS	0x00000000UL
   74 #endif
   75 
   76 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
   77 
   78 /* Don't track use of uninitialized memory */
   79 #ifdef CONFIG_KMEMCHECK
   80 # define SLAB_NOTRACK		0x01000000UL
   81 #else
   82 # define SLAB_NOTRACK		0x00000000UL
   83 #endif
   84 #ifdef CONFIG_FAILSLAB
   85 # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
   86 #else
   87 # define SLAB_FAILSLAB		0x00000000UL
   88 #endif
   89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
   90 # define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
   91 #else
   92 # define SLAB_ACCOUNT		0x00000000UL
   93 #endif
   94 
   95 #ifdef CONFIG_KASAN
   96 #define SLAB_KASAN		0x08000000UL
   97 #else
   98 #define SLAB_KASAN		0x00000000UL
   99 #endif
  100 
  101 /* The following flags affect the page allocator grouping pages by mobility */
  102 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
  103 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
  104 /*
  105  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  106  *
  107  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
  108  *
  109  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
  110  * Both make kfree a no-op.
  111  */
  112 #define ZERO_SIZE_PTR ((void *)16)
  113 
  114 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  115 				(unsigned long)ZERO_SIZE_PTR)
  116 
  117 #include <linux/kmemleak.h>
  118 #include <linux/kasan.h>
  119 
  120 struct mem_cgroup;
  121 /*
  122  * struct kmem_cache related prototypes
  123  */
  124 void __init kmem_cache_init(void);
  125 bool slab_is_available(void);
  126 
  127 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  128 			unsigned long,
  129 			void (*)(void *));
  130 void kmem_cache_destroy(struct kmem_cache *);
  131 int kmem_cache_shrink(struct kmem_cache *);
  132 
  133 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
  134 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
  135 void memcg_destroy_kmem_caches(struct mem_cgroup *);
  136 
  137 /*
  138  * Please use this macro to create slab caches. Simply specify the
  139  * name of the structure and maybe some flags that are listed above.
  140  *
  141  * The alignment of the struct determines object alignment. If you
   142  * e.g. add ____cacheline_aligned_in_smp to the struct declaration
  143  * then the objects will be properly aligned in SMP configurations.
  144  */
  145 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  146 		sizeof(struct __struct), __alignof__(struct __struct),\
  147 		(__flags), NULL)
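
/*
 * Example (editorial sketch): creating and using a cache for a hypothetical
 * structure; SLAB_PANIC avoids a NULL check at creation time.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */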
  148 
  149 /*
  150  * Common kmalloc functions provided by all allocators
  151  */
  152 void * __must_check __krealloc(const void *, size_t, gfp_t);
  153 void * __must_check krealloc(const void *, size_t, gfp_t);
  154 void kfree(const void *);
  155 void kzfree(const void *);
  156 size_t ksize(const void *);
  157 
  158 /*
  159  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  160  * alignment larger than the alignment of a 64-bit integer.
  161  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  162  */
  163 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  164 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  165 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  166 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  167 #else
  168 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  169 #endif
  170 
  171 /*
  172  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  173  * Intended for arches that get misalignment faults even for 64 bit integer
  174  * aligned buffers.
  175  */
  176 #ifndef ARCH_SLAB_MINALIGN
  177 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  178 #endif
  179 
  180 /*
  181  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
  182  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
  183  * aligned pointers.
  184  */
  185 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
  186 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
  187 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
  188 
  189 /*
  190  * Kmalloc array related definitions
  191  */
  192 
  193 #ifdef CONFIG_SLAB
  194 /*
  195  * The largest kmalloc size supported by the SLAB allocators is
   196  * 32 megabytes (2^25) or the maximum allocatable page order if that is
  197  * less than 32 MB.
  198  *
   199  * WARNING: It's not easy to increase this value since the allocators have
  200  * to do various tricks to work around compiler limitations in order to
  201  * ensure proper constant folding.
  202  */
  203 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  204 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  205 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  206 #ifndef KMALLOC_SHIFT_LOW
  207 #define KMALLOC_SHIFT_LOW	5
  208 #endif
  209 #endif
  210 
  211 #ifdef CONFIG_SLUB
  212 /*
  213  * SLUB directly allocates requests fitting in to an order-1 page
  214  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  215  */
  216 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  217 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
  218 #ifndef KMALLOC_SHIFT_LOW
  219 #define KMALLOC_SHIFT_LOW	3
  220 #endif
  221 #endif
  222 
  223 #ifdef CONFIG_SLOB
  224 /*
  225  * SLOB passes all requests larger than one page to the page allocator.
  226  * No kmalloc array is necessary since objects of different sizes can
  227  * be allocated from the same page.
  228  */
  229 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  230 #define KMALLOC_SHIFT_MAX	30
  231 #ifndef KMALLOC_SHIFT_LOW
  232 #define KMALLOC_SHIFT_LOW	3
  233 #endif
  234 #endif
  235 
  236 /* Maximum allocatable size */
  237 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  238 /* Maximum size for which we actually use a slab cache */
  239 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
   241 /* Maximum order allocatable via the slab allocator */
  241 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
  242 
  243 /*
  244  * Kmalloc subsystem.
  245  */
  246 #ifndef KMALLOC_MIN_SIZE
  247 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  248 #endif
  249 
  250 /*
   251  * This restriction comes from the byte sized index implementation.
   252  * Page size is normally 2^12 bytes and, in this case, if we want to use
   253  * a byte sized index which can represent 2^8 entries, the size of each
   254  * object must be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
   255  * If the minimum kmalloc size is less than 16, we use it as the minimum
   256  * object size and give up on using a byte sized index.
  257  */
  258 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
  259                                (KMALLOC_MIN_SIZE) : 16)
  260 
  261 #ifndef CONFIG_SLOB
  262 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  263 #ifdef CONFIG_ZONE_DMA
  264 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  265 #endif
  266 
  267 /*
  268  * Figure out which kmalloc slab an allocation of a certain size
  269  * belongs to.
  270  * 0 = zero alloc
  271  * 1 =  65 .. 96 bytes
  272  * 2 = 129 .. 192 bytes
  273  * n = 2^(n-1)+1 .. 2^n
  274  */
  275 static __always_inline int kmalloc_index(size_t size)
  276 {
  277 	if (!size)
  278 		return 0;
  279 
  280 	if (size <= KMALLOC_MIN_SIZE)
  281 		return KMALLOC_SHIFT_LOW;
  282 
  283 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
  284 		return 1;
  285 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
  286 		return 2;
  287 	if (size <=          8) return 3;
  288 	if (size <=         16) return 4;
  289 	if (size <=         32) return 5;
  290 	if (size <=         64) return 6;
  291 	if (size <=        128) return 7;
  292 	if (size <=        256) return 8;
  293 	if (size <=        512) return 9;
  294 	if (size <=       1024) return 10;
  295 	if (size <=   2 * 1024) return 11;
  296 	if (size <=   4 * 1024) return 12;
  297 	if (size <=   8 * 1024) return 13;
  298 	if (size <=  16 * 1024) return 14;
  299 	if (size <=  32 * 1024) return 15;
  300 	if (size <=  64 * 1024) return 16;
  301 	if (size <= 128 * 1024) return 17;
  302 	if (size <= 256 * 1024) return 18;
  303 	if (size <= 512 * 1024) return 19;
  304 	if (size <= 1024 * 1024) return 20;
  305 	if (size <=  2 * 1024 * 1024) return 21;
  306 	if (size <=  4 * 1024 * 1024) return 22;
  307 	if (size <=  8 * 1024 * 1024) return 23;
  308 	if (size <=  16 * 1024 * 1024) return 24;
  309 	if (size <=  32 * 1024 * 1024) return 25;
  310 	if (size <=  64 * 1024 * 1024) return 26;
  311 	BUG();
  312 
  313 	/* Will never be reached. Needed because the compiler may complain */
  314 	return -1;
  315 }
  316 #endif /* !CONFIG_SLOB */
  317 
  318 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
  319 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
  320 void kmem_cache_free(struct kmem_cache *, void *);
  321 
  322 /*
  323  * Bulk allocation and freeing operations. These are accelerated in an
  324  * allocator specific way to avoid taking locks repeatedly or building
  325  * metadata structures unnecessarily.
  326  *
  327  * Note that interrupts must be enabled when calling these functions.
  328  */
  329 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  330 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
  331 
  332 /*
  333  * Caller must not use kfree_bulk() on memory not originally allocated
  334  * by kmalloc(), because the SLOB allocator cannot handle this.
  335  */
  336 static __always_inline void kfree_bulk(size_t size, void **p)
  337 {
  338 	kmem_cache_free_bulk(NULL, size, p);
  339 }
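
/*
 * Example (editorial sketch): allocating and releasing a batch of objects
 * from a hypothetical cache; kmem_cache_alloc_bulk() returns 0 on failure
 * and the requested count on success.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, 16, objs);
 */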
  340 
  341 #ifdef CONFIG_NUMA
  342 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
  343 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
  344 #else
  345 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  346 {
  347 	return __kmalloc(size, flags);
  348 }
  349 
  350 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
  351 {
  352 	return kmem_cache_alloc(s, flags);
  353 }
  354 #endif
  355 
  356 #ifdef CONFIG_TRACING
  357 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
  358 
  359 #ifdef CONFIG_NUMA
  360 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  361 					   gfp_t gfpflags,
  362 					   int node, size_t size) __assume_slab_alignment __malloc;
  363 #else
  364 static __always_inline void *
  365 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  366 			      gfp_t gfpflags,
  367 			      int node, size_t size)
  368 {
  369 	return kmem_cache_alloc_trace(s, gfpflags, size);
  370 }
  371 #endif /* CONFIG_NUMA */
  372 
  373 #else /* CONFIG_TRACING */
  374 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  375 		gfp_t flags, size_t size)
  376 {
  377 	void *ret = kmem_cache_alloc(s, flags);
  378 
  379 	kasan_kmalloc(s, ret, size, flags);
  380 	return ret;
  381 }
  382 
  383 static __always_inline void *
  384 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  385 			      gfp_t gfpflags,
  386 			      int node, size_t size)
  387 {
  388 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
  389 
  390 	kasan_kmalloc(s, ret, size, gfpflags);
  391 	return ret;
  392 }
  393 #endif /* CONFIG_TRACING */
  394 
  395 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  396 
  397 #ifdef CONFIG_TRACING
  398 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  399 #else
  400 static __always_inline void *
  401 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  402 {
  403 	return kmalloc_order(size, flags, order);
  404 }
  405 #endif
  406 
  407 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  408 {
  409 	unsigned int order = get_order(size);
  410 	return kmalloc_order_trace(size, flags, order);
  411 }
  412 
  413 /**
  414  * kmalloc - allocate memory
  415  * @size: how many bytes of memory are required.
  416  * @flags: the type of memory to allocate.
  417  *
  418  * kmalloc is the normal method of allocating memory
  419  * for objects smaller than page size in the kernel.
  420  *
  421  * The @flags argument may be one of:
  422  *
  423  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
  424  *
  425  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
  426  *
  427  * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
  428  *   For example, use this inside interrupt handlers.
  429  *
  430  * %GFP_HIGHUSER - Allocate pages from high memory.
  431  *
  432  * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
  433  *
  434  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  435  *
  436  * %GFP_NOWAIT - Allocation will not sleep.
  437  *
  438  * %__GFP_THISNODE - Allocate node-local memory only.
  439  *
  440  * %GFP_DMA - Allocation suitable for DMA.
  441  *   Should only be used for kmalloc() caches. Otherwise, use a
  442  *   slab created with SLAB_DMA.
  443  *
  444  * It is also possible to set different flags by OR'ing
  445  * in one or more of the following additional @flags:
  446  *
  447  * %__GFP_COLD - Request cache-cold pages instead of
  448  *   trying to return cache-warm pages.
  449  *
  450  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  451  *
  452  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  453  *   (think twice before using).
  454  *
  455  * %__GFP_NORETRY - If memory is not immediately available,
  456  *   then give up at once.
  457  *
  458  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  459  *
  460  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  461  *
  462  * There are other flags available as well, but these are not intended
  463  * for general use, and so are not documented here. For a full list of
  464  * potential flags, always refer to linux/gfp.h.
  465  */
  466 static __always_inline void *kmalloc(size_t size, gfp_t flags)
  467 {
  468 	if (__builtin_constant_p(size)) {
  469 		if (size > KMALLOC_MAX_CACHE_SIZE)
  470 			return kmalloc_large(size, flags);
  471 #ifndef CONFIG_SLOB
  472 		if (!(flags & GFP_DMA)) {
  473 			int index = kmalloc_index(size);
  474 
  475 			if (!index)
  476 				return ZERO_SIZE_PTR;
  477 
  478 			return kmem_cache_alloc_trace(kmalloc_caches[index],
  479 					flags, size);
  480 		}
  481 #endif
  482 	}
  483 	return __kmalloc(size, flags);
  484 }
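A minimal sketch contrasting the two most common @flags choices (the helper names are hypothetical, not part of this header):

#include <linux/slab.h>

/* Process context: the allocation may sleep, so GFP_KERNEL is fine. */
static void *alloc_in_process_ctx(size_t len)
{
	return kmalloc(len, GFP_KERNEL);
}

/* Interrupt context: must not sleep, so use GFP_ATOMIC and always be
 * prepared for NULL, since the emergency pools can run dry. */
static void *alloc_in_irq_ctx(size_t len)
{
	return kmalloc(len, GFP_ATOMIC);
}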
  485 
  486 /*
  487  * Determine the size used for the nth kmalloc cache.
  488  * Returns the size, or 0 if a kmalloc cache for that
  489  * size does not exist.
  490  */
  491 static __always_inline int kmalloc_size(int n)
  492 {
  493 #ifndef CONFIG_SLOB
  494 	if (n > 2)
  495 		return 1 << n;
  496 
  497 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
  498 		return 96;
  499 
  500 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
  501 		return 192;
  502 #endif
  503 	return 0;
  504 }
  505 
  506 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  507 {
  508 #ifndef CONFIG_SLOB
  509 	if (__builtin_constant_p(size) &&
  510 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
  511 		int i = kmalloc_index(size);
  512 
  513 		if (!i)
  514 			return ZERO_SIZE_PTR;
  515 
  516 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
  517 						flags, node, size);
  518 	}
  519 #endif
  520 	return __kmalloc_node(size, flags, node);
  521 }
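A sketch of node-aware allocation (the helper is hypothetical; dev_to_node() is the usual way a driver picks @node):

#include <linux/device.h>
#include <linux/slab.h>

/* Place a per-device buffer on the NUMA node closest to the device;
 * kmalloc_node() itself falls back to other nodes if needed. */
static void *alloc_near_device(struct device *dev, size_t len)
{
	return kmalloc_node(len, GFP_KERNEL, dev_to_node(dev));
}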
  522 
  523 struct memcg_cache_array {
  524 	struct rcu_head rcu;
  525 	struct kmem_cache *entries[0];
  526 };
  527 
  528 /*
  529  * This is the main placeholder for memcg-related information in kmem caches.
  530  * Both the root cache and the child caches will have it. For the root cache,
  531  * this will hold a dynamically allocated array large enough to hold
  532  * information about the currently limited memcgs in the system. To allow the
  533  * array to be accessed without taking any locks, on relocation we free the old
  534  * version only after a grace period.
  535  *
  536  * Child caches will hold extra metadata needed for their operation. Fields are:
  537  *
  538  * @memcg: pointer to the memcg this cache belongs to
  539  * @root_cache: pointer to the global, root cache, this cache was derived from
  540  *
  541  * Both root and child caches of the same kind are linked into a list chained
  542  * through @list.
  543  */
  544 struct memcg_cache_params {
  545 	bool is_root_cache;
  546 	struct list_head list;
  547 	union {
  548 		struct memcg_cache_array __rcu *memcg_caches;
  549 		struct {
  550 			struct mem_cgroup *memcg;
  551 			struct kmem_cache *root_cache;
  552 		};
  553 	};
  554 };
  555 
  556 int memcg_update_all_caches(int num_memcgs);
  557 
  558 /**
  559  * kmalloc_array - allocate memory for an array.
  560  * @n: number of elements.
  561  * @size: element size.
  562  * @flags: the type of memory to allocate (see kmalloc).
  563  */
  564 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  565 {
  566 	if (size != 0 && n > SIZE_MAX / size)
  567 		return NULL;
  568 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
  569 		return kmalloc(n * size, flags);
  570 	return __kmalloc(n * size, flags);
  571 }
  572 
  573 /**
  574  * kcalloc - allocate memory for an array. The memory is set to zero.
  575  * @n: number of elements.
  576  * @size: element size.
  577  * @flags: the type of memory to allocate (see kmalloc).
  578  */
  579 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  580 {
  581 	return kmalloc_array(n, size, flags | __GFP_ZERO);
  582 }
  583 
  584 /*
  585  * kmalloc_track_caller is a special version of kmalloc that records the
  586  * calling function of the routine calling it for slab leak tracking instead
  587  * of just the calling function (confusing, eh?).
  588  * It's useful when the call to kmalloc comes from a widely-used standard
  589  * allocator where we care about the real place the memory allocation
  590  * request comes from.
  591  */
  592 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  593 #define kmalloc_track_caller(size, flags) \
  594 	__kmalloc_track_caller(size, flags, _RET_IP_)
  595 
  596 #ifdef CONFIG_NUMA
  597 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  598 #define kmalloc_node_track_caller(size, flags, node) \
  599 	__kmalloc_node_track_caller(size, flags, node, \
  600 			_RET_IP_)
  601 
  602 #else /* CONFIG_NUMA */
  603 
  604 #define kmalloc_node_track_caller(size, flags, node) \
  605 	kmalloc_track_caller(size, flags)
  606 
  607 #endif /* CONFIG_NUMA */
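A sketch of the intended use: a widely shared helper forwards its caller's return address via _RET_IP_, so leak reports blame the real allocation site rather than the helper itself (the helper is hypothetical, modeled on kmemdup()):

#include <linux/slab.h>
#include <linux/string.h>

static void *my_memdup(const void *src, size_t len, gfp_t gfp)
{
	/* Without the _RET_IP_ forwarding done by kmalloc_track_caller(),
	 * every leak would be attributed to my_memdup() itself. */
	void *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}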
  608 
  609 /*
  610  * Shortcuts
  611  */
  612 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  613 {
  614 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  615 }
  616 
  617 /**
  618  * kzalloc - allocate memory. The memory is set to zero.
  619  * @size: how many bytes of memory are required.
  620  * @flags: the type of memory to allocate (see kmalloc).
  621  */
  622 static inline void *kzalloc(size_t size, gfp_t flags)
  623 {
  624 	return kmalloc(size, flags | __GFP_ZERO);
  625 }
  626 
  627 /**
  628  * kzalloc_node - allocate zeroed memory from a particular memory node.
  629  * @size: how many bytes of memory are required.
  630  * @flags: the type of memory to allocate (see kmalloc).
  631  * @node: memory node from which to allocate
  632  */
  633 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  634 {
  635 	return kmalloc_node(size, flags | __GFP_ZERO, node);
  636 }
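Typical driver boilerplate built on these shortcuts might look as follows (a sketch; the private structure is hypothetical):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t lock;
	int state;
};

/* kzalloc() returns zeroed memory, so ->state starts out as 0 without
 * an explicit memset(); only the lock needs explicit initialization. */
static struct my_priv *my_priv_create(void)
{
	struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (priv)
		spin_lock_init(&priv->lock);
	return priv;
}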
  637 
  638 unsigned int kmem_cache_size(struct kmem_cache *s);
  639 void __init kmem_cache_init_late(void);
  640 
  641 #endif	/* _LINUX_SLAB_H */

    1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_type_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
  121  * Despite its name it doesn't necessarily have to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with LOADs and STOREs inside this section.
  124  * spin_lock() is the one-way barrier, this LOAD can not escape out
  125  * of the region. So the default implementation simply ensures that
  126  * a STORE can not move into the critical section, smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /**
  134  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  135  * @lock: the spinlock in question.
  136  */
  137 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  138 
  139 #ifdef CONFIG_DEBUG_SPINLOCK
  140  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  142  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  143  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  144 #else
  145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  146 {
  147 	__acquire(lock);
  148 	arch_spin_lock(&lock->raw_lock);
  149 }
  150 
  151 static inline void
  152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  153 {
  154 	__acquire(lock);
  155 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  156 }
  157 
  158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  159 {
  160 	return arch_spin_trylock(&(lock)->raw_lock);
  161 }
  162 
  163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  164 {
  165 	arch_spin_unlock(&lock->raw_lock);
  166 	__release(lock);
  167 }
  168 #endif
  169 
  170 /*
  171  * Define the various spin_lock methods.  Note we define these
  172  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  173  * various methods are defined as nops in the case they are not
  174  * required.
  175  */
  176 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  177 
  178 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  179 
  180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  181 # define raw_spin_lock_nested(lock, subclass) \
  182 	_raw_spin_lock_nested(lock, subclass)
  183 # define raw_spin_lock_bh_nested(lock, subclass) \
  184 	_raw_spin_lock_bh_nested(lock, subclass)
  185 
  186 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  187 	 do {								\
  188 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  189 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  190 	 } while (0)
  191 #else
  192 /*
  193  * Always evaluate the 'subclass' argument to avoid that the compiler
  194  * warns about set-but-not-used variables when building with
  195  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  196  */
  197 # define raw_spin_lock_nested(lock, subclass)		\
  198 	_raw_spin_lock(((void)(subclass), (lock)))
  199 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  200 # define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
  201 #endif
  202 
  203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  204 
  205 #define raw_spin_lock_irqsave(lock, flags)			\
  206 	do {						\
  207 		typecheck(unsigned long, flags);	\
  208 		flags = _raw_spin_lock_irqsave(lock);	\
  209 	} while (0)
  210 
  211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  213 	do {								\
  214 		typecheck(unsigned long, flags);			\
  215 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  216 	} while (0)
  217 #else
  218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  219 	do {								\
  220 		typecheck(unsigned long, flags);			\
  221 		flags = _raw_spin_lock_irqsave(lock);			\
  222 	} while (0)
  223 #endif
  224 
  225 #else
  226 
  227 #define raw_spin_lock_irqsave(lock, flags)		\
  228 	do {						\
  229 		typecheck(unsigned long, flags);	\
  230 		_raw_spin_lock_irqsave(lock, flags);	\
  231 	} while (0)
  232 
  233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  234 	raw_spin_lock_irqsave(lock, flags)
  235 
  236 #endif
  237 
  238 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  239 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  240 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  241 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  242 
  243 #define raw_spin_unlock_irqrestore(lock, flags)		\
  244 	do {							\
  245 		typecheck(unsigned long, flags);		\
  246 		_raw_spin_unlock_irqrestore(lock, flags);	\
  247 	} while (0)
  248 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  249 
  250 #define raw_spin_trylock_bh(lock) \
  251 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  252 
  253 #define raw_spin_trylock_irq(lock) \
  254 ({ \
  255 	local_irq_disable(); \
  256 	raw_spin_trylock(lock) ? \
  257 	1 : ({ local_irq_enable(); 0;  }); \
  258 })
  259 
  260 #define raw_spin_trylock_irqsave(lock, flags) \
  261 ({ \
  262 	local_irq_save(flags); \
  263 	raw_spin_trylock(lock) ? \
  264 	1 : ({ local_irq_restore(flags); 0; }); \
  265 })
  266 
  267 /**
  268  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  269  * @lock: the spinlock in question.
  270  */
  271 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  272 
  273 /* Include rwlock functions */
  274 #include <linux/rwlock.h>
  275 
  276 /*
  277  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  278  */
  279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  280 # include <linux/spinlock_api_smp.h>
  281 #else
  282 # include <linux/spinlock_api_up.h>
  283 #endif
  284 
  285 /*
  286  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  287  */
  288 
  289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  290 {
  291 	return &lock->rlock;
  292 }
  293 
  294 #define spin_lock_init(_lock)				\
  295 do {							\
  296 	spinlock_check(_lock);				\
  297 	raw_spin_lock_init(&(_lock)->rlock);		\
  298 } while (0)
  299 
  300 static __always_inline void spin_lock(spinlock_t *lock)
  301 {
  302 	raw_spin_lock(&lock->rlock);
  303 }
  304 
  305 static __always_inline void spin_lock_bh(spinlock_t *lock)
  306 {
  307 	raw_spin_lock_bh(&lock->rlock);
  308 }
  309 
  310 static __always_inline int spin_trylock(spinlock_t *lock)
  311 {
  312 	return raw_spin_trylock(&lock->rlock);
  313 }
  314 
  315 #define spin_lock_nested(lock, subclass)			\
  316 do {								\
  317 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  318 } while (0)
  319 
  320 #define spin_lock_bh_nested(lock, subclass)			\
  321 do {								\
  322 	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
  323 } while (0)
  324 
  325 #define spin_lock_nest_lock(lock, nest_lock)				\
  326 do {									\
  327 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  328 } while (0)
  329 
  330 static __always_inline void spin_lock_irq(spinlock_t *lock)
  331 {
  332 	raw_spin_lock_irq(&lock->rlock);
  333 }
  334 
  335 #define spin_lock_irqsave(lock, flags)				\
  336 do {								\
  337 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  338 } while (0)
  339 
  340 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  341 do {									\
  342 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  343 } while (0)
  344 
  345 static __always_inline void spin_unlock(spinlock_t *lock)
  346 {
  347 	raw_spin_unlock(&lock->rlock);
  348 }
  349 
  350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
  351 {
  352 	raw_spin_unlock_bh(&lock->rlock);
  353 }
  354 
  355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
  356 {
  357 	raw_spin_unlock_irq(&lock->rlock);
  358 }
  359 
  360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  361 {
  362 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  363 }
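The canonical pairing of these calls, safe from any context, might look like this (a sketch; the counter is hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);
static unsigned long counter;

/* Saves the current IRQ state, disables interrupts locally, takes the
 * lock, and on unlock restores exactly the state that was saved. */
static void counter_inc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&counter_lock, flags);
	counter++;
	spin_unlock_irqrestore(&counter_lock, flags);
}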
  364 
  365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
  366 {
  367 	return raw_spin_trylock_bh(&lock->rlock);
  368 }
  369 
  370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
  371 {
  372 	return raw_spin_trylock_irq(&lock->rlock);
  373 }
  374 
  375 #define spin_trylock_irqsave(lock, flags)			\
  376 ({								\
  377 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  378 })
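A sketch of the trylock variant, for paths that must not spin with interrupts disabled (the names are hypothetical):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(stats_lock);
static unsigned long stats_updates;

/* Opportunistic update: back off instead of spinning if the lock is
 * contended. Note that flags is only meaningful when the trylock
 * succeeded. */
static bool stats_try_update(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&stats_lock, flags))
		return false;
	stats_updates++;
	spin_unlock_irqrestore(&stats_lock, flags);
	return true;
}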
  379 
  380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
  381 {
  382 	raw_spin_unlock_wait(&lock->rlock);
  383 }
  384 
  385 static __always_inline int spin_is_locked(spinlock_t *lock)
  386 {
  387 	return raw_spin_is_locked(&lock->rlock);
  388 }
  389 
  390 static __always_inline int spin_is_contended(spinlock_t *lock)
  391 {
  392 	return raw_spin_is_contended(&lock->rlock);
  393 }
  394 
  395 static __always_inline int spin_can_lock(spinlock_t *lock)
  396 {
  397 	return raw_spin_can_lock(&lock->rlock);
  398 }
  399 
  400 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  401 
  402 /*
  403  * Pull the atomic_t declaration:
  404  * (asm-mips/atomic.h needs above definitions)
  405  */
  406 #include <linux/atomic.h>
  407 /**
  408  * atomic_dec_and_lock - lock on reaching reference count zero
  409  * @atomic: the atomic counter
  410  * @lock: the spinlock in question
  411  *
  412  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  413  * @lock.  Returns false for all other cases.
  414  */
  415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  416 #define atomic_dec_and_lock(atomic, lock) \
  417 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
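The classic use is refcounted teardown, where the final put must atomically take the lock protecting the lookup structure (a sketch with hypothetical names):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);

/* Drop a reference; only the thread that takes the count to zero gets
 * the lock, unlinks the object and frees it, so lookups never see a
 * half-freed object. */
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_lock(&o->refcount, &obj_list_lock)) {
		list_del(&o->node);
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
}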
  418 
  419 #endif /* __LINUX_SPINLOCK_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is a real error in your driver.
The Error trace column shows a path along which the given rule is violated. You can expand/collapse whole entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and expand/collapse each particular entity by clicking its +/-. Hovering over some entities shows explanatory tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column shows the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs indicate the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's contents.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
| linux-4.8-rc1.tar.xz | drivers/firewire/nosy.ko | 152_1a | CPAchecker | Bug | Fixed | 2016-09-24 18:04:53 | L0251 |
Comment
Reported: 25 Sep 2016