Bug
Error # 134
Error trace
         {    19     typedef signed char __s8;    20     typedef unsigned char __u8;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    30     typedef unsigned long long __u64;    15     typedef signed char s8;    16     typedef unsigned char u8;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;    33     typedef __u16 __be16;    35     typedef __u32 __be32;    40     typedef __u32 __wsum;   259     struct kernel_symbol {   unsigned long value;   const char *name; } ;    33     struct module ;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;    74     typedef __kernel_clock_t clock_t;   102     typedef __s32 int32_t;   106     typedef __u8 uint8_t;   108     typedef __u32 uint32_t;   111     typedef __u64 uint64_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   152     typedef u64 dma_addr_t;   157     typedef unsigned int gfp_t;   158     typedef unsigned int fmode_t;   176     struct __anonstruct_atomic_t_6 {   int counter; } ;   176     typedef struct __anonstruct_atomic_t_6 atomic_t;   181     struct __anonstruct_atomic64_t_7 {   long counter; } ;   181     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   182     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   187     struct hlist_node ;   187     struct hlist_head {   struct hlist_node *first; } ;   191     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   202     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;   131     typedef void (*ctor_fn_t)();    48     struct device ;    54     struct net_device ;   432     struct file_operations ;   444     struct completion ;   445     struct pt_regs ;    27     union __anonunion___u_9 {   struct list_head *__val;   char __c[1U]; } ;   189     union __anonunion___u_13 {   struct list_head *__val;   char __c[1U]; } ;   555     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;   111     struct timespec ;   112     struct compat_timespec ;   113     struct __anonstruct_futex_25 {   u32 *uaddr;   u32 val;   u32 flags;   u32 bitset;   u64 time;   u32 *uaddr2; } ;   113     struct __anonstruct_nanosleep_26 {   clockid_t clockid;   struct timespec *rmtp;   struct compat_timespec *compat_rmtp;   u64 expires; } ;   113     
struct pollfd ;   113     struct __anonstruct_poll_27 {   struct pollfd *ufds;   int nfds;   int has_timeout;   unsigned long tv_sec;   unsigned long tv_nsec; } ;   113     union __anonunion____missing_field_name_24 {   struct __anonstruct_futex_25 futex;   struct __anonstruct_nanosleep_26 nanosleep;   struct __anonstruct_poll_27 poll; } ;   113     struct restart_block {   long int (*fn)(struct restart_block *);   union __anonunion____missing_field_name_24 __annonCompField4; } ;    39     struct page ;    26     struct task_struct ;    27     struct mm_struct ;   288     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    66     struct __anonstruct____missing_field_name_30 {   unsigned int a;   unsigned int b; } ;    66     struct __anonstruct____missing_field_name_31 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    66     union __anonunion____missing_field_name_29 {   struct __anonstruct____missing_field_name_30 __annonCompField5;   struct __anonstruct____missing_field_name_31 __annonCompField6; } ;    66     struct desc_struct {   union __anonunion____missing_field_name_29 __annonCompField7; } ;    12     typedef unsigned long pteval_t;    13     typedef unsigned long pmdval_t;    15     typedef unsigned long pgdval_t;    16     typedef unsigned long pgprotval_t;    18     struct __anonstruct_pte_t_32 {   pteval_t pte; } ;    18     typedef struct __anonstruct_pte_t_32 pte_t;    20     struct pgprot {   pgprotval_t pgprot; } ;   250     typedef struct pgprot pgprot_t;   252     struct __anonstruct_pgd_t_33 {   pgdval_t pgd; } ;   252     typedef struct __anonstruct_pgd_t_33 pgd_t;   291     struct __anonstruct_pmd_t_35 {   pmdval_t pmd; } ;   291     typedef struct __anonstruct_pmd_t_35 pmd_t;   417     typedef struct page *pgtable_t;   428     struct file ;   441     struct seq_file ;   479     struct thread_struct ;   481     struct cpumask ;    20     struct qspinlock {   atomic_t val; } ;    33     typedef struct qspinlock arch_spinlock_t;    34     struct qrwlock {   atomic_t cnts;   arch_spinlock_t wait_lock; } ;    14     typedef struct qrwlock arch_rwlock_t;   253     struct math_emu_info {   long ___orig_eip;   struct pt_regs *regs; } ;   338     struct cpumask {   unsigned long bits[128U]; } ;    15     typedef struct cpumask cpumask_t;   654     typedef struct cpumask *cpumask_var_t;    26     union __anonunion___u_42 {   int __val;   char __c[1U]; } ;    23     typedef atomic64_t atomic_long_t;    82     struct static_key {   atomic_t enabled; } ;    22     struct tracepoint_func {   void *func;   void *data;   int prio; } ;    28     struct tracepoint {   const char *name;   struct static_key key;   void (*regfunc)();   void (*unregfunc)();   struct tracepoint_func *funcs; } ;   298     struct fregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;    26     struct 
__anonstruct____missing_field_name_59 {   u64 rip;   u64 rdp; } ;    26     struct __anonstruct____missing_field_name_60 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;    26     union __anonunion____missing_field_name_58 {   struct __anonstruct____missing_field_name_59 __annonCompField13;   struct __anonstruct____missing_field_name_60 __annonCompField14; } ;    26     union __anonunion____missing_field_name_61 {   u32 padding1[12U];   u32 sw_reserved[12U]; } ;    26     struct fxregs_state {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion____missing_field_name_58 __annonCompField15;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion____missing_field_name_61 __annonCompField16; } ;    66     struct swregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   226     struct xstate_header {   u64 xfeatures;   u64 xcomp_bv;   u64 reserved[6U]; } ;   232     struct xregs_state {   struct fxregs_state i387;   struct xstate_header header;   u8 extended_state_area[0U]; } ;   247     union fpregs_state {   struct fregs_state fsave;   struct fxregs_state fxsave;   struct swregs_state soft;   struct xregs_state xsave;   u8 __padding[4096U]; } ;   264     struct fpu {   unsigned int last_cpu;   unsigned char fpstate_active;   unsigned char fpregs_active;   unsigned char counter;   union fpregs_state state; } ;   169     struct seq_operations ;   371     struct perf_event ;   372     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   unsigned long fsbase;   unsigned long gsbase;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   struct fpu fpu; } ;    69     typedef int pao_T__;    74     typedef int pao_T_____0;    33     struct lockdep_map ;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    28     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct hlist_node hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned int version;   unsigned long ops;   const char *name;   int name_version;   unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int cpu;   unsigned long ip; } ;   207     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references;   
unsigned int pin_count; } ;   572     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct raw_spinlock raw_spinlock_t;    33     struct __anonstruct____missing_field_name_75 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion____missing_field_name_74 {   struct raw_spinlock rlock;   struct __anonstruct____missing_field_name_75 __annonCompField19; } ;    33     struct spinlock {   union __anonunion____missing_field_name_74 __annonCompField20; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_76 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_76 rwlock_t;   416     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    52     typedef struct seqcount seqcount_t;   404     struct __anonstruct_seqlock_t_89 {   struct seqcount seqcount;   spinlock_t lock; } ;   404     typedef struct __anonstruct_seqlock_t_89 seqlock_t;   598     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;    83     struct user_namespace ;    22     struct __anonstruct_kuid_t_90 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_90 kuid_t;    27     struct __anonstruct_kgid_t_91 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_91 kgid_t;   139     struct kstat {   u64 ino;   dev_t dev;   umode_t mode;   unsigned int nlink;   kuid_t uid;   kgid_t gid;   dev_t rdev;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   unsigned long blksize;   unsigned long long blocks; } ;    36     struct vm_area_struct ;    38     struct __wait_queue_head {   spinlock_t lock;   struct list_head task_list; } ;    43     typedef struct __wait_queue_head wait_queue_head_t;    97     struct __anonstruct_nodemask_t_92 {   unsigned long bits[16U]; } ;    97     typedef struct __anonstruct_nodemask_t_92 nodemask_t;    13     struct optimistic_spin_queue {   atomic_t tail; } ;    39     struct mutex {   atomic_t count;   spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   void *magic;   struct lockdep_map dep_map; } ;    67     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   void *magic; } ;   177     struct rw_semaphore ;   178     struct rw_semaphore {   long count;   struct list_head wait_list;   raw_spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct task_struct *owner;   struct lockdep_map dep_map; } ;   176     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   446     union ktime {   s64 tv64; } ;    41     typedef union ktime ktime_t;  1129     struct timer_list {   struct hlist_node entry;   unsigned long expires;   void (*function)(unsigned long);   unsigned long data;   u32 flags;   int slack;   int start_pid;   void *start_site;   char start_comm[16U];   struct lockdep_map lockdep_map; } ;   240     struct hrtimer ;   241     enum hrtimer_restart ;   242     struct rb_node {   unsigned long __rb_parent_color;   struct rb_node *rb_right;   struct rb_node *rb_left; } ;    41     struct rb_root {   struct rb_node *rb_node; } ;   837     struct ctl_table ;   838     struct nsproxy ;   839     struct ctl_table_root ;   840     struct ctl_table_header ;   841     struct ctl_dir ;    37     typedef int proc_handler(struct ctl_table 
*, int, void *, size_t *, loff_t *);    57     struct ctl_table_poll {   atomic_t event;   wait_queue_head_t wait; } ;    96     struct ctl_table {   const char *procname;   void *data;   int maxlen;   umode_t mode;   struct ctl_table *child;   proc_handler *proc_handler;   struct ctl_table_poll *poll;   void *extra1;   void *extra2; } ;   117     struct ctl_node {   struct rb_node node;   struct ctl_table_header *header; } ;   122     struct __anonstruct____missing_field_name_96 {   struct ctl_table *ctl_table;   int used;   int count;   int nreg; } ;   122     union __anonunion____missing_field_name_95 {   struct __anonstruct____missing_field_name_96 __annonCompField21;   struct callback_head rcu; } ;   122     struct ctl_table_set ;   122     struct ctl_table_header {   union __anonunion____missing_field_name_95 __annonCompField22;   struct completion *unregistering;   struct ctl_table *ctl_table_arg;   struct ctl_table_root *root;   struct ctl_table_set *set;   struct ctl_dir *parent;   struct ctl_node *node; } ;   143     struct ctl_dir {   struct ctl_table_header header;   struct rb_root root; } ;   149     struct ctl_table_set {   int (*is_seen)(struct ctl_table_set *);   struct ctl_dir dir; } ;   154     struct ctl_table_root {   struct ctl_table_set default_set;   struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *);   int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;   261     struct workqueue_struct ;   262     struct work_struct ;    54     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   107     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq;   int cpu; } ;   268     struct notifier_block ;    53     struct notifier_block {   int (*notifier_call)(struct notifier_block *, unsigned long, void *);   struct notifier_block *next;   int priority; } ;    58     struct pm_message {   int event; } ;    64     typedef struct pm_message pm_message_t;    65     struct dev_pm_ops {   int (*prepare)(struct device *);   void (*complete)(struct device *);   int (*suspend)(struct device *);   int (*resume)(struct device *);   int (*freeze)(struct device *);   int (*thaw)(struct device *);   int (*poweroff)(struct device *);   int (*restore)(struct device *);   int (*suspend_late)(struct device *);   int (*resume_early)(struct device *);   int (*freeze_late)(struct device *);   int (*thaw_early)(struct device *);   int (*poweroff_late)(struct device *);   int (*restore_early)(struct device *);   int (*suspend_noirq)(struct device *);   int (*resume_noirq)(struct device *);   int (*freeze_noirq)(struct device *);   int (*thaw_noirq)(struct device *);   int (*poweroff_noirq)(struct device *);   int (*restore_noirq)(struct device *);   int (*runtime_suspend)(struct device *);   int (*runtime_resume)(struct device *);   int (*runtime_idle)(struct device *); } ;   320     enum rpm_status {   RPM_ACTIVE = 0,   RPM_RESUMING = 1,   RPM_SUSPENDED = 2,   RPM_SUSPENDING = 3 } ;   327     enum rpm_request {   RPM_REQ_NONE = 0,   RPM_REQ_IDLE = 1,   RPM_REQ_SUSPEND = 2,   RPM_REQ_AUTOSUSPEND = 3,   RPM_REQ_RESUME = 4 } ;   335     struct wakeup_source ;   336     struct wake_irq ;   337     struct pm_domain_data ;   338     struct pm_subsys_data {   spinlock_t lock;   unsigned int refcount;   struct list_head clock_list;   struct pm_domain_data *domain_data; } ;   556     struct dev_pm_qos ;   556     struct 
dev_pm_info {   pm_message_t power_state;   unsigned char can_wakeup;   unsigned char async_suspend;   bool is_prepared;   bool is_suspended;   bool is_noirq_suspended;   bool is_late_suspended;   bool early_init;   bool direct_complete;   spinlock_t lock;   struct list_head entry;   struct completion completion;   struct wakeup_source *wakeup;   bool wakeup_path;   bool syscore;   bool no_pm_callbacks;   struct timer_list suspend_timer;   unsigned long timer_expires;   struct work_struct work;   wait_queue_head_t wait_queue;   struct wake_irq *wakeirq;   atomic_t usage_count;   atomic_t child_count;   unsigned char disable_depth;   unsigned char idle_notification;   unsigned char request_pending;   unsigned char deferred_resume;   unsigned char run_wake;   unsigned char runtime_auto;   bool ignore_children;   unsigned char no_callbacks;   unsigned char irq_safe;   unsigned char use_autosuspend;   unsigned char timer_autosuspends;   unsigned char memalloc_noio;   enum rpm_request request;   enum rpm_status runtime_status;   int runtime_error;   int autosuspend_delay;   unsigned long last_busy;   unsigned long active_jiffies;   unsigned long suspended_jiffies;   unsigned long accounting_timestamp;   struct pm_subsys_data *subsys_data;   void (*set_latency_tolerance)(struct device *, s32 );   struct dev_pm_qos *qos; } ;   616     struct dev_pm_domain {   struct dev_pm_ops ops;   void (*detach)(struct device *, bool );   int (*activate)(struct device *);   void (*sync)(struct device *);   void (*dismiss)(struct device *); } ;    26     struct ldt_struct ;    26     struct vdso_image ;    26     struct __anonstruct_mm_context_t_161 {   struct ldt_struct *ldt;   unsigned short ia32_compat;   struct mutex lock;   void *vdso;   const struct vdso_image *vdso_image;   atomic_t perf_rdpmc_allowed; } ;    26     typedef struct __anonstruct_mm_context_t_161 mm_context_t;    22     struct bio_vec ;  1238     struct llist_node ;    64     struct llist_node {   struct llist_node *next; } ;    37     struct cred ;    19     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct____missing_field_name_197 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct __anonstruct____missing_field_name_198 {   struct callback_head dup_xol_work;   unsigned long dup_xol_addr; } ;    73     union __anonunion____missing_field_name_196 {   struct __anonstruct____missing_field_name_197 __annonCompField35;   struct __anonstruct____missing_field_name_198 __annonCompField36; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion____missing_field_name_196 __annonCompField37;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    94     struct return_instance {   struct uprobe *uprobe;   unsigned long func;   unsigned long stack;   unsigned long orig_ret_vaddr;   bool chained;   struct return_instance *next; } ;   110     struct xol_area ;   111     struct uprobes_state {   struct xol_area *xol_area; } ;   150     struct address_space ;   151     struct mem_cgroup ;   152     union __anonunion____missing_field_name_199 {   struct address_space *mapping;   void *s_mem;   atomic_t 
compound_mapcount; } ;   152     union __anonunion____missing_field_name_201 {   unsigned long index;   void *freelist; } ;   152     struct __anonstruct____missing_field_name_205 {   unsigned short inuse;   unsigned short objects;   unsigned char frozen; } ;   152     union __anonunion____missing_field_name_204 {   atomic_t _mapcount;   struct __anonstruct____missing_field_name_205 __annonCompField40;   int units; } ;   152     struct __anonstruct____missing_field_name_203 {   union __anonunion____missing_field_name_204 __annonCompField41;   atomic_t _refcount; } ;   152     union __anonunion____missing_field_name_202 {   unsigned long counters;   struct __anonstruct____missing_field_name_203 __annonCompField42;   unsigned int active; } ;   152     struct __anonstruct____missing_field_name_200 {   union __anonunion____missing_field_name_201 __annonCompField39;   union __anonunion____missing_field_name_202 __annonCompField43; } ;   152     struct dev_pagemap ;   152     struct __anonstruct____missing_field_name_207 {   struct page *next;   int pages;   int pobjects; } ;   152     struct __anonstruct____missing_field_name_208 {   unsigned long compound_head;   unsigned int compound_dtor;   unsigned int compound_order; } ;   152     struct __anonstruct____missing_field_name_209 {   unsigned long __pad;   pgtable_t pmd_huge_pte; } ;   152     union __anonunion____missing_field_name_206 {   struct list_head lru;   struct dev_pagemap *pgmap;   struct __anonstruct____missing_field_name_207 __annonCompField45;   struct callback_head callback_head;   struct __anonstruct____missing_field_name_208 __annonCompField46;   struct __anonstruct____missing_field_name_209 __annonCompField47; } ;   152     struct kmem_cache ;   152     union __anonunion____missing_field_name_210 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache; } ;   152     struct page {   unsigned long flags;   union __anonunion____missing_field_name_199 __annonCompField38;   struct __anonstruct____missing_field_name_200 __annonCompField44;   union __anonunion____missing_field_name_206 __annonCompField48;   union __anonunion____missing_field_name_210 __annonCompField49;   struct mem_cgroup *mem_cgroup; } ;   196     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;   281     struct userfaultfd_ctx ;   281     struct vm_userfaultfd_ctx {   struct userfaultfd_ctx *ctx; } ;   288     struct __anonstruct_shared_211 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   288     struct anon_vma ;   288     struct vm_operations_struct ;   288     struct mempolicy ;   288     struct vm_area_struct {   unsigned long vm_start;   unsigned long vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   struct __anonstruct_shared_211 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy;   struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;   361     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   366     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   380     struct task_rss_stat {   int events;   int count[4U]; } ;   388     struct mm_rss_stat {   atomic_long_t count[4U]; } ;   393     
struct kioctx_table ;   394     struct linux_binfmt ;   394     struct mmu_notifier_mm ;   394     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end;   pgd_t *pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   atomic_long_t nr_pmds;   int map_count;   spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long data_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct file *exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state;   void *bd_addr;   atomic_long_t hugetlb_usage;   struct work_struct async_put_work; } ;   564     struct vm_fault ;   615     struct vdso_image {   void *data;   unsigned long size;   unsigned long alt;   unsigned long alt_len;   long sym_vvar_start;   long sym_vvar_page;   long sym_hpet_page;   long sym_pvclock_page;   long sym_VDSO32_NOTE_MASK;   long sym___kernel_sigreturn;   long sym___kernel_rt_sigreturn;   long sym___kernel_vsyscall;   long sym_int80_landing_pad; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    18     typedef __u64 Elf64_Off;    20     typedef __u32 Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;   219     struct elf64_hdr {   unsigned char e_ident[16U];   Elf64_Half e_type;   Elf64_Half e_machine;   Elf64_Word e_version;   Elf64_Addr e_entry;   Elf64_Off e_phoff;   Elf64_Off e_shoff;   Elf64_Word e_flags;   Elf64_Half e_ehsize;   Elf64_Half e_phentsize;   Elf64_Half e_phnum;   Elf64_Half e_shentsize;   Elf64_Half e_shnum;   Elf64_Half e_shstrndx; } ;   235     typedef struct elf64_hdr Elf64_Ehdr;   313     struct elf64_shdr {   Elf64_Word sh_name;   Elf64_Word sh_type;   Elf64_Xword sh_flags;   Elf64_Addr sh_addr;   Elf64_Off sh_offset;   Elf64_Xword sh_size;   Elf64_Word sh_link;   Elf64_Word sh_info;   Elf64_Xword sh_addralign;   Elf64_Xword sh_entsize; } ;   325     typedef struct elf64_shdr Elf64_Shdr;    53     union __anonunion____missing_field_name_216 {   unsigned long bitmap[4U];   struct callback_head callback_head; } ;    53     struct idr_layer {   int prefix;   int layer;   struct idr_layer *ary[256U];   int 
count;   union __anonunion____missing_field_name_216 __annonCompField50; } ;    41     struct idr {   struct idr_layer *hint;   struct idr_layer *top;   int layers;   int cur;   spinlock_t lock;   int id_free_cnt;   struct idr_layer *id_free; } ;   124     struct ida_bitmap {   long nr_busy;   unsigned long bitmap[15U]; } ;   167     struct ida {   struct idr idr;   struct ida_bitmap *free_bitmap; } ;   199     struct dentry ;   200     struct iattr ;   201     struct super_block ;   202     struct file_system_type ;   203     struct kernfs_open_node ;   204     struct kernfs_iattrs ;   227     struct kernfs_root ;   227     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    85     struct kernfs_node ;    85     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    89     struct kernfs_ops ;    89     struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size;   struct kernfs_node *notify_next; } ;    96     union __anonunion____missing_field_name_221 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    96     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion____missing_field_name_221 __annonCompField51;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   138     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int (*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *);   int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;   157     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   173     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   void *priv;   struct mutex mutex;   struct mutex prealloc_mutex;   int event;   struct list_head list;   char *prealloc_buf;   size_t atomic_write_len;   bool mmapped;   const struct vm_operations_struct *vm_ops; } ;   191     struct kernfs_ops {   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   bool prealloc;   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   499     struct sock ;   500     struct kobject ;   501     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   507     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool 
ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   umode_t  (*is_bin_visible)(struct kobject *, struct bin_attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    92     struct bin_attribute {   struct attribute attr;   size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t  (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   165     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;   530     struct kref {   atomic_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   115     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   123     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   131     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   148     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   223     struct kernel_param ;   228     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    62     struct kparam_string ;    62     struct kparam_array ;    62     union __anonunion____missing_field_name_224 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    62     struct kernel_param {   const char *name;   struct module *mod;   const struct kernel_param_ops *ops;   const u16 perm;   s8 level;   u8 flags;   union __anonunion____missing_field_name_224 __annonCompField52; } ;    83     struct kparam_string {   unsigned int maxlen;   char *string; } ;    89     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   470     struct latch_tree_node {   struct rb_node node[2U]; } ;   211     struct mod_arch_specific { } ;    38     struct module_param_attrs ;    38     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    48     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct 
module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;    74     struct exception_table_entry ;   290     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   297     struct mod_tree_node {   struct module *mod;   struct latch_tree_node node; } ;   304     struct module_layout {   void *base;   unsigned int size;   unsigned int text_size;   unsigned int ro_size;   struct mod_tree_node mtn; } ;   318     struct mod_kallsyms {   Elf64_Sym *symtab;   unsigned int num_symtab;   char *strtab; } ;   332     struct klp_modinfo {   Elf64_Ehdr hdr;   Elf64_Shdr *sechdrs;   char *secstrings;   unsigned int symndx; } ;   340     struct module_sect_attrs ;   340     struct module_notes_attrs ;   340     struct trace_event_call ;   340     struct trace_enum_map ;   340     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const unsigned long *crcs;   unsigned int num_syms;   struct mutex param_lock;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const unsigned long *gpl_crcs;   const struct kernel_symbol *unused_syms;   const unsigned long *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const unsigned long *unused_gpl_crcs;   bool sig_ok;   bool async_probe_requested;   const struct kernel_symbol *gpl_future_syms;   const unsigned long *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   struct module_layout core_layout;   struct module_layout init_layout;   struct mod_arch_specific arch;   unsigned int taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   struct mod_kallsyms *kallsyms;   struct mod_kallsyms core_kallsyms;   struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct trace_event_call **trace_events;   unsigned int num_trace_events;   struct trace_enum_map **trace_enums;   unsigned int num_trace_enums;   unsigned int num_ftrace_callsites;   unsigned long *ftrace_callsites;   bool klp;   bool klp_alive;   struct klp_modinfo *klp_info;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   atomic_t refcnt;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;    84     struct plist_node {   int prio;   struct list_head prio_list;   struct list_head node_list; } ;     4     typedef unsigned long cputime_t;    25     struct sem_undo_list ;    25     struct sysv_sem {   struct sem_undo_list *undo_list; } ;    78     struct user_struct ;    26     struct sysv_shm {   struct list_head shm_clist; } ;    24     struct __anonstruct_sigset_t_232 {   
unsigned long sig[1U]; } ;    24     typedef struct __anonstruct_sigset_t_232 sigset_t;    25     struct siginfo ;    17     typedef void __signalfn_t(int);    18     typedef __signalfn_t *__sighandler_t;    20     typedef void __restorefn_t();    21     typedef __restorefn_t *__sigrestore_t;    34     union sigval {   int sival_int;   void *sival_ptr; } ;    10     typedef union sigval sigval_t;    11     struct __anonstruct__kill_234 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;    11     struct __anonstruct__timer_235 {   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;    11     struct __anonstruct__rt_236 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   sigval_t _sigval; } ;    11     struct __anonstruct__sigchld_237 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;    11     struct __anonstruct__addr_bnd_240 {   void *_lower;   void *_upper; } ;    11     union __anonunion____missing_field_name_239 {   struct __anonstruct__addr_bnd_240 _addr_bnd;   __u32 _pkey; } ;    11     struct __anonstruct__sigfault_238 {   void *_addr;   short _addr_lsb;   union __anonunion____missing_field_name_239 __annonCompField53; } ;    11     struct __anonstruct__sigpoll_241 {   long _band;   int _fd; } ;    11     struct __anonstruct__sigsys_242 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;    11     union __anonunion__sifields_233 {   int _pad[28U];   struct __anonstruct__kill_234 _kill;   struct __anonstruct__timer_235 _timer;   struct __anonstruct__rt_236 _rt;   struct __anonstruct__sigchld_237 _sigchld;   struct __anonstruct__sigfault_238 _sigfault;   struct __anonstruct__sigpoll_241 _sigpoll;   struct __anonstruct__sigsys_242 _sigsys; } ;    11     struct siginfo {   int si_signo;   int si_errno;   int si_code;   union __anonunion__sifields_233 _sifields; } ;   118     typedef struct siginfo siginfo_t;    22     struct sigpending {   struct list_head list;   sigset_t signal; } ;   257     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;   271     struct k_sigaction {   struct sigaction sa; } ;   457     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   464     struct pid_namespace ;   464     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;    43     struct seccomp_filter ;    44     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;    40     struct rt_mutex_waiter ;    41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;    11     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   u8 state;   u8 is_rel;   int start_pid;   void *start_site;   char start_comm[16U]; } ;   125    
 struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t  (*get_time)();   ktime_t offset; } ;   158     struct hrtimer_cpu_base {   raw_spinlock_t lock;   seqcount_t seq;   struct hrtimer *running;   unsigned int cpu;   unsigned int active_bases;   unsigned int clock_was_set_seq;   bool migration_enabled;   bool nohz_active;   unsigned char in_hrtirq;   unsigned char hres_active;   unsigned char hang_detected;   ktime_t expires_next;   struct hrtimer *next_timer;   unsigned int nr_events;   unsigned int nr_retries;   unsigned int nr_hangs;   unsigned int max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;    12     enum kcov_mode {   KCOV_MODE_DISABLED = 0,   KCOV_MODE_TRACE = 1 } ;    17     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;    45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    41     struct assoc_array_ptr ;    41     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ;    31     typedef int32_t key_serial_t;    34     typedef uint32_t key_perm_t;    35     struct key ;    36     struct signal_struct ;    37     struct key_type ;    41     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;    91     union key_payload {   void *rcu_data0;   void *data[4U]; } ;   128     union __anonunion____missing_field_name_263 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;   128     struct key_user ;   128     union __anonunion____missing_field_name_264 {   time_t expiry;   time_t revoked_at; } ;   128     struct __anonstruct____missing_field_name_266 {   struct key_type *type;   char *description; } ;   128     union __anonunion____missing_field_name_265 {   struct keyring_index_key index_key;   struct __anonstruct____missing_field_name_266 __annonCompField56; } ;   128     struct __anonstruct____missing_field_name_268 {   struct list_head name_link;   struct assoc_array keys; } ;   128     union __anonunion____missing_field_name_267 {   union key_payload payload;   struct __anonstruct____missing_field_name_268 __annonCompField58;   int reject_error; } ;   128     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion____missing_field_name_263 __annonCompField54;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion____missing_field_name_264 __annonCompField55;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion____missing_field_name_265 __annonCompField57;   union __anonunion____missing_field_name_267 __annonCompField59;   int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;   377     struct audit_context ;    27     struct group_info {   atomic_t usage;   int ngroups;   int nblocks;   kgid_t small_block[32U];   kgid_t *blocks[0U]; } ;    90     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t 
cap_bset;   kernel_cap_t cap_ambient;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   377     struct percpu_ref ;    55     typedef void percpu_ref_func_t(struct percpu_ref *);    68     struct percpu_ref {   atomic_long_t count;   unsigned long percpu_count_ptr;   percpu_ref_func_t *release;   percpu_ref_func_t *confirm_switch;   bool force_atomic;   struct callback_head rcu; } ;   327     enum rcu_sync_type {   RCU_SYNC = 0,   RCU_SCHED_SYNC = 1,   RCU_BH_SYNC = 2 } ;   333     struct rcu_sync {   int gp_state;   int gp_count;   wait_queue_head_t gp_wait;   int cb_state;   struct callback_head cb_head;   enum rcu_sync_type gp_type; } ;    65     struct percpu_rw_semaphore {   struct rcu_sync rss;   unsigned int *fast_read_ctr;   struct rw_semaphore rw_sem;   atomic_t slow_read_ctr;   wait_queue_head_t write_waitq; } ;    54     struct cgroup ;    55     struct cgroup_root ;    56     struct cgroup_subsys ;    57     struct cgroup_taskset ;   101     struct cgroup_file {   struct kernfs_node *kn; } ;    90     struct cgroup_subsys_state {   struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   struct list_head sibling;   struct list_head children;   int id;   unsigned int flags;   u64 serial_nr;   atomic_t online_cnt;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   141     struct css_set {   atomic_t refcount;   struct hlist_node hlist;   struct list_head tasks;   struct list_head mg_tasks;   struct list_head cgrp_links;   struct cgroup *dfl_cgrp;   struct cgroup_subsys_state *subsys[13U];   struct list_head mg_preload_node;   struct list_head mg_node;   struct cgroup *mg_src_cgrp;   struct cgroup *mg_dst_cgrp;   struct css_set *mg_dst_cset;   struct list_head e_cset_node[13U];   struct list_head task_iters;   bool dead;   struct callback_head callback_head; } ;   221     struct cgroup {   struct cgroup_subsys_state self;   unsigned long flags;   int id;   int level;   int populated_cnt;   struct kernfs_node *kn;   struct cgroup_file procs_file;   struct cgroup_file events_file;   u16 subtree_control;   u16 subtree_ss_mask;   u16 old_subtree_control;   u16 old_subtree_ss_mask;   struct cgroup_subsys_state *subsys[13U];   struct cgroup_root *root;   struct list_head cset_links;   struct list_head e_csets[13U];   struct list_head pidlists;   struct mutex pidlist_mutex;   wait_queue_head_t offline_waitq;   struct work_struct release_agent_work;   int ancestor_ids[]; } ;   306     struct cgroup_root {   struct kernfs_root *kf_root;   unsigned int subsys_mask;   int hierarchy_id;   struct cgroup cgrp;   int cgrp_ancestor_id_storage;   atomic_t nr_cgrps;   struct list_head root_list;   unsigned int flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   345     struct cftype {   char name[64U];   unsigned long private;   size_t max_write_len;   unsigned int flags;   unsigned int file_offset;   struct cgroup_subsys *ss;   struct list_head node;   struct kernfs_ops *kf_ops;   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * 
(*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   struct lock_class_key lockdep_key; } ;   430     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_released)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   void (*css_reset)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_taskset *);   void (*attach)(struct cgroup_taskset *);   void (*post_attach)();   int (*can_fork)(struct task_struct *);   void (*cancel_fork)(struct task_struct *);   void (*fork)(struct task_struct *);   void (*exit)(struct task_struct *);   void (*free)(struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   bool early_init;   bool implicit_on_dfl;   bool broken_hierarchy;   bool warned_broken_hierarchy;   int id;   const char *name;   const char *legacy_name;   struct cgroup_root *root;   struct idr css_idr;   struct list_head cfts;   struct cftype *dfl_cftypes;   struct cftype *legacy_cftypes;   unsigned int depends_on; } ;   128     struct futex_pi_state ;   129     struct robust_list_head ;   130     struct bio_list ;   131     struct fs_struct ;   132     struct perf_event_context ;   133     struct blk_plug ;   135     struct nameidata ;   188     struct cfs_rq ;   189     struct task_group ;   492     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   534     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t ac_stime;   unsigned long ac_minflt;   unsigned long ac_majflt; } ;   542     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   549     struct prev_cputime {   cputime_t utime;   cputime_t stime;   raw_spinlock_t lock; } ;   574     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   590     struct task_cputime_atomic {   atomic64_t utime;   atomic64_t stime;   atomic64_t sum_exec_runtime; } ;   612     struct thread_group_cputimer {   struct task_cputime_atomic cputime_atomic;   bool running;   bool checking_timer; } ;   657     struct autogroup ;   658     struct tty_struct ;   658     struct taskstats ;   658     struct tty_audit_buf ;   658     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   atomic_t oom_victims;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   
struct autogroup *autogroup;   seqlock_t stats_lock;   cputime_t utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct pacct_struct pacct;   struct taskstats *stats;   unsigned int audit_tty;   struct tty_audit_buf *tty_audit_buf;   bool oom_flag_origin;   short oom_score_adj;   short oom_score_adj_min;   struct mutex cred_guard_mutex; } ;   833     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   unsigned long unix_inflight;   atomic_long_t pipe_bufs;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   878     struct backing_dev_info ;   879     struct reclaim_state ;   880     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   894     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   u64 blkio_start;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   u64 freepages_start;   u64 freepages_delay;   u32 freepages_count; } ;   951     struct wake_q_node {   struct wake_q_node *next; } ;  1183     struct io_context ;  1217     struct pipe_inode_info ;  1218     struct uts_namespace ;  1219     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;  1226     struct sched_avg {   u64 last_update_time;   u64 load_sum;   u32 util_sum;   u32 period_contrib;   unsigned long load_avg;   unsigned long util_avg; } ;  1284     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1319     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1356     struct rt_rq ;  1356     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   unsigned short on_rq;   unsigned short on_list;   struct sched_rt_entity *back;   struct 
sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1374     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;  1438     struct tlbflush_unmap_batch {   struct cpumask cpumask;   bool flush_required;   bool writable; } ;  1457     struct sched_class ;  1457     struct files_struct ;  1457     struct compat_robust_list_head ;  1457     struct numa_group ;  1457     struct ftrace_ret_stack ;  1457     struct kcov ;  1457     struct task_struct {   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   unsigned int wakee_flips;   unsigned long wakee_flip_decay_ts;   struct task_struct *last_wakee;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int btrace_seq;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   unsigned long rcu_tasks_nvcsw;   bool rcu_tasks_holdout;   struct list_head rcu_tasks_holdout_list;   int rcu_tasks_idle_cpu;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   u32 vmacache_seqnum;   struct vm_area_struct *vmacache[4U];   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned long jobctl;   unsigned int personality;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   unsigned char sched_migrated;   unsigned char sched_remote_wakeup;   unsigned char;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char memcg_may_oom;   unsigned char memcg_kmem_skip_account;   unsigned char brk_randomized;   unsigned long atomic_flags;   struct restart_block restart_block;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct *group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   u64 start_time;   u64 real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   struct nameidata *nameidata;   struct sysv_sem sysvsem;   struct sysv_shm sysvshm;   unsigned long last_switch_count;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   unsigned int 
sas_ss_flags;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct wake_q_node wake_q;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   unsigned int in_ubsan;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults;   unsigned long total_numa_faults;   unsigned long numa_faults_locality[3U];   unsigned long numa_pages_migrated;   struct tlbflush_unmap_batch tlb_ubc;   struct callback_head rcu;   struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   u64 timer_slack_ns;   u64 default_timer_slack_ns;   unsigned int kasan_depth;   int curr_ret_stack;   struct ftrace_ret_stack *ret_stack;   unsigned long long ftrace_timestamp;   atomic_t trace_overrun;   atomic_t tracing_graph_pause;   unsigned long trace;   unsigned long trace_recursion;   enum kcov_mode kcov_mode;   unsigned int kcov_size;   void *kcov_area;   struct kcov *kcov;   struct mem_cgroup *memcg_in_oom;   gfp_t memcg_oom_gfp_mask;   int memcg_oom_order;   unsigned int memcg_nr_pages_over_high;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg;   unsigned long task_state_change;   int pagefault_disabled;   struct task_struct *oom_reaper_list;   struct thread_struct thread; } ;   158     struct iovec {   void *iov_base;   __kernel_size_t iov_len; } ;    21     struct kvec {   
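/* kvec is the kernel-space counterpart of iovec: iov_base points into kernel memory rather than user memory */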
void *iov_base;   size_t iov_len; } ;    27     union __anonunion____missing_field_name_293 {   const struct iovec *iov;   const struct kvec *kvec;   const struct bio_vec *bvec; } ;    27     struct iov_iter {   int type;   size_t iov_offset;   size_t count;   union __anonunion____missing_field_name_293 __annonCompField65;   unsigned long nr_segs; } ;    11     typedef unsigned short __kernel_sa_family_t;    23     typedef __kernel_sa_family_t sa_family_t;    24     struct sockaddr {   sa_family_t sa_family;   char sa_data[14U]; } ;    38     struct kiocb ;    54     struct poll_table_struct ;    55     struct net ;    72     struct fasync_struct ;    63     struct exception_table_entry {   int insn;   int fixup;   int handler; } ;   161     struct in6_addr ;   145     struct sk_buff ;   184     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    93     struct hlist_bl_node ;    93     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_303 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_302 {   struct __anonstruct____missing_field_name_303 __annonCompField66; } ;   114     struct lockref {   union __anonunion____missing_field_name_302 __annonCompField67; } ;    75     struct path ;    76     struct vfsmount ;    77     struct __anonstruct____missing_field_name_305 {   u32 hash;   u32 len; } ;    77     union __anonunion____missing_field_name_304 {   struct __anonstruct____missing_field_name_305 __annonCompField68;   u64 hash_len; } ;    77     struct qstr {   union __anonunion____missing_field_name_304 __annonCompField69;   const unsigned char *name; } ;    65     struct dentry_operations ;    65     union __anonunion____missing_field_name_306 {   struct list_head d_lru;   wait_queue_head_t *d_wait; } ;    65     union __anonunion_d_u_307 {   struct hlist_node d_alias;   struct hlist_bl_node d_in_lookup_hash;   struct callback_head d_rcu; } ;    65     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   union __anonunion____missing_field_name_306 __annonCompField70;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_307 d_u; } ;   121     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool );   struct inode * (*d_select_inode)(struct dentry *, unsigned int);   struct dentry * (*d_real)(struct dentry *, struct inode *); } ;   577     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    19     struct shrink_control {   gfp_t gfp_mask;   unsigned 
long nr_to_scan;   int nid;   struct mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    80     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;    63     struct __anonstruct____missing_field_name_309 {   struct radix_tree_node *parent;   void *private_data; } ;    63     union __anonunion____missing_field_name_308 {   struct __anonstruct____missing_field_name_309 __annonCompField71;   struct callback_head callback_head; } ;    63     struct radix_tree_node {   unsigned char shift;   unsigned char offset;   unsigned int count;   union __anonunion____missing_field_name_308 __annonCompField72;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   106     struct radix_tree_root {   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    47     struct block_device ;    19     struct bio_vec {   struct page *bv_page;   unsigned int bv_len;   unsigned int bv_offset; } ;   268     struct delayed_call {   void (*fn)(void *);   void *arg; } ;   261     struct bdi_writeback ;   262     struct export_operations ;   264     struct kstatfs ;   265     struct swap_info_struct ;   266     struct fscrypt_info ;   267     struct fscrypt_operations ;    76     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   265     struct percpu_counter {   raw_spinlock_t lock;   s64 count;   struct list_head list;   s32 *counters; } ;   213     struct dquot ;   214     struct kqid ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_315 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_315 kprojid_t;   181     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_316 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_316 __annonCompField74;   enum quota_type type; } ;   184     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time_t dqb_btime;   time_t dqb_itime; } ;   206     struct quota_format_type ;   207     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   272     struct dquot {   struct hlist_node 
dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   299     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   311     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   328     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   int d_ino_warns;   int d_spc_warns;   u64 d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   351     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   397     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   408     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   421     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   437     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   501     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   531     struct writeback_control ;   532     struct kiocb {   struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int 
ki_flags; } ;   371     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   428     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   atomic_t i_mmap_writable;   struct rb_root i_mmap;   struct rw_semaphore i_mmap_rwsem;   unsigned long nrpages;   unsigned long nrexceptional;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   spinlock_t private_lock;   struct list_head private_list;   void *private_data; } ;   449     struct request_queue ;   450     struct hd_struct ;   450     struct gendisk ;   450     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   struct list_head bd_inodes;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   566     struct posix_acl ;   592     struct inode_operations ;   592     union __anonunion____missing_field_name_321 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   592     union __anonunion____missing_field_name_322 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   592     struct file_lock_context ;   592     struct cdev ;   592     union __anonunion____missing_field_name_323 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev;   char *i_link;   unsigned int i_dir_seq; } ;   592     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_321 __annonCompField75;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   
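/* i_atime, i_mtime and (below) i_ctime are the standard access / modification / inode-change timestamps */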
struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct rw_semaphore i_rwsem;   unsigned long dirtied_when;   unsigned long dirtied_time_when;   struct hlist_node i_hash;   struct list_head i_io_list;   struct bdi_writeback *i_wb;   int i_wb_frn_winner;   u16 i_wb_frn_avg_time;   u16 i_wb_frn_history;   struct list_head i_lru;   struct list_head i_sb_list;   union __anonunion____missing_field_name_322 __annonCompField76;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock_context *i_flctx;   struct address_space i_data;   struct list_head i_devices;   union __anonunion____missing_field_name_323 __annonCompField77;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   struct fscrypt_info *i_crypt_info;   void *i_private; } ;   870     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   878     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   901     union __anonunion_f_u_324 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   901     struct file {   union __anonunion_f_u_324 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   986     typedef void *fl_owner_t;   987     struct file_lock ;   988     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   994     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   fl_owner_t  (*lm_get_owner)(fl_owner_t );   void (*lm_put_owner)(fl_owner_t );   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, int);   bool  (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock *, int, struct list_head *);   void (*lm_setup)(struct file_lock *, void **); } ;  1021     struct nlm_lockowner ;  1022     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct __anonstruct_afs_326 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_325 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_326 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct list_head fl_list;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   
unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_325 fl_u; } ;  1074     struct file_lock_context {   spinlock_t flc_lock;   struct list_head flc_flock;   struct list_head flc_posix;   struct list_head flc_lease; } ;  1287     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1322     struct sb_writers {   int frozen;   wait_queue_head_t wait_unfrozen;   struct percpu_rw_semaphore rw_sem[3U]; } ;  1348     struct super_operations ;  1348     struct xattr_handler ;  1348     struct mtd_info ;  1348     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_iflags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   const struct fscrypt_operations *s_cop;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   unsigned int s_quota_types;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct hlist_head s_pins;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu;   struct work_struct destroy_work;   struct mutex s_sync_lock;   int s_stack_depth;   spinlock_t s_inode_list_lock;   struct list_head s_inodes; } ;  1594     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1608     struct dir_context ;  1633     struct dir_context {   int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1640     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   int (*iterate_shared)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   
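/* the remaining file_operations hooks cover async fsync, fasync notification, lock/flock locking, sendpage/splice zero-copy I/O, leases, fallocate, fdinfo, and the copy/clone/dedupe range ops */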
int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **, void **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   void (*show_fdinfo)(struct seq_file *, struct file *);   ssize_t  (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int);   int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 );   ssize_t  (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;  1709     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);   int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int);   ssize_t  (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t );   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*removexattr)(struct dentry *, const char *);   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1766     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_super)(struct super_block *);   int (*freeze_fs)(struct super_block *);   int (*thaw_super)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int 
(*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   struct dquot ** (*get_dquots)(struct inode *);   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);   long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;  2005     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;  3176     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   const struct file *file;   void *private; } ;    30     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   222     struct pinctrl ;   223     struct pinctrl_state ;   194     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *init_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    43     struct ratelimit_state {   raw_spinlock_t lock;   int interval;   int burst;   int printed;   int missed;   unsigned long begin; } ;    48     struct dma_map_ops ;    48     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    24     struct device_private ;    25     struct device_driver ;    26     struct driver_private ;    27     struct class ;    28     struct subsys_private ;    29     struct bus_type ;    30     struct device_node ;    31     struct fwnode_handle ;    32     struct iommu_ops ;    33     struct iommu_group ;    61     struct device_attribute ;    61     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   const struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   142     struct device_type ;   201     enum probe_type {   PROBE_DEFAULT_STRATEGY = 0,   PROBE_PREFER_ASYNCHRONOUS = 1,   PROBE_FORCE_SYNCHRONOUS = 2 } ;   
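An aside on how the callback tables above fit together in practice: the seq_file / seq_operations pair (declared a few lines up) is the standard iterator interface behind most virtual files, and it reaches the VFS through the file_operations table shown earlier. The following is a minimal sketch of that wiring, not part of the traced source; it assumes a 4.x kernel matching these declarations, and the demo_* names and the "demo_records" procfs entry are invented for illustration.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* iterate over three dummy records; *pos is the record index */
static void *demo_start(struct seq_file *m, loff_t *pos)
{
    return *pos < 3 ? pos : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
    ++*pos;
    return *pos < 3 ? pos : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
    seq_printf(m, "record %lld\n", *(loff_t *)v);
    return 0;
}

static const struct seq_operations demo_seq_ops = {
    .start = demo_start,
    .next  = demo_next,
    .stop  = demo_stop,
    .show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
    return seq_open(file, &demo_seq_ops);
}

/* the generic seq_* helpers fill the file_operations slots declared above */
static const struct file_operations demo_fops = {
    .owner   = THIS_MODULE,
    .open    = demo_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release,
};

static int __init demo_init(void)
{
    return proc_create("demo_records", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
    remove_proc_entry("demo_records", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Reading such a file (e.g. cat /proc/demo_records) drives start, then show/next for each record, then stop, which is exactly the callback order a verifier models when it analyzes a driver built around these tables.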
207     struct of_device_id ;   207     struct acpi_device_id ;   207     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   enum probe_type probe_type;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   357     struct class_attribute ;   357     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   450     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   518     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   546     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   699     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   708     struct irq_domain ;   708     struct dma_coherent_mem ;   708     struct cma ;   708     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct irq_domain *msi_domain;   struct dev_pin_info *pins;   struct list_head msi_list;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct fwnode_handle *fwnode;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   bool offline_disabled;   bool offline; } ;   862     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct wake_irq *wakeirq;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t 
start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;  1339     struct dma_attrs {   unsigned long flags[1U]; } ;    70     struct scatterlist ;    89     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;   273     struct vm_fault {   unsigned int flags;   gfp_t gfp_mask;   unsigned long pgoff;   void *virtual_address;   struct page *cow_page;   struct page *page;   void *entry;   unsigned long max_pgoff;   pte_t *pte; } ;   317     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*mremap)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int);   void (*map_pages)(struct vm_area_struct *, struct vm_fault *);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;  2409     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    21     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;   406     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *);   void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;    15     typedef u64 netdev_features_t;    69     union __anonunion_in6_u_335 {   __u8 u6_addr8[16U];   __be16 u6_addr16[8U];   __be32 u6_addr32[4U]; } ;    69     struct in6_addr {   union __anonunion_in6_u_335 in6_u; } ;    46     struct ethhdr {   unsigned char h_dest[6U];   unsigned char 
h_source[6U];   __be16 h_proto; } ;   199     struct pipe_buf_operations ;   199     struct pipe_buffer {   struct page *page;   unsigned int offset;   unsigned int len;   const struct pipe_buf_operations *ops;   unsigned int flags;   unsigned long private; } ;    27     struct pipe_inode_info {   struct mutex mutex;   wait_queue_head_t wait;   unsigned int nrbufs;   unsigned int curbuf;   unsigned int buffers;   unsigned int readers;   unsigned int writers;   unsigned int files;   unsigned int waiting_writers;   unsigned int r_counter;   unsigned int w_counter;   struct page *tmp_page;   struct fasync_struct *fasync_readers;   struct fasync_struct *fasync_writers;   struct pipe_buffer *bufs;   struct user_struct *user; } ;    63     struct pipe_buf_operations {   int can_merge;   int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);   void (*release)(struct pipe_inode_info *, struct pipe_buffer *);   int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);   void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;   265     struct napi_struct ;   266     struct nf_conntrack {   atomic_t use; } ;   253     union __anonunion____missing_field_name_345 {   __be32 ipv4_daddr;   struct in6_addr ipv6_daddr;   char neigh_header[8U]; } ;   253     struct nf_bridge_info {   atomic_t use;   unsigned char orig_proto;   unsigned char pkt_otherhost;   unsigned char in_prerouting;   unsigned char bridged_dnat;   __u16 frag_max_size;   struct net_device *physindev;   struct net_device *physoutdev;   union __anonunion____missing_field_name_345 __annonCompField81; } ;   277     struct sk_buff_head {   struct sk_buff *next;   struct sk_buff *prev;   __u32 qlen;   spinlock_t lock; } ;   304     struct skb_frag_struct ;   304     typedef struct skb_frag_struct skb_frag_t;   305     struct __anonstruct_page_346 {   struct page *p; } ;   305     struct skb_frag_struct {   struct __anonstruct_page_346 page;   __u32 page_offset;   __u32 size; } ;   338     struct skb_shared_hwtstamps {   ktime_t hwtstamp; } ;   404     struct skb_shared_info {   unsigned char nr_frags;   __u8 tx_flags;   unsigned short gso_size;   unsigned short gso_segs;   unsigned short gso_type;   struct sk_buff *frag_list;   struct skb_shared_hwtstamps hwtstamps;   u32 tskey;   __be32 ip6_frag_id;   atomic_t dataref;   void *destructor_arg;   skb_frag_t frags[17U]; } ;   492     typedef unsigned int sk_buff_data_t;   493     struct __anonstruct____missing_field_name_348 {   u32 stamp_us;   u32 stamp_jiffies; } ;   493     union __anonunion____missing_field_name_347 {   u64 v64;   struct __anonstruct____missing_field_name_348 __annonCompField82; } ;   493     struct skb_mstamp {   union __anonunion____missing_field_name_347 __annonCompField83; } ;   556     union __anonunion____missing_field_name_351 {   ktime_t tstamp;   struct skb_mstamp skb_mstamp; } ;   556     struct __anonstruct____missing_field_name_350 {   struct sk_buff *next;   struct sk_buff *prev;   union __anonunion____missing_field_name_351 __annonCompField84; } ;   556     union __anonunion____missing_field_name_349 {   struct __anonstruct____missing_field_name_350 __annonCompField85;   struct rb_node rbnode; } ;   556     struct sec_path ;   556     struct __anonstruct____missing_field_name_353 {   __u16 csum_start;   __u16 csum_offset; } ;   556     union __anonunion____missing_field_name_352 {   __wsum csum;   struct __anonstruct____missing_field_name_353 __annonCompField87; } ;   556     union __anonunion____missing_field_name_354 {   unsigned int 
napi_id;   unsigned int sender_cpu; } ;   556     union __anonunion____missing_field_name_355 {   __u32 secmark;   __u32 offload_fwd_mark; } ;   556     union __anonunion____missing_field_name_356 {   __u32 mark;   __u32 reserved_tailroom; } ;   556     union __anonunion____missing_field_name_357 {   __be16 inner_protocol;   __u8 inner_ipproto; } ;   556     struct sk_buff {   union __anonunion____missing_field_name_349 __annonCompField86;   struct sock *sk;   struct net_device *dev;   char cb[48U];   unsigned long _skb_refdst;   void (*destructor)(struct sk_buff *);   struct sec_path *sp;   struct nf_conntrack *nfct;   struct nf_bridge_info *nf_bridge;   unsigned int len;   unsigned int data_len;   __u16 mac_len;   __u16 hdr_len;   __u16 queue_mapping;   unsigned char cloned;   unsigned char nohdr;   unsigned char fclone;   unsigned char peeked;   unsigned char head_frag;   unsigned char xmit_more;   __u32 headers_start[0U];   __u8 __pkt_type_offset[0U];   unsigned char pkt_type;   unsigned char pfmemalloc;   unsigned char ignore_df;   unsigned char nfctinfo;   unsigned char nf_trace;   unsigned char ip_summed;   unsigned char ooo_okay;   unsigned char l4_hash;   unsigned char sw_hash;   unsigned char wifi_acked_valid;   unsigned char wifi_acked;   unsigned char no_fcs;   unsigned char encapsulation;   unsigned char encap_hdr_csum;   unsigned char csum_valid;   unsigned char csum_complete_sw;   unsigned char csum_level;   unsigned char csum_bad;   unsigned char ndisc_nodetype;   unsigned char ipvs_property;   unsigned char inner_protocol_type;   unsigned char remcsum_offload;   __u16 tc_index;   __u16 tc_verd;   union __anonunion____missing_field_name_352 __annonCompField88;   __u32 priority;   int skb_iif;   __u32 hash;   __be16 vlan_proto;   __u16 vlan_tci;   union __anonunion____missing_field_name_354 __annonCompField89;   union __anonunion____missing_field_name_355 __annonCompField90;   union __anonunion____missing_field_name_356 __annonCompField91;   union __anonunion____missing_field_name_357 __annonCompField92;   __u16 inner_transport_header;   __u16 inner_network_header;   __u16 inner_mac_header;   __be16 protocol;   __u16 transport_header;   __u16 network_header;   __u16 mac_header;   __u32 headers_end[0U];   sk_buff_data_t tail;   sk_buff_data_t end;   unsigned char *head;   unsigned char *data;   unsigned int truesize;   atomic_t users; } ;   823     struct dst_entry ;  1402     struct dql {   unsigned int num_queued;   unsigned int adj_limit;   unsigned int last_obj_cnt;   unsigned int limit;   unsigned int num_completed;   unsigned int prev_ovlimit;   unsigned int prev_num_queued;   unsigned int prev_last_obj_cnt;   unsigned int lowest_slack;   unsigned long slack_start_time;   unsigned int max_limit;   unsigned int min_limit;   unsigned int slack_hold_time; } ;    43     struct __anonstruct_sync_serial_settings_360 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback; } ;    43     typedef struct __anonstruct_sync_serial_settings_360 sync_serial_settings;    50     struct __anonstruct_te1_settings_361 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback;   unsigned int slot_map; } ;    50     typedef struct __anonstruct_te1_settings_361 te1_settings;    55     struct __anonstruct_raw_hdlc_proto_362 {   unsigned short encoding;   unsigned short parity; } ;    55     typedef struct __anonstruct_raw_hdlc_proto_362 raw_hdlc_proto;    65     struct __anonstruct_fr_proto_363 {   unsigned int t391;   unsigned int t392;   
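/* t391/t392 are the Q.933 Annex A (LMI) polling timers; the n391-n393 event counters follow */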
unsigned int n391;   unsigned int n392;   unsigned int n393;   unsigned short lmi;   unsigned short dce; } ;    65     typedef struct __anonstruct_fr_proto_363 fr_proto;    69     struct __anonstruct_fr_proto_pvc_364 {   unsigned int dlci; } ;    69     typedef struct __anonstruct_fr_proto_pvc_364 fr_proto_pvc;    74     struct __anonstruct_fr_proto_pvc_info_365 {   unsigned int dlci;   char master[16U]; } ;    74     typedef struct __anonstruct_fr_proto_pvc_info_365 fr_proto_pvc_info;    79     struct __anonstruct_cisco_proto_366 {   unsigned int interval;   unsigned int timeout; } ;    79     typedef struct __anonstruct_cisco_proto_366 cisco_proto;   117     struct ifmap {   unsigned long mem_start;   unsigned long mem_end;   unsigned short base_addr;   unsigned char irq;   unsigned char dma;   unsigned char port; } ;   197     union __anonunion_ifs_ifsu_367 {   raw_hdlc_proto *raw_hdlc;   cisco_proto *cisco;   fr_proto *fr;   fr_proto_pvc *fr_pvc;   fr_proto_pvc_info *fr_pvc_info;   sync_serial_settings *sync;   te1_settings *te1; } ;   197     struct if_settings {   unsigned int type;   unsigned int size;   union __anonunion_ifs_ifsu_367 ifs_ifsu; } ;   216     union __anonunion_ifr_ifrn_368 {   char ifrn_name[16U]; } ;   216     union __anonunion_ifr_ifru_369 {   struct sockaddr ifru_addr;   struct sockaddr ifru_dstaddr;   struct sockaddr ifru_broadaddr;   struct sockaddr ifru_netmask;   struct sockaddr ifru_hwaddr;   short ifru_flags;   int ifru_ivalue;   int ifru_mtu;   struct ifmap ifru_map;   char ifru_slave[16U];   char ifru_newname[16U];   void *ifru_data;   struct if_settings ifru_settings; } ;   216     struct ifreq {   union __anonunion_ifr_ifrn_368 ifr_ifrn;   union __anonunion_ifr_ifru_369 ifr_ifru; } ;    18     typedef s32 compat_time_t;    39     typedef s32 compat_long_t;    44     typedef u32 compat_uptr_t;    45     struct compat_timespec {   compat_time_t tv_sec;   s32 tv_nsec; } ;   278     struct compat_robust_list {   compat_uptr_t next; } ;   282     struct compat_robust_list_head {   struct compat_robust_list list;   compat_long_t futex_offset;   compat_uptr_t list_op_pending; } ;    39     struct ethtool_cmd {   __u32 cmd;   __u32 supported;   __u32 advertising;   __u16 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 transceiver;   __u8 autoneg;   __u8 mdio_support;   __u32 maxtxpkt;   __u32 maxrxpkt;   __u16 speed_hi;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __u32 lp_advertising;   __u32 reserved[2U]; } ;   131     struct ethtool_drvinfo {   __u32 cmd;   char driver[32U];   char version[32U];   char fw_version[32U];   char bus_info[32U];   char erom_version[32U];   char reserved2[12U];   __u32 n_priv_flags;   __u32 n_stats;   __u32 testinfo_len;   __u32 eedump_len;   __u32 regdump_len; } ;   195     struct ethtool_wolinfo {   __u32 cmd;   __u32 supported;   __u32 wolopts;   __u8 sopass[6U]; } ;   239     struct ethtool_tunable {   __u32 cmd;   __u32 id;   __u32 type_id;   __u32 len;   void *data[0U]; } ;   251     struct ethtool_regs {   __u32 cmd;   __u32 version;   __u32 len;   __u8 data[0U]; } ;   273     struct ethtool_eeprom {   __u32 cmd;   __u32 magic;   __u32 offset;   __u32 len;   __u8 data[0U]; } ;   299     struct ethtool_eee {   __u32 cmd;   __u32 supported;   __u32 advertised;   __u32 lp_advertised;   __u32 eee_active;   __u32 eee_enabled;   __u32 tx_lpi_enabled;   __u32 tx_lpi_timer;   __u32 reserved[2U]; } ;   328     struct ethtool_modinfo {   __u32 cmd;   __u32 type;   __u32 eeprom_len;   __u32 reserved[8U]; } ;   345     
struct ethtool_coalesce {   __u32 cmd;   __u32 rx_coalesce_usecs;   __u32 rx_max_coalesced_frames;   __u32 rx_coalesce_usecs_irq;   __u32 rx_max_coalesced_frames_irq;   __u32 tx_coalesce_usecs;   __u32 tx_max_coalesced_frames;   __u32 tx_coalesce_usecs_irq;   __u32 tx_max_coalesced_frames_irq;   __u32 stats_block_coalesce_usecs;   __u32 use_adaptive_rx_coalesce;   __u32 use_adaptive_tx_coalesce;   __u32 pkt_rate_low;   __u32 rx_coalesce_usecs_low;   __u32 rx_max_coalesced_frames_low;   __u32 tx_coalesce_usecs_low;   __u32 tx_max_coalesced_frames_low;   __u32 pkt_rate_high;   __u32 rx_coalesce_usecs_high;   __u32 rx_max_coalesced_frames_high;   __u32 tx_coalesce_usecs_high;   __u32 tx_max_coalesced_frames_high;   __u32 rate_sample_interval; } ;   444     struct ethtool_ringparam {   __u32 cmd;   __u32 rx_max_pending;   __u32 rx_mini_max_pending;   __u32 rx_jumbo_max_pending;   __u32 tx_max_pending;   __u32 rx_pending;   __u32 rx_mini_pending;   __u32 rx_jumbo_pending;   __u32 tx_pending; } ;   481     struct ethtool_channels {   __u32 cmd;   __u32 max_rx;   __u32 max_tx;   __u32 max_other;   __u32 max_combined;   __u32 rx_count;   __u32 tx_count;   __u32 other_count;   __u32 combined_count; } ;   509     struct ethtool_pauseparam {   __u32 cmd;   __u32 autoneg;   __u32 rx_pause;   __u32 tx_pause; } ;   613     struct ethtool_test {   __u32 cmd;   __u32 flags;   __u32 reserved;   __u32 len;   __u64 data[0U]; } ;   645     struct ethtool_stats {   __u32 cmd;   __u32 n_stats;   __u64 data[0U]; } ;   687     struct ethtool_tcpip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be16 psrc;   __be16 pdst;   __u8 tos; } ;   720     struct ethtool_ah_espip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 spi;   __u8 tos; } ;   736     struct ethtool_usrip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 l4_4_bytes;   __u8 tos;   __u8 ip_ver;   __u8 proto; } ;   756     struct ethtool_tcpip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be16 psrc;   __be16 pdst;   __u8 tclass; } ;   774     struct ethtool_ah_espip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be32 spi;   __u8 tclass; } ;   790     struct ethtool_usrip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be32 l4_4_bytes;   __u8 tclass;   __u8 l4_proto; } ;   806     union ethtool_flow_union {   struct ethtool_tcpip4_spec tcp_ip4_spec;   struct ethtool_tcpip4_spec udp_ip4_spec;   struct ethtool_tcpip4_spec sctp_ip4_spec;   struct ethtool_ah_espip4_spec ah_ip4_spec;   struct ethtool_ah_espip4_spec esp_ip4_spec;   struct ethtool_usrip4_spec usr_ip4_spec;   struct ethtool_tcpip6_spec tcp_ip6_spec;   struct ethtool_tcpip6_spec udp_ip6_spec;   struct ethtool_tcpip6_spec sctp_ip6_spec;   struct ethtool_ah_espip6_spec ah_ip6_spec;   struct ethtool_ah_espip6_spec esp_ip6_spec;   struct ethtool_usrip6_spec usr_ip6_spec;   struct ethhdr ether_spec;   __u8 hdata[52U]; } ;   823     struct ethtool_flow_ext {   __u8 padding[2U];   unsigned char h_dest[6U];   __be16 vlan_etype;   __be16 vlan_tci;   __be32 data[2U]; } ;   842     struct ethtool_rx_flow_spec {   __u32 flow_type;   union ethtool_flow_union h_u;   struct ethtool_flow_ext h_ext;   union ethtool_flow_union m_u;   struct ethtool_flow_ext m_ext;   __u64 ring_cookie;   __u32 location; } ;   892     struct ethtool_rxnfc {   __u32 cmd;   __u32 flow_type;   __u64 data;   struct ethtool_rx_flow_spec fs;   __u32 rule_cnt;   __u32 rule_locs[0U]; } ;  1063     struct ethtool_flash {   __u32 cmd;   __u32 region;   char data[128U]; } ;  1071     struct ethtool_dump {   __u32 cmd;   
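/* the remaining ethtool_dump fields are the dump version, flag, total length, and the flexible data[] payload */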
__u32 version;   __u32 flag;   __u32 len;   __u8 data[0U]; } ;  1147     struct ethtool_ts_info {   __u32 cmd;   __u32 so_timestamping;   __s32 phc_index;   __u32 tx_types;   __u32 tx_reserved[3U];   __u32 rx_filters;   __u32 rx_reserved[3U]; } ;  1505     struct ethtool_link_settings {   __u32 cmd;   __u32 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 autoneg;   __u8 mdio_support;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __s8 link_mode_masks_nwords;   __u32 reserved[8U];   __u32 link_mode_masks[0U]; } ;    39     enum ethtool_phys_id_state {   ETHTOOL_ID_INACTIVE = 0,   ETHTOOL_ID_ACTIVE = 1,   ETHTOOL_ID_ON = 2,   ETHTOOL_ID_OFF = 3 } ;    97     struct __anonstruct_link_modes_387 {   unsigned long supported[1U];   unsigned long advertising[1U];   unsigned long lp_advertising[1U]; } ;    97     struct ethtool_link_ksettings {   struct ethtool_link_settings base;   struct __anonstruct_link_modes_387 link_modes; } ;   158     struct ethtool_ops {   int (*get_settings)(struct net_device *, struct ethtool_cmd *);   int (*set_settings)(struct net_device *, struct ethtool_cmd *);   void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);   int (*get_regs_len)(struct net_device *);   void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);   void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);   int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);   u32  (*get_msglevel)(struct net_device *);   void (*set_msglevel)(struct net_device *, u32 );   int (*nway_reset)(struct net_device *);   u32  (*get_link)(struct net_device *);   int (*get_eeprom_len)(struct net_device *);   int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);   int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);   void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);   int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);   void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);   void (*get_strings)(struct net_device *, u32 , u8 *);   int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state );   void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);   int (*begin)(struct net_device *);   void (*complete)(struct net_device *);   u32  (*get_priv_flags)(struct net_device *);   int (*set_priv_flags)(struct net_device *, u32 );   int (*get_sset_count)(struct net_device *, int);   int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *);   int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);   int (*flash_device)(struct net_device *, struct ethtool_flash *);   int (*reset)(struct net_device *, u32 *);   u32  (*get_rxfh_key_size)(struct net_device *);   u32  (*get_rxfh_indir_size)(struct net_device *);   int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *);   int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 );   void (*get_channels)(struct net_device *, struct ethtool_channels *);   int (*set_channels)(struct net_device *, struct ethtool_channels *);   int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);   int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *);   int (*set_dump)(struct 
net_device *, struct ethtool_dump *);   int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);   int (*get_module_info)(struct net_device *, struct ethtool_modinfo *);   int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_eee)(struct net_device *, struct ethtool_eee *);   int (*set_eee)(struct net_device *, struct ethtool_eee *);   int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *);   int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *);   int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *);   int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;   375     struct prot_inuse ;   376     struct netns_core {   struct ctl_table_header *sysctl_hdr;   int sysctl_somaxconn;   struct prot_inuse *inuse; } ;    38     struct u64_stats_sync { } ;   160     struct ipstats_mib {   u64 mibs[36U];   struct u64_stats_sync syncp; } ;    61     struct icmp_mib {   unsigned long mibs[28U]; } ;    67     struct icmpmsg_mib {   atomic_long_t mibs[512U]; } ;    72     struct icmpv6_mib {   unsigned long mibs[6U]; } ;    83     struct icmpv6msg_mib {   atomic_long_t mibs[512U]; } ;    93     struct tcp_mib {   unsigned long mibs[16U]; } ;   100     struct udp_mib {   unsigned long mibs[9U]; } ;   106     struct linux_mib {   unsigned long mibs[117U]; } ;   112     struct linux_xfrm_mib {   unsigned long mibs[29U]; } ;   118     struct proc_dir_entry ;   118     struct netns_mib {   struct tcp_mib *tcp_statistics;   struct ipstats_mib *ip_statistics;   struct linux_mib *net_statistics;   struct udp_mib *udp_statistics;   struct udp_mib *udplite_statistics;   struct icmp_mib *icmp_statistics;   struct icmpmsg_mib *icmpmsg_statistics;   struct proc_dir_entry *proc_net_devsnmp6;   struct udp_mib *udp_stats_in6;   struct udp_mib *udplite_stats_in6;   struct ipstats_mib *ipv6_statistics;   struct icmpv6_mib *icmpv6_statistics;   struct icmpv6msg_mib *icmpv6msg_statistics;   struct linux_xfrm_mib *xfrm_statistics; } ;    26     struct netns_unix {   int sysctl_max_dgram_qlen;   struct ctl_table_header *ctl; } ;    12     struct netns_packet {   struct mutex sklist_lock;   struct hlist_head sklist; } ;    14     struct netns_frags {   struct percpu_counter mem;   int timeout;   int high_thresh;   int low_thresh;   int max_dist; } ;   187     struct ipv4_devconf ;   188     struct fib_rules_ops ;   189     struct fib_table ;   190     struct local_ports {   seqlock_t lock;   int range[2U];   bool warned; } ;    24     struct ping_group_range {   seqlock_t lock;   kgid_t range[2U]; } ;    29     struct inet_peer_base ;    29     struct xt_table ;    29     struct netns_ipv4 {   struct ctl_table_header *forw_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *ipv4_hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *xfrm4_hdr;   struct ipv4_devconf *devconf_all;   struct ipv4_devconf *devconf_dflt;   struct fib_rules_ops *rules_ops;   bool fib_has_custom_rules;   struct fib_table *fib_local;   struct fib_table *fib_main;   struct fib_table *fib_default;   int fib_num_tclassid_users;   struct hlist_head *fib_table_hash;   bool fib_offload_disabled;   struct sock *fibnl;   struct sock **icmp_sk;   struct sock *mc_autojoin_sk;   struct 
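
The ethtool_ops table that closes just above is the interface a NIC driver exports to the ethtool core. As an illustration only (a hypothetical driver, not code from this trace), a minimal instance could look like the following; callbacks left NULL make the core answer -EOPNOTSUPP:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static void dummy_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *info)
    {
            strlcpy(info->driver, "dummy", sizeof(info->driver));
            strlcpy(info->version, "1.0", sizeof(info->version));
    }

    static u32 dummy_get_link(struct net_device *dev)
    {
            return netif_carrier_ok(dev) ? 1 : 0;  /* what ethtool_op_get_link does */
    }

    static const struct ethtool_ops dummy_ethtool_ops = {
            .get_drvinfo = dummy_get_drvinfo,
            .get_link    = dummy_get_link,
    };
    /* installed at probe time: dev->ethtool_ops = &dummy_ethtool_ops; */
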
inet_peer_base *peers;   struct sock **tcp_sk;   struct netns_frags frags;   struct xt_table *iptable_filter;   struct xt_table *iptable_mangle;   struct xt_table *iptable_raw;   struct xt_table *arptable_filter;   struct xt_table *iptable_security;   struct xt_table *nat_table;   int sysctl_icmp_echo_ignore_all;   int sysctl_icmp_echo_ignore_broadcasts;   int sysctl_icmp_ignore_bogus_error_responses;   int sysctl_icmp_ratelimit;   int sysctl_icmp_ratemask;   int sysctl_icmp_errors_use_inbound_ifaddr;   struct local_ports ip_local_ports;   int sysctl_tcp_ecn;   int sysctl_tcp_ecn_fallback;   int sysctl_ip_default_ttl;   int sysctl_ip_no_pmtu_disc;   int sysctl_ip_fwd_use_pmtu;   int sysctl_ip_nonlocal_bind;   int sysctl_ip_dynaddr;   int sysctl_ip_early_demux;   int sysctl_fwmark_reflect;   int sysctl_tcp_fwmark_accept;   int sysctl_tcp_l3mdev_accept;   int sysctl_tcp_mtu_probing;   int sysctl_tcp_base_mss;   int sysctl_tcp_probe_threshold;   u32 sysctl_tcp_probe_interval;   int sysctl_tcp_keepalive_time;   int sysctl_tcp_keepalive_probes;   int sysctl_tcp_keepalive_intvl;   int sysctl_tcp_syn_retries;   int sysctl_tcp_synack_retries;   int sysctl_tcp_syncookies;   int sysctl_tcp_reordering;   int sysctl_tcp_retries1;   int sysctl_tcp_retries2;   int sysctl_tcp_orphan_retries;   int sysctl_tcp_fin_timeout;   unsigned int sysctl_tcp_notsent_lowat;   int sysctl_igmp_max_memberships;   int sysctl_igmp_max_msf;   int sysctl_igmp_llm_reports;   int sysctl_igmp_qrv;   struct ping_group_range ping_group_range;   atomic_t dev_addr_genid;   unsigned long *sysctl_local_reserved_ports;   struct list_head mr_tables;   struct fib_rules_ops *mr_rules_ops;   int sysctl_fib_multipath_use_neigh;   atomic_t rt_genid; } ;   142     struct neighbour ;   142     struct dst_ops {   unsigned short family;   unsigned int gc_thresh;   int (*gc)(struct dst_ops *);   struct dst_entry * (*check)(struct dst_entry *, __u32 );   unsigned int (*default_advmss)(const struct dst_entry *);   unsigned int (*mtu)(const struct dst_entry *);   u32 * (*cow_metrics)(struct dst_entry *, unsigned long);   void (*destroy)(struct dst_entry *);   void (*ifdown)(struct dst_entry *, struct net_device *, int);   struct dst_entry * (*negative_advice)(struct dst_entry *);   void (*link_failure)(struct sk_buff *);   void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 );   void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);   int (*local_out)(struct net *, struct sock *, struct sk_buff *);   struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);   struct kmem_cache *kmem_cachep;   struct percpu_counter pcpuc_entries; } ;    73     struct netns_sysctl_ipv6 {   struct ctl_table_header *hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *icmp_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *xfrm6_hdr;   int bindv6only;   int flush_delay;   int ip6_rt_max_size;   int ip6_rt_gc_min_interval;   int ip6_rt_gc_timeout;   int ip6_rt_gc_interval;   int ip6_rt_gc_elasticity;   int ip6_rt_mtu_expires;   int ip6_rt_min_advmss;   int flowlabel_consistency;   int auto_flowlabels;   int icmpv6_time;   int anycast_src_echo_reply;   int ip_nonlocal_bind;   int fwmark_reflect;   int idgen_retries;   int idgen_delay;   int flowlabel_state_ranges; } ;    40     struct ipv6_devconf ;    40     struct rt6_info ;    40     struct rt6_statistics ;    40     struct fib6_table ;    40     struct netns_ipv6 {   struct netns_sysctl_ipv6 sysctl;   
struct ipv6_devconf *devconf_all;   struct ipv6_devconf *devconf_dflt;   struct inet_peer_base *peers;   struct netns_frags frags;   struct xt_table *ip6table_filter;   struct xt_table *ip6table_mangle;   struct xt_table *ip6table_raw;   struct xt_table *ip6table_security;   struct xt_table *ip6table_nat;   struct rt6_info *ip6_null_entry;   struct rt6_statistics *rt6_stats;   struct timer_list ip6_fib_timer;   struct hlist_head *fib_table_hash;   struct fib6_table *fib6_main_tbl;   struct list_head fib6_walkers;   struct dst_ops ip6_dst_ops;   rwlock_t fib6_walker_lock;   spinlock_t fib6_gc_lock;   unsigned int ip6_rt_gc_expire;   unsigned long ip6_rt_last_gc;   struct rt6_info *ip6_prohibit_entry;   struct rt6_info *ip6_blk_hole_entry;   struct fib6_table *fib6_local_tbl;   struct fib_rules_ops *fib6_rules_ops;   struct sock **icmp_sk;   struct sock *ndisc_sk;   struct sock *tcp_sk;   struct sock *igmp_sk;   struct sock *mc_autojoin_sk;   struct list_head mr6_tables;   struct fib_rules_ops *mr6_rules_ops;   atomic_t dev_addr_genid;   atomic_t fib6_sernum; } ;    89     struct netns_nf_frag {   struct netns_sysctl_ipv6 sysctl;   struct netns_frags frags; } ;    95     struct netns_sysctl_lowpan {   struct ctl_table_header *frags_hdr; } ;    14     struct netns_ieee802154_lowpan {   struct netns_sysctl_lowpan sysctl;   struct netns_frags frags; } ;    20     struct sctp_mib ;    21     struct netns_sctp {   struct sctp_mib *sctp_statistics;   struct proc_dir_entry *proc_net_sctp;   struct ctl_table_header *sysctl_header;   struct sock *ctl_sock;   struct list_head local_addr_list;   struct list_head addr_waitq;   struct timer_list addr_wq_timer;   struct list_head auto_asconf_splist;   spinlock_t addr_wq_lock;   spinlock_t local_addr_lock;   unsigned int rto_initial;   unsigned int rto_min;   unsigned int rto_max;   int rto_alpha;   int rto_beta;   int max_burst;   int cookie_preserve_enable;   char *sctp_hmac_alg;   unsigned int valid_cookie_life;   unsigned int sack_timeout;   unsigned int hb_interval;   int max_retrans_association;   int max_retrans_path;   int max_retrans_init;   int pf_retrans;   int pf_enable;   int sndbuf_policy;   int rcvbuf_policy;   int default_auto_asconf;   int addip_enable;   int addip_noauth;   int prsctp_enable;   int auth_enable;   int scope_policy;   int rwnd_upd_shift;   unsigned long max_autoclose; } ;   141     struct netns_dccp {   struct sock *v4_ctl_sk;   struct sock *v6_ctl_sk; } ;    79     struct nf_logger ;    80     struct netns_nf {   struct proc_dir_entry *proc_netfilter;   const struct nf_logger *nf_loggers[13U];   struct ctl_table_header *nf_log_dir_header;   struct list_head hooks[13U][8U]; } ;    19     struct ebt_table ;    20     struct netns_xt {   struct list_head tables[13U];   bool notrack_deprecated_warning;   bool clusterip_deprecated_warning;   struct ebt_table *broute_table;   struct ebt_table *frame_filter;   struct ebt_table *frame_nat; } ;    19     struct hlist_nulls_node ;    19     struct hlist_nulls_head {   struct hlist_nulls_node *first; } ;    23     struct hlist_nulls_node {   struct hlist_nulls_node *next;   struct hlist_nulls_node **pprev; } ;    32     struct nf_proto_net {   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table;   struct ctl_table_header *ctl_compat_header;   struct ctl_table *ctl_compat_table;   unsigned int users; } ;    25     struct nf_generic_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    30     struct nf_tcp_net {   struct nf_proto_net pn;   unsigned int 
timeouts[14U];   unsigned int tcp_loose;   unsigned int tcp_be_liberal;   unsigned int tcp_max_retrans; } ;    44     struct nf_udp_net {   struct nf_proto_net pn;   unsigned int timeouts[2U]; } ;    49     struct nf_icmp_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    54     struct nf_ip_net {   struct nf_generic_net generic;   struct nf_tcp_net tcp;   struct nf_udp_net udp;   struct nf_icmp_net icmp;   struct nf_icmp_net icmpv6;   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table; } ;    65     struct ct_pcpu {   spinlock_t lock;   struct hlist_nulls_head unconfirmed;   struct hlist_nulls_head dying; } ;    72     struct ip_conntrack_stat ;    72     struct nf_ct_event_notifier ;    72     struct nf_exp_event_notifier ;    72     struct netns_ct {   atomic_t count;   unsigned int expect_count;   struct delayed_work ecache_dwork;   bool ecache_dwork_pending;   struct ctl_table_header *sysctl_header;   struct ctl_table_header *acct_sysctl_header;   struct ctl_table_header *tstamp_sysctl_header;   struct ctl_table_header *event_sysctl_header;   struct ctl_table_header *helper_sysctl_header;   unsigned int sysctl_log_invalid;   int sysctl_events;   int sysctl_acct;   int sysctl_auto_assign_helper;   bool auto_assign_helper_warned;   int sysctl_tstamp;   int sysctl_checksum;   struct ct_pcpu *pcpu_lists;   struct ip_conntrack_stat *stat;   struct nf_ct_event_notifier *nf_conntrack_event_cb;   struct nf_exp_event_notifier *nf_expect_event_cb;   struct nf_ip_net nf_ct_proto;   unsigned int labels_used;   u8 label_words; } ;   104     struct nft_af_info ;   105     struct netns_nftables {   struct list_head af_info;   struct list_head commit_list;   struct nft_af_info *ipv4;   struct nft_af_info *ipv6;   struct nft_af_info *inet;   struct nft_af_info *arp;   struct nft_af_info *bridge;   struct nft_af_info *netdev;   unsigned int base_seq;   u8 gencursor; } ;   478     struct tasklet_struct {   struct tasklet_struct *next;   unsigned long state;   atomic_t count;   void (*func)(unsigned long);   unsigned long data; } ;   700     struct flow_cache_percpu {   struct hlist_head *hash_table;   int hash_count;   u32 hash_rnd;   int hash_rnd_recalc;   struct tasklet_struct flush_tasklet; } ;    16     struct flow_cache {   u32 hash_shift;   struct flow_cache_percpu *percpu;   struct notifier_block hotcpu_notifier;   int low_watermark;   int high_watermark;   struct timer_list rnd_timer; } ;    25     struct xfrm_policy_hash {   struct hlist_head *table;   unsigned int hmask;   u8 dbits4;   u8 sbits4;   u8 dbits6;   u8 sbits6; } ;    21     struct xfrm_policy_hthresh {   struct work_struct work;   seqlock_t lock;   u8 lbits4;   u8 rbits4;   u8 lbits6;   u8 rbits6; } ;    30     struct netns_xfrm {   struct list_head state_all;   struct hlist_head *state_bydst;   struct hlist_head *state_bysrc;   struct hlist_head *state_byspi;   unsigned int state_hmask;   unsigned int state_num;   struct work_struct state_hash_work;   struct hlist_head state_gc_list;   struct work_struct state_gc_work;   struct list_head policy_all;   struct hlist_head *policy_byidx;   unsigned int policy_idx_hmask;   struct hlist_head policy_inexact[3U];   struct xfrm_policy_hash policy_bydst[3U];   unsigned int policy_count[6U];   struct work_struct policy_hash_work;   struct xfrm_policy_hthresh policy_hthresh;   struct sock *nlsk;   struct sock *nlsk_stash;   u32 sysctl_aevent_etime;   u32 sysctl_aevent_rseqth;   int sysctl_larval_drop;   u32 sysctl_acq_expires;   struct ctl_table_header 
*sysctl_hdr;   struct dst_ops xfrm4_dst_ops;   struct dst_ops xfrm6_dst_ops;   spinlock_t xfrm_state_lock;   rwlock_t xfrm_policy_lock;   struct mutex xfrm_cfg_mutex;   struct flow_cache flow_cache_global;   atomic_t flow_cache_genid;   struct list_head flow_cache_gc_list;   atomic_t flow_cache_gc_count;   spinlock_t flow_cache_gc_lock;   struct work_struct flow_cache_gc_work;   struct work_struct flow_cache_flush_work;   struct mutex flow_flush_sem; } ;    89     struct mpls_route ;    90     struct netns_mpls {   size_t platform_labels;   struct mpls_route **platform_label;   struct ctl_table_header *ctl; } ;    16     struct proc_ns_operations ;    17     struct ns_common {   atomic_long_t stashed;   const struct proc_ns_operations *ops;   unsigned int inum; } ;    11     struct net_generic ;    12     struct netns_ipvs ;    13     struct net {   atomic_t passive;   atomic_t count;   spinlock_t rules_mod_lock;   atomic64_t cookie_gen;   struct list_head list;   struct list_head cleanup_list;   struct list_head exit_list;   struct user_namespace *user_ns;   spinlock_t nsid_lock;   struct idr netns_ids;   struct ns_common ns;   struct proc_dir_entry *proc_net;   struct proc_dir_entry *proc_net_stat;   struct ctl_table_set sysctls;   struct sock *rtnl;   struct sock *genl_sock;   struct list_head dev_base_head;   struct hlist_head *dev_name_head;   struct hlist_head *dev_index_head;   unsigned int dev_base_seq;   int ifindex;   unsigned int dev_unreg_count;   struct list_head rules_ops;   struct net_device *loopback_dev;   struct netns_core core;   struct netns_mib mib;   struct netns_packet packet;   struct netns_unix unx;   struct netns_ipv4 ipv4;   struct netns_ipv6 ipv6;   struct netns_ieee802154_lowpan ieee802154_lowpan;   struct netns_sctp sctp;   struct netns_dccp dccp;   struct netns_nf nf;   struct netns_xt xt;   struct netns_ct ct;   struct netns_nftables nft;   struct netns_nf_frag nf_frag;   struct sock *nfnl;   struct sock *nfnl_stash;   struct list_head nfnl_acct_list;   struct list_head nfct_timeout_list;   struct sk_buff_head wext_nlevents;   struct net_generic *gen;   struct netns_xfrm xfrm;   struct netns_ipvs *ipvs;   struct netns_mpls mpls;   struct sock *diag_nlsk;   atomic_t fnhe_genid; } ;   247     struct __anonstruct_possible_net_t_402 {   struct net *net; } ;   247     typedef struct __anonstruct_possible_net_t_402 possible_net_t;   287     struct pernet_operations {   struct list_head list;   int (*init)(struct net *);   void (*exit)(struct net *);   void (*exit_batch)(struct list_head *);   int *id;   size_t size; } ;    13     typedef unsigned long kernel_ulong_t;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   229     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   659     enum fwnode_type {   FWNODE_INVALID = 0,   FWNODE_OF = 1,   FWNODE_ACPI = 2,   FWNODE_ACPI_DATA = 3,   FWNODE_PDATA = 4,   FWNODE_IRQCHIP = 5 } ;   668     struct fwnode_handle {   enum fwnode_type type;   struct fwnode_handle *secondary; } ;    32     typedef u32 phandle;    34     struct property {   char *name;   int length;   void *value;   struct property *next;   unsigned long _flags;   unsigned int unique_id;   struct bin_attribute attr; } ;    44     struct device_node {   const char *name;   const char *type;   phandle phandle;   const char *full_name;   struct fwnode_handle fwnode;   struct property *properties;   struct property *deadprops;   struct 
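
struct net above aggregates every per-namespace table in the kernel. Built-in subsystems reach their slice through the embedded members, while modules index the net_generic() array (its accessor is declared near the end of this listing). A minimal sketch of the direct case, using one of the netns_ipv4 sysctl fields listed earlier:

    #include <net/net_namespace.h>

    /* Illustration only: read a per-namespace TCP sysctl for a given netns. */
    static int dummy_syn_retries(struct net *net)
    {
            return net->ipv4.sysctl_tcp_syn_retries;
    }
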
device_node *parent;   struct device_node *child;   struct device_node *sibling;   struct kobject kobj;   unsigned long _flags;   void *data; } ;   296     struct mii_bus ;   303     struct mdio_device {   struct device dev;   const struct dev_pm_ops *pm_ops;   struct mii_bus *bus;   int (*bus_match)(struct device *, struct device_driver *);   void (*device_free)(struct mdio_device *);   void (*device_remove)(struct mdio_device *);   int addr;   int flags; } ;    41     struct mdio_driver_common {   struct device_driver driver;   int flags; } ;   244     struct phy_device ;   245     enum ldv_29701 {   PHY_INTERFACE_MODE_NA = 0,   PHY_INTERFACE_MODE_MII = 1,   PHY_INTERFACE_MODE_GMII = 2,   PHY_INTERFACE_MODE_SGMII = 3,   PHY_INTERFACE_MODE_TBI = 4,   PHY_INTERFACE_MODE_REVMII = 5,   PHY_INTERFACE_MODE_RMII = 6,   PHY_INTERFACE_MODE_RGMII = 7,   PHY_INTERFACE_MODE_RGMII_ID = 8,   PHY_INTERFACE_MODE_RGMII_RXID = 9,   PHY_INTERFACE_MODE_RGMII_TXID = 10,   PHY_INTERFACE_MODE_RTBI = 11,   PHY_INTERFACE_MODE_SMII = 12,   PHY_INTERFACE_MODE_XGMII = 13,   PHY_INTERFACE_MODE_MOCA = 14,   PHY_INTERFACE_MODE_QSGMII = 15,   PHY_INTERFACE_MODE_MAX = 16 } ;    84     typedef enum ldv_29701 phy_interface_t;   130     enum ldv_29752 {   MDIOBUS_ALLOCATED = 1,   MDIOBUS_REGISTERED = 2,   MDIOBUS_UNREGISTERED = 3,   MDIOBUS_RELEASED = 4 } ;   137     struct mii_bus {   struct module *owner;   const char *name;   char id[17U];   void *priv;   int (*read)(struct mii_bus *, int, int);   int (*write)(struct mii_bus *, int, int, u16 );   int (*reset)(struct mii_bus *);   struct mutex mdio_lock;   struct device *parent;   enum ldv_29752 state;   struct device dev;   struct mdio_device *mdio_map[32U];   u32 phy_mask;   u32 phy_ignore_ta_mask;   int irq[32U]; } ;   218     enum phy_state {   PHY_DOWN = 0,   PHY_STARTING = 1,   PHY_READY = 2,   PHY_PENDING = 3,   PHY_UP = 4,   PHY_AN = 5,   PHY_RUNNING = 6,   PHY_NOLINK = 7,   PHY_FORCING = 8,   PHY_CHANGELINK = 9,   PHY_HALTED = 10,   PHY_RESUMING = 11 } ;   233     struct phy_c45_device_ids {   u32 devices_in_package;   u32 device_ids[8U]; } ;   326     struct phy_driver ;   326     struct phy_device {   struct mdio_device mdio;   struct phy_driver *drv;   u32 phy_id;   struct phy_c45_device_ids c45_ids;   bool is_c45;   bool is_internal;   bool is_pseudo_fixed_link;   bool has_fixups;   bool suspended;   enum phy_state state;   u32 dev_flags;   phy_interface_t interface;   int speed;   int duplex;   int pause;   int asym_pause;   int link;   u32 interrupts;   u32 supported;   u32 advertising;   u32 lp_advertising;   int autoneg;   int link_timeout;   int irq;   void *priv;   struct work_struct phy_queue;   struct delayed_work state_queue;   atomic_t irq_disable;   struct mutex lock;   struct net_device *attached_dev;   u8 mdix;   void (*adjust_link)(struct net_device *); } ;   428     struct phy_driver {   struct mdio_driver_common mdiodrv;   u32 phy_id;   char *name;   unsigned int phy_id_mask;   u32 features;   u32 flags;   const void *driver_data;   int (*soft_reset)(struct phy_device *);   int (*config_init)(struct phy_device *);   int (*probe)(struct phy_device *);   int (*suspend)(struct phy_device *);   int (*resume)(struct phy_device *);   int (*config_aneg)(struct phy_device *);   int (*aneg_done)(struct phy_device *);   int (*read_status)(struct phy_device *);   int (*ack_interrupt)(struct phy_device *);   int (*config_intr)(struct phy_device *);   int (*did_interrupt)(struct phy_device *);   void (*remove)(struct phy_device *);   int 
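
struct mii_bus above abstracts an MDIO bus: the bus driver supplies read/write accessors and the PHY layer does the rest. A sketch against a made-up register file (illustration only; a real bus driver would touch hardware and then call mdiobus_register()):

    #include <linux/phy.h>

    static u16 dummy_mii_regs[32];          /* fake register file, one PHY */

    static int dummy_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
    {
            return dummy_mii_regs[regnum & 0x1f];
    }

    static int dummy_mii_write(struct mii_bus *bus, int phy_addr,
                               int regnum, u16 val)
    {
            dummy_mii_regs[regnum & 0x1f] = val;
            return 0;
    }
    /* wired up as bus->read = dummy_mii_read; bus->write = dummy_mii_write; */
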
(*match_phy_device)(struct phy_device *);   int (*ts_info)(struct phy_device *, struct ethtool_ts_info *);   int (*hwtstamp)(struct phy_device *, struct ifreq *);   bool  (*rxtstamp)(struct phy_device *, struct sk_buff *, int);   void (*txtstamp)(struct phy_device *, struct sk_buff *, int);   int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *);   void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *);   void (*link_change_notify)(struct phy_device *);   int (*read_mmd_indirect)(struct phy_device *, int, int, int);   void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 );   int (*module_info)(struct phy_device *, struct ethtool_modinfo *);   int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *);   int (*get_sset_count)(struct phy_device *);   void (*get_strings)(struct phy_device *, u8 *);   void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ;   841     struct fixed_phy_status {   int link;   int speed;   int duplex;   int pause;   int asym_pause; } ;    27     enum dsa_tag_protocol {   DSA_TAG_PROTO_NONE = 0,   DSA_TAG_PROTO_DSA = 1,   DSA_TAG_PROTO_TRAILER = 2,   DSA_TAG_PROTO_EDSA = 3,   DSA_TAG_PROTO_BRCM = 4 } ;    35     struct dsa_chip_data {   struct device *host_dev;   int sw_addr;   int eeprom_len;   struct device_node *of_node;   char *port_names[12U];   struct device_node *port_dn[12U];   s8 *rtable; } ;    68     struct dsa_platform_data {   struct device *netdev;   struct net_device *of_netdev;   int nr_chips;   struct dsa_chip_data *chip; } ;    84     struct packet_type ;    85     struct dsa_switch ;    85     struct dsa_switch_tree {   struct dsa_platform_data *pd;   struct net_device *master_netdev;   int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   enum dsa_tag_protocol tag_protocol;   struct ethtool_ops master_ethtool_ops;   s8 cpu_switch;   s8 cpu_port;   struct dsa_switch *ds[4U]; } ;   121     struct dsa_switch_driver ;   121     struct dsa_switch {   struct device *dev;   struct dsa_switch_tree *dst;   int index;   void *priv;   struct dsa_chip_data *cd;   struct dsa_switch_driver *drv;   char hwmon_name[24U];   struct device *hwmon_dev;   u32 dsa_port_mask;   u32 enabled_port_mask;   u32 phys_mii_mask;   struct mii_bus *slave_mii_bus;   struct net_device *ports[12U]; } ;   195     struct switchdev_trans ;   196     struct switchdev_obj ;   197     struct switchdev_obj_port_fdb ;   198     struct switchdev_obj_port_vlan ;   199     struct dsa_switch_driver {   struct list_head list;   enum dsa_tag_protocol tag_protocol;   const char * (*probe)(struct device *, struct device *, int, void **);   int (*setup)(struct dsa_switch *);   int (*set_addr)(struct dsa_switch *, u8 *);   u32  (*get_phy_flags)(struct dsa_switch *, int);   int (*phy_read)(struct dsa_switch *, int, int);   int (*phy_write)(struct dsa_switch *, int, int, u16 );   void (*adjust_link)(struct dsa_switch *, int, struct phy_device *);   void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *);   void (*get_strings)(struct dsa_switch *, int, uint8_t *);   void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *);   int (*get_sset_count)(struct dsa_switch *);   void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*suspend)(struct dsa_switch *);   int (*resume)(struct dsa_switch *);   int (*port_enable)(struct dsa_switch *, int, struct phy_device *);   void 
(*port_disable)(struct dsa_switch *, int, struct phy_device *);   int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *);   int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *);   int (*get_temp)(struct dsa_switch *, int *);   int (*get_temp_limit)(struct dsa_switch *, int *);   int (*set_temp_limit)(struct dsa_switch *, int);   int (*get_temp_alarm)(struct dsa_switch *, bool *);   int (*get_eeprom_len)(struct dsa_switch *);   int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*get_regs_len)(struct dsa_switch *, int);   void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *);   int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *);   void (*port_bridge_leave)(struct dsa_switch *, int);   void (*port_stp_state_set)(struct dsa_switch *, int, u8 );   int (*port_vlan_filtering)(struct dsa_switch *, int, bool );   int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *);   int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *));   int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *);   int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); } ;   347     struct ieee_ets {   __u8 willing;   __u8 ets_cap;   __u8 cbs;   __u8 tc_tx_bw[8U];   __u8 tc_rx_bw[8U];   __u8 tc_tsa[8U];   __u8 prio_tc[8U];   __u8 tc_reco_bw[8U];   __u8 tc_reco_tsa[8U];   __u8 reco_prio_tc[8U]; } ;    69     struct ieee_maxrate {   __u64 tc_maxrate[8U]; } ;    87     struct ieee_qcn {   __u8 rpg_enable[8U];   __u32 rppp_max_rps[8U];   __u32 rpg_time_reset[8U];   __u32 rpg_byte_reset[8U];   __u32 rpg_threshold[8U];   __u32 rpg_max_rate[8U];   __u32 rpg_ai_rate[8U];   __u32 rpg_hai_rate[8U];   __u32 rpg_gd[8U];   __u32 rpg_min_dec_fac[8U];   __u32 rpg_min_rate[8U];   __u32 cndd_state_machine[8U]; } ;   132     struct ieee_qcn_stats {   __u64 rppp_rp_centiseconds[8U];   __u32 rppp_created_rps[8U]; } ;   144     struct ieee_pfc {   __u8 pfc_cap;   __u8 pfc_en;   __u8 mbc;   __u16 delay;   __u64 requests[8U];   __u64 indications[8U]; } ;   164     struct cee_pg {   __u8 willing;   __u8 error;   __u8 pg_en;   __u8 tcs_supported;   __u8 pg_bw[8U];   __u8 prio_pg[8U]; } ;   187     struct cee_pfc {   __u8 willing;   __u8 error;   __u8 pfc_en;   __u8 tcs_supported; } ;   202     struct dcb_app {   __u8 selector;   __u8 priority;   __u16 protocol; } ;   236     struct dcb_peer_app_info {   __u8 willing;   __u8 error; } ;    40     struct dcbnl_rtnl_ops {   int (*ieee_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_setets)(struct net_device *, struct ieee_ets *);   int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *);   int (*ieee_setqcn)(struct net_device *, struct 
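
dsa_switch_driver, completed just above, is the per-chip backend of the DSA layer. A skeletal driver might look like the sketch below (hypothetical names; register_switch_driver() is the registration helper net/dsa provides in this kernel series, though its prototype is not part of this trace):

    #include <net/dsa.h>

    static const char *dummy_dsa_probe(struct device *dsa_dev,
                                       struct device *host_dev,
                                       int sw_addr, void **priv)
    {
            return "dummy-switch";  /* non-NULL name means the chip was found */
    }

    static int dummy_dsa_setup(struct dsa_switch *ds)
    {
            return 0;               /* reset and configure the hardware here */
    }

    static int dummy_dsa_set_addr(struct dsa_switch *ds, u8 *addr)
    {
            return 0;
    }

    static struct dsa_switch_driver dummy_dsa_driver = {
            .tag_protocol = DSA_TAG_PROTO_NONE,
            .probe        = dummy_dsa_probe,
            .setup        = dummy_dsa_setup,
            .set_addr     = dummy_dsa_set_addr,
    };
    /* module init: register_switch_driver(&dummy_dsa_driver); */
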
ieee_qcn *);   int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *);   int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_getapp)(struct net_device *, struct dcb_app *);   int (*ieee_setapp)(struct net_device *, struct dcb_app *);   int (*ieee_delapp)(struct net_device *, struct dcb_app *);   int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *);   u8  (*getstate)(struct net_device *);   u8  (*setstate)(struct net_device *, u8 );   void (*getpermhwaddr)(struct net_device *, u8 *);   void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgtx)(struct net_device *, int, u8 );   void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgrx)(struct net_device *, int, u8 );   void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);   void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);   void (*setpfccfg)(struct net_device *, int, u8 );   void (*getpfccfg)(struct net_device *, int, u8 *);   u8  (*setall)(struct net_device *);   u8  (*getcap)(struct net_device *, int, u8 *);   int (*getnumtcs)(struct net_device *, int, u8 *);   int (*setnumtcs)(struct net_device *, int, u8 );   u8  (*getpfcstate)(struct net_device *);   void (*setpfcstate)(struct net_device *, u8 );   void (*getbcncfg)(struct net_device *, int, u32 *);   void (*setbcncfg)(struct net_device *, int, u32 );   void (*getbcnrp)(struct net_device *, int, u8 *);   void (*setbcnrp)(struct net_device *, int, u8 );   int (*setapp)(struct net_device *, u8 , u16 , u8 );   int (*getapp)(struct net_device *, u8 , u16 );   u8  (*getfeatcfg)(struct net_device *, int, u8 *);   u8  (*setfeatcfg)(struct net_device *, int, u8 );   u8  (*getdcbx)(struct net_device *);   u8  (*setdcbx)(struct net_device *, u8 );   int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *);   int (*peer_getapptable)(struct net_device *, struct dcb_app *);   int (*cee_peer_getpg)(struct net_device *, struct cee_pg *);   int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;   105     struct taskstats {   __u16 version;   __u32 ac_exitcode;   __u8 ac_flag;   __u8 ac_nice;   __u64 cpu_count;   __u64 cpu_delay_total;   __u64 blkio_count;   __u64 blkio_delay_total;   __u64 swapin_count;   __u64 swapin_delay_total;   __u64 cpu_run_real_total;   __u64 cpu_run_virtual_total;   char ac_comm[32U];   __u8 ac_sched;   __u8 ac_pad[3U];   __u32 ac_uid;   __u32 ac_gid;   __u32 ac_pid;   __u32 ac_ppid;   __u32 ac_btime;   __u64 ac_etime;   __u64 ac_utime;   __u64 ac_stime;   __u64 ac_minflt;   __u64 ac_majflt;   __u64 coremem;   __u64 virtmem;   __u64 hiwater_rss;   __u64 hiwater_vm;   __u64 read_char;   __u64 write_char;   __u64 read_syscalls;   __u64 write_syscalls;   __u64 read_bytes;   __u64 write_bytes;   __u64 cancelled_write_bytes;   __u64 nvcsw;   __u64 nivcsw;   __u64 ac_utimescaled;   __u64 ac_stimescaled;   __u64 cpu_scaled_run_real_total;   __u64 freepages_count;   __u64 freepages_delay_total; } ;    58     struct mnt_namespace ;    59     struct ipc_namespace ;    60     struct cgroup_namespace ;    61     struct nsproxy {   atomic_t count;   struct uts_namespace *uts_ns;   struct ipc_namespace *ipc_ns;   struct mnt_namespace *mnt_ns;   struct pid_namespace 
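
dcbnl_rtnl_ops above is how a DCB-capable NIC exposes ETS/PFC configuration over rtnetlink; drivers fill in only the subset they support. A minimal, purely illustrative instance:

    #include <net/dcbnl.h>

    static int dummy_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
    {
            ets->ets_cap = 8;       /* pretend eight traffic classes */
            return 0;
    }

    static const struct dcbnl_rtnl_ops dummy_dcbnl_ops = {
            .ieee_getets = dummy_ieee_getets,
    };
    /* installed as dev->dcbnl_ops = &dummy_dcbnl_ops; */
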
*pid_ns_for_children;   struct net *net_ns;   struct cgroup_namespace *cgroup_ns; } ;    86     struct uid_gid_extent {   u32 first;   u32 lower_first;   u32 count; } ;    19     struct uid_gid_map {   u32 nr_extents;   struct uid_gid_extent extent[5U]; } ;    20     struct user_namespace {   struct uid_gid_map uid_map;   struct uid_gid_map gid_map;   struct uid_gid_map projid_map;   atomic_t count;   struct user_namespace *parent;   int level;   kuid_t owner;   kgid_t group;   struct ns_common ns;   unsigned long flags;   struct key *persistent_keyring_register;   struct rw_semaphore persistent_keyring_register_sem; } ;   608     struct cgroup_namespace {   atomic_t count;   struct ns_common ns;   struct user_namespace *user_ns;   struct css_set *root_cset; } ;   662     struct netprio_map {   struct callback_head rcu;   u32 priomap_len;   u32 priomap[]; } ;    41     struct nlmsghdr {   __u32 nlmsg_len;   __u16 nlmsg_type;   __u16 nlmsg_flags;   __u32 nlmsg_seq;   __u32 nlmsg_pid; } ;   143     struct nlattr {   __u16 nla_len;   __u16 nla_type; } ;   105     struct netlink_callback {   struct sk_buff *skb;   const struct nlmsghdr *nlh;   int (*start)(struct netlink_callback *);   int (*dump)(struct sk_buff *, struct netlink_callback *);   int (*done)(struct netlink_callback *);   void *data;   struct module *module;   u16 family;   u16 min_dump_alloc;   unsigned int prev_seq;   unsigned int seq;   long args[6U]; } ;   183     struct ndmsg {   __u8 ndm_family;   __u8 ndm_pad1;   __u16 ndm_pad2;   __s32 ndm_ifindex;   __u16 ndm_state;   __u8 ndm_flags;   __u8 ndm_type; } ;    41     struct rtnl_link_stats64 {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 rx_errors;   __u64 tx_errors;   __u64 rx_dropped;   __u64 tx_dropped;   __u64 multicast;   __u64 collisions;   __u64 rx_length_errors;   __u64 rx_over_errors;   __u64 rx_crc_errors;   __u64 rx_frame_errors;   __u64 rx_fifo_errors;   __u64 rx_missed_errors;   __u64 tx_aborted_errors;   __u64 tx_carrier_errors;   __u64 tx_fifo_errors;   __u64 tx_heartbeat_errors;   __u64 tx_window_errors;   __u64 rx_compressed;   __u64 tx_compressed;   __u64 rx_nohandler; } ;   830     struct ifla_vf_stats {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 broadcast;   __u64 multicast; } ;    16     struct ifla_vf_info {   __u32 vf;   __u8 mac[32U];   __u32 vlan;   __u32 qos;   __u32 spoofchk;   __u32 linkstate;   __u32 min_tx_rate;   __u32 max_tx_rate;   __u32 rss_query_en;   __u32 trusted; } ;   118     struct tc_stats {   __u64 bytes;   __u32 packets;   __u32 drops;   __u32 overlimits;   __u32 bps;   __u32 pps;   __u32 qlen;   __u32 backlog; } ;    96     struct tc_sizespec {   unsigned char cell_log;   unsigned char size_log;   short cell_align;   int overhead;   unsigned int linklayer;   unsigned int mpu;   unsigned int mtu;   unsigned int tsize; } ;   122     struct tcf_t {   __u64 install;   __u64 lastuse;   __u64 expires; } ;   471     struct netpoll_info ;   472     struct wireless_dev ;   473     struct wpan_dev ;   474     struct mpls_dev ;    66     enum netdev_tx {   __NETDEV_TX_MIN = -2147483648,   NETDEV_TX_OK = 0,   NETDEV_TX_BUSY = 16 } ;   110     typedef enum netdev_tx netdev_tx_t;   129     struct net_device_stats {   unsigned long rx_packets;   unsigned long tx_packets;   unsigned long rx_bytes;   unsigned long tx_bytes;   unsigned long rx_errors;   unsigned long tx_errors;   unsigned long rx_dropped;   unsigned long tx_dropped;   unsigned long multicast;   unsigned 
long collisions;   unsigned long rx_length_errors;   unsigned long rx_over_errors;   unsigned long rx_crc_errors;   unsigned long rx_frame_errors;   unsigned long rx_fifo_errors;   unsigned long rx_missed_errors;   unsigned long tx_aborted_errors;   unsigned long tx_carrier_errors;   unsigned long tx_fifo_errors;   unsigned long tx_heartbeat_errors;   unsigned long tx_window_errors;   unsigned long rx_compressed;   unsigned long tx_compressed; } ;   192     struct neigh_parms ;   213     struct netdev_hw_addr_list {   struct list_head list;   int count; } ;   218     struct hh_cache {   u16 hh_len;   u16 __pad;   seqlock_t hh_lock;   unsigned long hh_data[16U]; } ;   247     struct header_ops {   int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int);   int (*parse)(const struct sk_buff *, unsigned char *);   int (*cache)(const struct neighbour *, struct hh_cache *, __be16 );   void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *);   bool  (*validate)(const char *, unsigned int); } ;   298     struct napi_struct {   struct list_head poll_list;   unsigned long state;   int weight;   unsigned int gro_count;   int (*poll)(struct napi_struct *, int);   spinlock_t poll_lock;   int poll_owner;   struct net_device *dev;   struct sk_buff *gro_list;   struct sk_buff *skb;   struct hrtimer timer;   struct list_head dev_list;   struct hlist_node napi_hash_node;   unsigned int napi_id; } ;   344     enum rx_handler_result {   RX_HANDLER_CONSUMED = 0,   RX_HANDLER_ANOTHER = 1,   RX_HANDLER_EXACT = 2,   RX_HANDLER_PASS = 3 } ;   392     typedef enum rx_handler_result rx_handler_result_t;   393     typedef rx_handler_result_t  rx_handler_func_t(struct sk_buff **);   538     struct Qdisc ;   538     struct netdev_queue {   struct net_device *dev;   struct Qdisc *qdisc;   struct Qdisc *qdisc_sleeping;   struct kobject kobj;   int numa_node;   unsigned long tx_maxrate;   unsigned long trans_timeout;   spinlock_t _xmit_lock;   int xmit_lock_owner;   unsigned long trans_start;   unsigned long state;   struct dql dql; } ;   609     struct rps_map {   unsigned int len;   struct callback_head rcu;   u16 cpus[0U]; } ;   621     struct rps_dev_flow {   u16 cpu;   u16 filter;   unsigned int last_qtail; } ;   633     struct rps_dev_flow_table {   unsigned int mask;   struct callback_head rcu;   struct rps_dev_flow flows[0U]; } ;   685     struct netdev_rx_queue {   struct rps_map *rps_map;   struct rps_dev_flow_table *rps_flow_table;   struct kobject kobj;   struct net_device *dev; } ;   708     struct xps_map {   unsigned int len;   unsigned int alloc_len;   struct callback_head rcu;   u16 queues[0U]; } ;   721     struct xps_dev_maps {   struct callback_head rcu;   struct xps_map *cpu_map[0U]; } ;   732     struct netdev_tc_txq {   u16 count;   u16 offset; } ;   743     struct netdev_fcoe_hbainfo {   char manufacturer[64U];   char serial_number[64U];   char hardware_version[64U];   char driver_version[64U];   char optionrom_version[64U];   char firmware_version[64U];   char model[256U];   char model_description[256U]; } ;   759     struct netdev_phys_item_id {   unsigned char id[32U];   unsigned char id_len; } ;   785     struct tc_cls_u32_offload ;   786     struct tc_cls_flower_offload ;   786     union __anonunion____missing_field_name_420 {   u8 tc;   struct tc_cls_u32_offload *cls_u32;   struct tc_cls_flower_offload *cls_flower; } ;   786     struct tc_to_netdev {   unsigned int type;   union 
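
napi_struct above carries the deferred-RX state. The contract is that the driver's poll() hands at most 'budget' packets to the stack and calls napi_complete() once its queue is drained. A sketch for a hypothetical driver (a weight of 64 is the customary default):

    #include <linux/netdevice.h>

    static int dummy_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

            /* ... pass up to 'budget' received frames to the stack ... */
            if (done < budget)
                    napi_complete(napi);    /* then re-enable RX interrupts */
            return done;
    }
    /* registered once per queue: netif_napi_add(dev, napi, dummy_poll, 64); */
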
__anonunion____missing_field_name_420 __annonCompField106; } ;   800     struct net_device_ops {   int (*ndo_init)(struct net_device *);   void (*ndo_uninit)(struct net_device *);   int (*ndo_open)(struct net_device *);   int (*ndo_stop)(struct net_device *);   netdev_tx_t  (*ndo_start_xmit)(struct sk_buff *, struct net_device *);   netdev_features_t  (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t );   u16  (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16  (*)(struct net_device *, struct sk_buff *));   void (*ndo_change_rx_flags)(struct net_device *, int);   void (*ndo_set_rx_mode)(struct net_device *);   int (*ndo_set_mac_address)(struct net_device *, void *);   int (*ndo_validate_addr)(struct net_device *);   int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);   int (*ndo_set_config)(struct net_device *, struct ifmap *);   int (*ndo_change_mtu)(struct net_device *, int);   int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);   void (*ndo_tx_timeout)(struct net_device *);   struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);   struct net_device_stats * (*ndo_get_stats)(struct net_device *);   int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 );   int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 );   void (*ndo_poll_controller)(struct net_device *);   int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *);   void (*ndo_netpoll_cleanup)(struct net_device *);   int (*ndo_busy_poll)(struct napi_struct *);   int (*ndo_set_vf_mac)(struct net_device *, int, u8 *);   int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 );   int (*ndo_set_vf_rate)(struct net_device *, int, int, int);   int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool );   int (*ndo_set_vf_trust)(struct net_device *, int, bool );   int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);   int (*ndo_set_vf_link_state)(struct net_device *, int, int);   int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *);   int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);   int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);   int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int);   int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool );   int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *);   int (*ndo_fcoe_enable)(struct net_device *);   int (*ndo_fcoe_disable)(struct net_device *);   int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_ddp_done)(struct net_device *, u16 );   int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *);   int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int);   int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 );   int (*ndo_add_slave)(struct net_device *, struct net_device *);   int (*ndo_del_slave)(struct net_device *, struct net_device *);   netdev_features_t  (*ndo_fix_features)(struct net_device *, netdev_features_t );   int (*ndo_set_features)(struct net_device *, netdev_features_t );   int (*ndo_neigh_construct)(struct neighbour *);   void (*ndo_neigh_destroy)(struct neighbour *);   int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 );   int (*ndo_fdb_del)(struct ndmsg *, 
struct nlattr **, struct net_device *, const unsigned char *, u16 );   int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int);   int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int);   int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_change_carrier)(struct net_device *, bool );   int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *);   int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t );   void (*ndo_add_vxlan_port)(struct net_device *, sa_family_t , __be16 );   void (*ndo_del_vxlan_port)(struct net_device *, sa_family_t , __be16 );   void (*ndo_add_geneve_port)(struct net_device *, sa_family_t , __be16 );   void (*ndo_del_geneve_port)(struct net_device *, sa_family_t , __be16 );   void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);   void (*ndo_dfwd_del_station)(struct net_device *, void *);   netdev_tx_t  (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *);   int (*ndo_get_lock_subclass)(struct net_device *);   int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 );   int (*ndo_get_iflink)(const struct net_device *);   int (*ndo_change_proto_down)(struct net_device *, bool );   int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *);   void (*ndo_set_rx_headroom)(struct net_device *, int); } ;  1337     struct __anonstruct_adj_list_421 {   struct list_head upper;   struct list_head lower; } ;  1337     struct __anonstruct_all_adj_list_422 {   struct list_head upper;   struct list_head lower; } ;  1337     struct iw_handler_def ;  1337     struct iw_public_data ;  1337     struct switchdev_ops ;  1337     struct l3mdev_ops ;  1337     struct vlan_info ;  1337     struct tipc_bearer ;  1337     struct in_device ;  1337     struct dn_dev ;  1337     struct inet6_dev ;  1337     struct tcf_proto ;  1337     struct cpu_rmap ;  1337     struct pcpu_lstats ;  1337     struct pcpu_sw_netstats ;  1337     struct pcpu_dstats ;  1337     struct pcpu_vstats ;  1337     union __anonunion____missing_field_name_423 {   void *ml_priv;   struct pcpu_lstats *lstats;   struct pcpu_sw_netstats *tstats;   struct pcpu_dstats *dstats;   struct pcpu_vstats *vstats; } ;  1337     struct garp_port ;  1337     struct mrp_port ;  1337     struct rtnl_link_ops ;  1337     struct net_device {   char name[16U];   struct hlist_node name_hlist;   char *ifalias;   unsigned long mem_end;   unsigned long mem_start;   unsigned long base_addr;   int irq;   atomic_t carrier_changes;   unsigned long state;   struct list_head dev_list;   struct list_head napi_list;   struct list_head unreg_list;   struct list_head close_list;   struct list_head ptype_all;   struct list_head ptype_specific;   struct __anonstruct_adj_list_421 adj_list;   struct __anonstruct_all_adj_list_422 all_adj_list;   netdev_features_t features;   netdev_features_t hw_features;   netdev_features_t wanted_features;   netdev_features_t vlan_features;   netdev_features_t hw_enc_features;   netdev_features_t mpls_features;   netdev_features_t gso_partial_features;   int ifindex;   int group;   struct net_device_stats stats;   atomic_long_t rx_dropped;   atomic_long_t tx_dropped;   atomic_long_t rx_nohandler;   const struct iw_handler_def *wireless_handlers;   struct iw_public_data *wireless_data;   const struct net_device_ops *netdev_ops;   const struct ethtool_ops 
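
net_device_ops, which closes at the start of this line, is the main driver entry-point table; the smallest useful subset is open/stop/xmit. A sketch that "transmits" by consuming the frame (illustration only, not a real driver):

    #include <linux/netdevice.h>

    static int dummy_open(struct net_device *dev)
    {
            netif_start_queue(dev);         /* let the core call ndo_start_xmit */
            return 0;
    }

    static int dummy_stop(struct net_device *dev)
    {
            netif_stop_queue(dev);
            return 0;
    }

    static netdev_tx_t dummy_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
    {
            dev->stats.tx_packets++;
            dev->stats.tx_bytes += skb->len;
            dev_kfree_skb(skb);             /* no hardware: just consume it */
            return NETDEV_TX_OK;
    }

    static const struct net_device_ops dummy_netdev_ops = {
            .ndo_open       = dummy_open,
            .ndo_stop       = dummy_stop,
            .ndo_start_xmit = dummy_start_xmit,
    };
    /* assigned before register_netdev(): dev->netdev_ops = &dummy_netdev_ops; */
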
*ethtool_ops;   const struct switchdev_ops *switchdev_ops;   const struct l3mdev_ops *l3mdev_ops;   const struct header_ops *header_ops;   unsigned int flags;   unsigned int priv_flags;   unsigned short gflags;   unsigned short padded;   unsigned char operstate;   unsigned char link_mode;   unsigned char if_port;   unsigned char dma;   unsigned int mtu;   unsigned short type;   unsigned short hard_header_len;   unsigned short needed_headroom;   unsigned short needed_tailroom;   unsigned char perm_addr[32U];   unsigned char addr_assign_type;   unsigned char addr_len;   unsigned short neigh_priv_len;   unsigned short dev_id;   unsigned short dev_port;   spinlock_t addr_list_lock;   unsigned char name_assign_type;   bool uc_promisc;   struct netdev_hw_addr_list uc;   struct netdev_hw_addr_list mc;   struct netdev_hw_addr_list dev_addrs;   struct kset *queues_kset;   unsigned int promiscuity;   unsigned int allmulti;   struct vlan_info *vlan_info;   struct dsa_switch_tree *dsa_ptr;   struct tipc_bearer *tipc_ptr;   void *atalk_ptr;   struct in_device *ip_ptr;   struct dn_dev *dn_ptr;   struct inet6_dev *ip6_ptr;   void *ax25_ptr;   struct wireless_dev *ieee80211_ptr;   struct wpan_dev *ieee802154_ptr;   struct mpls_dev *mpls_ptr;   unsigned long last_rx;   unsigned char *dev_addr;   struct netdev_rx_queue *_rx;   unsigned int num_rx_queues;   unsigned int real_num_rx_queues;   unsigned long gro_flush_timeout;   rx_handler_func_t *rx_handler;   void *rx_handler_data;   struct tcf_proto *ingress_cl_list;   struct netdev_queue *ingress_queue;   struct list_head nf_hooks_ingress;   unsigned char broadcast[32U];   struct cpu_rmap *rx_cpu_rmap;   struct hlist_node index_hlist;   struct netdev_queue *_tx;   unsigned int num_tx_queues;   unsigned int real_num_tx_queues;   struct Qdisc *qdisc;   unsigned long tx_queue_len;   spinlock_t tx_global_lock;   int watchdog_timeo;   struct xps_dev_maps *xps_maps;   struct tcf_proto *egress_cl_list;   u32 offload_fwd_mark;   struct timer_list watchdog_timer;   int *pcpu_refcnt;   struct list_head todo_list;   struct list_head link_watch_list;   unsigned char reg_state;   bool dismantle;   unsigned short rtnl_link_state;   void (*destructor)(struct net_device *);   struct netpoll_info *npinfo;   possible_net_t nd_net;   union __anonunion____missing_field_name_423 __annonCompField107;   struct garp_port *garp_port;   struct mrp_port *mrp_port;   struct device dev;   const struct attribute_group *sysfs_groups[4U];   const struct attribute_group *sysfs_rx_queue_group;   const struct rtnl_link_ops *rtnl_link_ops;   unsigned int gso_max_size;   u16 gso_max_segs;   const struct dcbnl_rtnl_ops *dcbnl_ops;   u8 num_tc;   struct netdev_tc_txq tc_to_txq[16U];   u8 prio_tc_map[16U];   unsigned int fcoe_ddp_xid;   struct netprio_map *priomap;   struct phy_device *phydev;   struct lock_class_key *qdisc_tx_busylock;   bool proto_down; } ;  2125     struct packet_type {   __be16 type;   struct net_device *dev;   int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   bool  (*id_match)(struct packet_type *, struct sock *);   void *af_packet_priv;   struct list_head list; } ;  2155     struct pcpu_sw_netstats {   u64 rx_packets;   u64 rx_bytes;   u64 tx_packets;   u64 tx_bytes;   struct u64_stats_sync syncp; } ;   519     struct tcmsg {   unsigned char tcm_family;   unsigned char tcm__pad1;   unsigned short tcm__pad2;   int tcm_ifindex;   __u32 tcm_handle;   __u32 tcm_parent;   __u32 tcm_info; } ;   141     struct nla_policy {   u16 type; 
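
packet_type above is the RX dispatch hook keyed by EtherType; this is, conceptually, how the DSA tagging receive path listed earlier gets attached. A sketch using the IEEE local-experimental EtherType (illustration only):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int dummy_rcv(struct sk_buff *skb, struct net_device *dev,
                         struct packet_type *pt, struct net_device *orig_dev)
    {
            kfree_skb(skb);                 /* a real handler would process it */
            return 0;
    }

    static struct packet_type dummy_packet_type __read_mostly = {
            .type = htons(0x88B5),          /* IEEE 802 local experimental */
            .func = dummy_rcv,
    };
    /* attached with dev_add_pack(&dummy_packet_type); */
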
  u16 len; } ;    27     struct gnet_stats_basic_packed {   __u64 bytes;   __u32 packets; } ;    41     struct gnet_stats_rate_est64 {   __u64 bps;   __u64 pps; } ;    51     struct gnet_stats_queue {   __u32 qlen;   __u32 backlog;   __u32 drops;   __u32 requeues;   __u32 overlimits; } ;    77     struct gnet_stats_basic_cpu {   struct gnet_stats_basic_packed bstats;   struct u64_stats_sync syncp; } ;    13     struct gnet_dump {   spinlock_t *lock;   struct sk_buff *skb;   struct nlattr *tail;   int compat_tc_stats;   int compat_xstats;   int padattr;   void *xstats;   int xstats_len;   struct tc_stats tc_stats; } ;    25     struct rtnl_link_ops {   struct list_head list;   const char *kind;   size_t priv_size;   void (*setup)(struct net_device *);   int maxtype;   const struct nla_policy *policy;   int (*validate)(struct nlattr **, struct nlattr **);   int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **);   int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **);   void (*dellink)(struct net_device *, struct list_head *);   size_t  (*get_size)(const struct net_device *);   int (*fill_info)(struct sk_buff *, const struct net_device *);   size_t  (*get_xstats_size)(const struct net_device *);   int (*fill_xstats)(struct sk_buff *, const struct net_device *);   unsigned int (*get_num_tx_queues)();   unsigned int (*get_num_rx_queues)();   int slave_maxtype;   const struct nla_policy *slave_policy;   int (*slave_validate)(struct nlattr **, struct nlattr **);   int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **);   size_t  (*get_slave_size)(const struct net_device *, const struct net_device *);   int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *);   struct net * (*get_link_net)(const struct net_device *);   size_t  (*get_linkxstats_size)(const struct net_device *);   int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *); } ;   157     struct Qdisc_ops ;   158     struct qdisc_walker ;   159     struct tcf_walker ;    35     struct qdisc_size_table {   struct callback_head rcu;   struct list_head list;   struct tc_sizespec szopts;   int refcnt;   u16 data[]; } ;    46     struct Qdisc {   int (*enqueue)(struct sk_buff *, struct Qdisc *);   struct sk_buff * (*dequeue)(struct Qdisc *);   unsigned int flags;   u32 limit;   const struct Qdisc_ops *ops;   struct qdisc_size_table *stab;   struct list_head list;   u32 handle;   u32 parent;   int (*reshape_fail)(struct sk_buff *, struct Qdisc *);   void *u32_node;   struct Qdisc *__parent;   struct netdev_queue *dev_queue;   struct gnet_stats_rate_est64 rate_est;   struct gnet_stats_basic_cpu *cpu_bstats;   struct gnet_stats_queue *cpu_qstats;   struct Qdisc *next_sched;   struct sk_buff *gso_skb;   unsigned long state;   struct sk_buff_head q;   struct gnet_stats_basic_packed bstats;   unsigned int __state;   struct gnet_stats_queue qstats;   struct callback_head callback_head;   int padded;   atomic_t refcnt;   spinlock_t busylock; } ;   152     struct Qdisc_class_ops {   struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);   int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **);   struct Qdisc * (*leaf)(struct Qdisc *, unsigned long);   void (*qlen_notify)(struct Qdisc *, unsigned long);   unsigned long int (*get)(struct Qdisc *, u32 );   void (*put)(struct Qdisc *, unsigned long);   int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, 
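
rtnl_link_ops above is what "ip link add ... type <kind>" resolves against. A minimal sketch (hypothetical kind; rtnl_link_register()/rtnl_link_unregister() are the standard entry points, not declared in this trace):

    #include <net/rtnetlink.h>

    static void dummy_link_setup(struct net_device *dev)
    {
            /* initialize dev->netdev_ops, MTU, flags, a random MAC, ... */
    }

    static struct rtnl_link_ops dummy_link_ops __read_mostly = {
            .kind  = "dummy-link",
            .setup = dummy_link_setup,
    };
    /* module init: rtnl_link_register(&dummy_link_ops); */
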
unsigned long *);   int (*delete)(struct Qdisc *, unsigned long);   void (*walk)(struct Qdisc *, struct qdisc_walker *);   struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);   unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 );   void (*unbind_tcf)(struct Qdisc *, unsigned long);   int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *);   int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ;   180     struct Qdisc_ops {   struct Qdisc_ops *next;   const struct Qdisc_class_ops *cl_ops;   char id[16U];   int priv_size;   int (*enqueue)(struct sk_buff *, struct Qdisc *);   struct sk_buff * (*dequeue)(struct Qdisc *);   struct sk_buff * (*peek)(struct Qdisc *);   unsigned int (*drop)(struct Qdisc *);   int (*init)(struct Qdisc *, struct nlattr *);   void (*reset)(struct Qdisc *);   void (*destroy)(struct Qdisc *);   int (*change)(struct Qdisc *, struct nlattr *);   void (*attach)(struct Qdisc *);   int (*dump)(struct Qdisc *, struct sk_buff *);   int (*dump_stats)(struct Qdisc *, struct gnet_dump *);   struct module *owner; } ;   204     struct tcf_result {   unsigned long class;   u32 classid; } ;   210     struct tcf_proto_ops {   struct list_head head;   char kind[16U];   int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);   int (*init)(struct tcf_proto *);   bool  (*destroy)(struct tcf_proto *, bool );   unsigned long int (*get)(struct tcf_proto *, u32 );   int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool );   int (*delete)(struct tcf_proto *, unsigned long);   void (*walk)(struct tcf_proto *, struct tcf_walker *);   int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *);   struct module *owner; } ;   235     struct tcf_proto {   struct tcf_proto *next;   void *root;   int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);   __be16 protocol;   u32 prio;   u32 classid;   struct Qdisc *q;   void *data;   const struct tcf_proto_ops *ops;   struct callback_head rcu; } ;   253     struct qdisc_skb_cb {   unsigned int pkt_len;   u16 slave_dev_queue_mapping;   u16 tc_classid;   unsigned char data[20U]; } ;   844     struct qdisc_walker {   int stop;   int skip;   int count;   int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); } ;   134     struct tc_ife {   __u32 index;   __u32 capab;   int action;   int refcnt;   int bindcnt;   __u16 flags; } ;    28     struct net_generic {   unsigned int len;   struct callback_head rcu;   void *ptr[0U]; } ;    40     union __anonunion___u_447 {   struct net_generic *__val;   char __c[1U]; } ;    46     struct tcf_common {   struct hlist_node tcfc_head;   u32 tcfc_index;   int tcfc_refcnt;   int tcfc_bindcnt;   u32 tcfc_capab;   int tcfc_action;   struct tcf_t tcfc_tm;   struct gnet_stats_basic_packed tcfc_bstats;   struct gnet_stats_queue tcfc_qstats;   struct gnet_stats_rate_est64 tcfc_rate_est;   spinlock_t tcfc_lock;   struct callback_head tcfc_rcu;   struct gnet_stats_basic_cpu *cpu_bstats;   struct gnet_stats_queue *cpu_qstats; } ;    29     struct tcf_hashinfo {   struct hlist_head *htab;   unsigned int hmask;   spinlock_t lock;   u32 index; } ;    80     struct tc_action_ops ;    80     struct tc_action {   void *priv;   const struct tc_action_ops *ops;   __u32 type;   __u32 order;   struct list_head list;   struct tcf_hashinfo *hinfo; } ;    89     struct tc_action_ops {   struct list_head head;  
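
Qdisc_ops above packages a queueing discipline; in the simplest case enqueue/dequeue operate on the Qdisc's built-in sk_buff_head. A pfifo-like sketch using helpers from net/sch_generic.h (register_qdisc() is the usual registration call, not listed in this trace):

    #include <linux/module.h>
    #include <net/pkt_sched.h>
    #include <net/sch_generic.h>

    static int dummy_enqueue(struct sk_buff *skb, struct Qdisc *sch)
    {
            if (skb_queue_len(&sch->q) < sch->limit)
                    return qdisc_enqueue_tail(skb, sch);
            return qdisc_drop(skb, sch);    /* over limit: count a drop */
    }

    static struct sk_buff *dummy_dequeue(struct Qdisc *sch)
    {
            return qdisc_dequeue_head(sch);
    }

    static int dummy_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
    {
            sch->limit = qdisc_dev(sch)->tx_queue_len;  /* as pfifo does */
            return 0;
    }

    static struct Qdisc_ops dummy_qdisc_ops __read_mostly = {
            .id      = "dummyfifo",
            .enqueue = dummy_enqueue,
            .dequeue = dummy_dequeue,
            .peek    = qdisc_peek_head,
            .init    = dummy_qdisc_init,
            .owner   = THIS_MODULE,
    };
    /* module init: register_qdisc(&dummy_qdisc_ops); */
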
 char kind[16U];   __u32 type;   struct module *owner;   int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);   int (*dump)(struct sk_buff *, struct tc_action *, int, int);   void (*cleanup)(struct tc_action *, int);   int (*lookup)(struct net *, struct tc_action *, u32 );   int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action *, int, int);   int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, struct tc_action *);   void (*stats_update)(struct tc_action *, u64 , u32 , u64 ); } ;   111     struct tc_action_net {   struct tcf_hashinfo *hinfo;   const struct tc_action_ops *ops; } ;   191     struct tcf_ife_info {   struct tcf_common common;   u8 eth_dst[6U];   u8 eth_src[6U];   u16 eth_type;   u16 flags;   struct list_head metalist; } ;    19     struct tcf_meta_ops ;    19     struct tcf_meta_info {   const struct tcf_meta_ops *ops;   void *metaval;   u16 metaid;   struct list_head metalist; } ;    28     struct tcf_meta_ops {   u16 metaid;   u16 metatype;   const char *name;   const char *synopsis;   struct list_head list;   int (*check_presence)(struct sk_buff *, struct tcf_meta_info *);   int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);   int (*decode)(struct sk_buff *, void *, u16 );   int (*get)(struct sk_buff *, struct tcf_meta_info *);   int (*alloc)(struct tcf_meta_info *, void *);   void (*release)(struct tcf_meta_info *);   int (*validate)(void *, int);   struct module *owner; } ;   605     struct ifeheadr {   __be16 metalen;   u8 tlv_data[]; } ;   610     struct meta_tlvhdr {   __be16 type;   __be16 len; } ;     1     void * __builtin_memcpy(void *, const void *, unsigned long);     1     long int __builtin_expect(long exp, long c);   220     void __read_once_size(const volatile void *p, void *res, int size);   245     void __write_once_size(volatile void *p, void *res, int size);    33     extern struct module __this_module;     7     __u32  __arch_swab32(__u32 val);    46     __u16  __fswab16(__u16 val);    55     __u32  __fswab32(__u32 val);   154     int printk(const char *, ...);     8     void ldv_spin_lock();     9     void ldv_spin_unlock();    26     void * ldv_undef_ptr();    25     void INIT_LIST_HEAD(struct list_head *list);    48     void __list_add(struct list_head *, struct list_head *, struct list_head *);    75     void list_add_tail(struct list_head *new, struct list_head *head);   113     void list_del(struct list_head *);   187     int list_empty(const struct list_head *head);    87     void __bad_percpu_size();    71     void warn_slowpath_null(const char *, const int);    31     void * __memcpy(void *, const void *, size_t );    56     void * __memset(void *, int, size_t );    66     int strcmp(const char *, const char *);   125     void * kmemdup(const void *, size_t , gfp_t );    24     int atomic_read(const atomic_t *v);     8     extern int __preempt_count;    67     void __preempt_count_add(int val);    72     void __preempt_count_sub(int val);   334     void lock_acquire(struct lockdep_map *, unsigned int, int, int, int, struct lockdep_map *, unsigned long);   338     void lock_release(struct lockdep_map *, int, unsigned long);   571     void lockdep_rcu_suspicious(const char *, const int, const char *);    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    22     void _raw_spin_lock(raw_spinlock_t *);    30     void _raw_spin_lock_bh(raw_spinlock_t *);    41     void _raw_spin_unlock(raw_spinlock_t *);    42     void 
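
ifeheadr and meta_tlvhdr just above define the on-wire IFE metadata block: a 16-bit total length followed by type/length-prefixed values, one per registered tcf_meta_ops. A simplified sketch of the walk the decode path performs (length validation omitted; the real module also checks each TLV against the remaining room):

    #include <linux/skbuff.h>

    static void dummy_walk_ife_tlvs(struct ifeheadr *ifehdr)
    {
            int room = ntohs(ifehdr->metalen) - sizeof(struct ifeheadr);
            u8 *cursor = ifehdr->tlv_data;

            while (room > 0) {
                    struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)cursor;
                    u16 type = ntohs(tlv->type);
                    u16 len  = ntohs(tlv->len);   /* header plus value bytes */

                    /* dispatch on 'type' to the matching tcf_meta_ops->decode() */

                    cursor += len;
                    room   -= len;
            }
    }
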
_raw_spin_unlock_bh(raw_spinlock_t *);    18     void _raw_read_lock(rwlock_t *);    19     void _raw_write_lock(rwlock_t *);    30     void _raw_read_unlock(rwlock_t *);    31     void _raw_write_unlock(rwlock_t *);   289     raw_spinlock_t * spinlock_check(spinlock_t *lock);   300     void ldv_spin_lock_5(spinlock_t *lock);   300     void spin_lock(spinlock_t *lock);   309     void ldv_spin_lock_bh_6(spinlock_t *lock);   309     void spin_lock_bh(spinlock_t *lock);   349     void ldv_spin_unlock_9(spinlock_t *lock);   349     void spin_unlock(spinlock_t *lock);   358     void ldv_spin_unlock_bh_10(spinlock_t *lock);   358     void spin_unlock_bh(spinlock_t *lock);    78     extern volatile unsigned long jiffies;   435     clock_t  jiffies_to_clock_t(unsigned long);   306     void __rcu_read_lock();   312     void __rcu_read_unlock();   110     bool  rcu_is_watching();   484     void rcu_lock_acquire(struct lockdep_map *map);   489     void rcu_lock_release(struct lockdep_map *map);   494     extern struct lockdep_map rcu_lock_map;   498     int debug_lockdep_rcu_enabled();   500     int rcu_read_lock_held();   851     void rcu_read_lock();   905     void rcu_read_unlock();    36     int __request_module(bool , const char *, ...);   594     bool  try_module_get(struct module *);   596     void module_put(struct module *);   154     void kfree(const void *);   322     void * ldv_kmem_cache_alloc_20(struct kmem_cache *ldv_func_arg1, gfp_t flags);   466     void * kmalloc(size_t size, gfp_t flags);    18     void ldv_check_alloc_flags(gfp_t flags);    47     int ___ratelimit(struct ratelimit_state *, const char *);   954     struct sk_buff * ldv_skb_clone_30(struct sk_buff *ldv_func_arg1, gfp_t flags);   958     struct sk_buff * ldv_skb_clone_39(struct sk_buff *ldv_func_arg1, gfp_t flags);   963     struct sk_buff * ldv_skb_copy_32(const struct sk_buff *ldv_func_arg1, gfp_t flags);   976     int ldv_pskb_expand_head_27(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   980     int ldv_pskb_expand_head_28(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   984     int ldv_pskb_expand_head_36(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   988     int ldv_pskb_expand_head_37(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);   992     int ldv_pskb_expand_head_38(struct sk_buff *ldv_func_arg1, int ldv_func_arg2, int ldv_func_arg3, gfp_t flags);  1165     unsigned char * skb_end_pointer(const struct sk_buff *skb);  1316     int skb_header_cloned(const struct sk_buff *skb);  1771     unsigned int skb_headlen(const struct sk_buff *skb);  1849     unsigned char * skb_tail_pointer(const struct sk_buff *skb);  1897     unsigned char * skb_push(struct sk_buff *, unsigned int);  1898     unsigned char * __skb_push(struct sk_buff *skb, unsigned int len);  1905     unsigned char * skb_pull(struct sk_buff *, unsigned int);  1906     unsigned char * __skb_pull(struct sk_buff *skb, unsigned int len);  1918     unsigned char * __pskb_pull_tail(struct sk_buff *, int);  1934     int pskb_may_pull(struct sk_buff *skb, unsigned int len);  1949     unsigned int skb_headroom(const struct sk_buff *skb);  2130     void skb_reset_network_header(struct sk_buff *skb);  2141     unsigned char * skb_mac_header(const struct sk_buff *skb);  2151     void skb_reset_mac_header(struct sk_buff *skb);  2156     void skb_set_mac_header(struct sk_buff *skb, const int offset);  2290     void 
skb_trim(struct sk_buff *, unsigned int);  2377     struct sk_buff * ldv___netdev_alloc_skb_33(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2381     struct sk_buff * ldv___netdev_alloc_skb_34(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2385     struct sk_buff * ldv___netdev_alloc_skb_35(struct net_device *ldv_func_arg1, unsigned int ldv_func_arg2, gfp_t flags);  2671     int __skb_cow(struct sk_buff *skb, unsigned int headroom, int cloned);  2712     int skb_cow_head(struct sk_buff *skb, unsigned int headroom);  3666     bool  skb_is_gso(const struct sk_buff *skb);    26     struct ethhdr * eth_hdr(const struct sk_buff *skb);    27     void rtnl_lock();    28     void rtnl_unlock();   241     int nla_parse(struct nlattr **, int, const struct nlattr *, int, const struct nla_policy *);   262     int nla_put(struct sk_buff *, int, int, const void *);   263     int nla_put_64bit(struct sk_buff *, int, int, const void *, int);   531     void nlmsg_trim(struct sk_buff *skb, const void *mark);   644     int nla_attr_size(int payload);   653     int nla_total_size(int payload);   680     void * nla_data(const struct nlattr *nla);   689     int nla_len(const struct nlattr *nla);   744     int nla_parse_nested(struct nlattr **tb, int maxtype, const struct nlattr *nla, const struct nla_policy *policy);   768     int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value);   812     int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value);  1043     u16  nla_get_u16(const struct nlattr *nla);  1200     struct nlattr * nla_nest_start(struct sk_buff *skb, int attrtype);  1220     int nla_nest_end(struct sk_buff *skb, struct nlattr *start);    36     __be16  eth_type_trans(struct sk_buff *, struct net_device *);    96     bool  is_zero_ether_addr(const u8 *addr);   247     void eth_zero_addr(u8 *addr);   274     void ether_addr_copy(u8 *dst, const u8 *src);   275     struct qdisc_skb_cb * qdisc_skb_cb(const struct sk_buff *skb);   491     unsigned int qdisc_pkt_len(const struct sk_buff *skb);   530     void _bstats_update(struct gnet_stats_basic_packed *bstats, __u64 bytes, __u32 packets);   537     void bstats_update(struct gnet_stats_basic_packed *bstats, const struct sk_buff *skb);    34     void * net_generic(const struct net *net, int id);    54     int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask);   118     int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops, unsigned int mask);   133     void tcf_hashinfo_destroy(const struct tc_action_ops *, struct tcf_hashinfo *);   136     void tc_action_net_exit(struct tc_action_net *tn);   142     int tcf_generic_walker(struct tc_action_net *, struct sk_buff *, struct netlink_callback *, int, struct tc_action *);   145     int tcf_hash_search(struct tc_action_net *, struct tc_action *, u32 );   147     int tcf_hash_check(struct tc_action_net *, u32 , struct tc_action *, int);   149     int tcf_hash_create(struct tc_action_net *, u32 , struct nlattr *, struct tc_action *, int, int, bool );   152     void tcf_hash_insert(struct tc_action_net *, struct tc_action *);   154     int __tcf_hash_release(struct tc_action *, bool , bool );   156     int tcf_hash_release(struct tc_action *a, bool bind);   161     int tcf_register_action(struct tc_action_ops *, struct pernet_operations *);   162     int tcf_unregister_action(struct tc_action_ops *, struct pernet_operations *);    47     int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);  
  48     int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);    49     int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval);    51     int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);    52     int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);    53     int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);    54     int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);    55     int ife_validate_meta_u32(void *val, int len);    56     int ife_validate_meta_u16(void *val, int len);    57     void ife_release_meta_gen(struct tcf_meta_info *mi);    58     int register_ife_op(struct tcf_meta_ops *mops);    59     int unregister_ife_op(struct tcf_meta_ops *mops);    39     int ife_net_id = 0;    40     int max_metacnt = 5;    42     const struct nla_policy ife_policy[8U] = { { (unsigned short)0, (unsigned short)0 }, { (unsigned short)0, 24U }, { (unsigned short)0, (unsigned short)0 }, { (unsigned short)0, 6U }, { (unsigned short)0, 6U }, { 2U, (unsigned short)0 } };    64     const char __kstrtab_ife_tlv_meta_encode[20U] = { 'i', 'f', 'e', '_', 't', 'l', 'v', '_', 'm', 'e', 't', 'a', '_', 'e', 'n', 'c', 'o', 'd', 'e', '\x0' };    64     const struct kernel_symbol __ksymtab_ife_tlv_meta_encode;    64     const struct kernel_symbol __ksymtab_ife_tlv_meta_encode = { (unsigned long)(&ife_tlv_meta_encode), (const char *)(&__kstrtab_ife_tlv_meta_encode) };    73     const char __kstrtab_ife_get_meta_u32[17U] = { 'i', 'f', 'e', '_', 'g', 'e', 't', '_', 'm', 'e', 't', 'a', '_', 'u', '3', '2', '\x0' };    73     const struct kernel_symbol __ksymtab_ife_get_meta_u32;    73     const struct kernel_symbol __ksymtab_ife_get_meta_u32 = { (unsigned long)(&ife_get_meta_u32), (const char *)(&__kstrtab_ife_get_meta_u32) };    82     const char __kstrtab_ife_check_meta_u32[19U] = { 'i', 'f', 'e', '_', 'c', 'h', 'e', 'c', 'k', '_', 'm', 'e', 't', 'a', '_', 'u', '3', '2', '\x0' };    82     const struct kernel_symbol __ksymtab_ife_check_meta_u32;    82     const struct kernel_symbol __ksymtab_ife_check_meta_u32 = { (unsigned long)(&ife_check_meta_u32), (const char *)(&__kstrtab_ife_check_meta_u32) };    99     const char __kstrtab_ife_encode_meta_u32[20U] = { 'i', 'f', 'e', '_', 'e', 'n', 'c', 'o', 'd', 'e', '_', 'm', 'e', 't', 'a', '_', 'u', '3', '2', '\x0' };    99     const struct kernel_symbol __ksymtab_ife_encode_meta_u32;    99     const struct kernel_symbol __ksymtab_ife_encode_meta_u32 = { (unsigned long)(&ife_encode_meta_u32), (const char *)(&__kstrtab_ife_encode_meta_u32) };   108     const char __kstrtab_ife_get_meta_u16[17U] = { 'i', 'f', 'e', '_', 'g', 'e', 't', '_', 'm', 'e', 't', 'a', '_', 'u', '1', '6', '\x0' };   108     const struct kernel_symbol __ksymtab_ife_get_meta_u16;   108     const struct kernel_symbol __ksymtab_ife_get_meta_u16 = { (unsigned long)(&ife_get_meta_u16), (const char *)(&__kstrtab_ife_get_meta_u16) };   118     const char __kstrtab_ife_alloc_meta_u32[19U] = { 'i', 'f', 'e', '_', 'a', 'l', 'l', 'o', 'c', '_', 'm', 'e', 't', 'a', '_', 'u', '3', '2', '\x0' };   118     const struct kernel_symbol __ksymtab_ife_alloc_meta_u32;   118     const struct kernel_symbol __ksymtab_ife_alloc_meta_u32 = { (unsigned long)(&ife_alloc_meta_u32), (const char *)(&__kstrtab_ife_alloc_meta_u32) };   128     const char __kstrtab_ife_alloc_meta_u16[19U] = { 'i', 'f', 'e', '_', 'a', 'l', 'l', 'o', 'c', '_', 'm', 'e', 't', 'a', '_', 'u', '1', '6', '\x0' };   128     const struct kernel_symbol 
__ksymtab_ife_alloc_meta_u16;   128     const struct kernel_symbol __ksymtab_ife_alloc_meta_u16 = { (unsigned long)(&ife_alloc_meta_u16), (const char *)(&__kstrtab_ife_alloc_meta_u16) };   134     const char __kstrtab_ife_release_meta_gen[21U] = { 'i', 'f', 'e', '_', 'r', 'e', 'l', 'e', 'a', 's', 'e', '_', 'm', 'e', 't', 'a', '_', 'g', 'e', 'n', '\x0' };   134     const struct kernel_symbol __ksymtab_ife_release_meta_gen;   134     const struct kernel_symbol __ksymtab_ife_release_meta_gen = { (unsigned long)(&ife_release_meta_gen), (const char *)(&__kstrtab_ife_release_meta_gen) };   143     const char __kstrtab_ife_validate_meta_u32[22U] = { 'i', 'f', 'e', '_', 'v', 'a', 'l', 'i', 'd', 'a', 't', 'e', '_', 'm', 'e', 't', 'a', '_', 'u', '3', '2', '\x0' };   143     const struct kernel_symbol __ksymtab_ife_validate_meta_u32;   143     const struct kernel_symbol __ksymtab_ife_validate_meta_u32 = { (unsigned long)(&ife_validate_meta_u32), (const char *)(&__kstrtab_ife_validate_meta_u32) };   153     const char __kstrtab_ife_validate_meta_u16[22U] = { 'i', 'f', 'e', '_', 'v', 'a', 'l', 'i', 'd', 'a', 't', 'e', '_', 'm', 'e', 't', 'a', '_', 'u', '1', '6', '\x0' };   153     const struct kernel_symbol __ksymtab_ife_validate_meta_u16;   153     const struct kernel_symbol __ksymtab_ife_validate_meta_u16 = { (unsigned long)(&ife_validate_meta_u16), (const char *)(&__kstrtab_ife_validate_meta_u16) };   155     struct list_head ifeoplist = { &ifeoplist, &ifeoplist };   156     struct __anonstruct_rwlock_t_76 ife_mod_lock = { { { 0 }, { { 0 } } }, 3736018669U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "ife_mod_lock", 0, 0UL } };   158     struct tcf_meta_ops * find_ife_oplist(u16 metaid);   202     const char __kstrtab_unregister_ife_op[18U] = { 'u', 'n', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 'i', 'f', 'e', '_', 'o', 'p', '\x0' };   202     const struct kernel_symbol __ksymtab_unregister_ife_op;   202     const struct kernel_symbol __ksymtab_unregister_ife_op = { (unsigned long)(&unregister_ife_op), (const char *)(&__kstrtab_unregister_ife_op) };   221     const char __kstrtab_register_ife_op[16U] = { 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 'i', 'f', 'e', '_', 'o', 'p', '\x0' };   221     const struct kernel_symbol __ksymtab_register_ife_op;   221     const struct kernel_symbol __ksymtab_register_ife_op = { (unsigned long)(®ister_ife_op), (const char *)(&__kstrtab_register_ife_op) };   223     int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len);   246     int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, void *val, int len);   278     int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, int len);   311     int use_all_metadata(struct tcf_ife_info *ife);   329     int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife);   362     void _tcf_ife_cleanup(struct tc_action *a, int bind);   380     void tcf_ife_cleanup(struct tc_action *a, int bind);   390     int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb);   415     int tcf_ife_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind);   540     int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref);   588     int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, u16 metaid, u16 mlen, void *mdata);   616     int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res);   673     int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife);   688     int 
tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res);   792     int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res);   813     int tcf_ife_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, struct tc_action *a);   822     int tcf_ife_search(struct net *net, struct tc_action *a, u32 index);   829     struct tc_action_ops act_ife_ops = { { 0, 0 }, { 'i', 'f', 'e', '\x0' }, 25U, &__this_module, &tcf_ife_act, &tcf_ife_dump, &tcf_ife_cleanup, &tcf_ife_search, &tcf_ife_init, &tcf_ife_walker, 0 };   841     int ife_init_net(struct net *net);   848     void ife_exit_net(struct net *net);   855     struct pernet_operations ife_net_ops = { { 0, 0 }, &ife_init_net, &ife_exit_net, 0, &ife_net_id, 16UL };   862     int ife_init_module();   867     void ife_cleanup_module();   895     void ldv_check_final_state();   904     void ldv_initialize();   907     void ldv_handler_precall();   910     int nondet_int();   913     int LDV_IN_INTERRUPT = 0;   916     void ldv_main0_sequence_infinite_withcheck_stateful();    10     void ldv_error();    25     int ldv_undef_int();     7     bool  ldv_is_err(const void *ptr);    14     void * ldv_err_ptr(long error);    21     long int ldv_ptr_err(const void *ptr);    28     bool  ldv_is_err_or_null(const void *ptr);    20     int ldv_spin = 0;    30     struct page * ldv_some_page();    33     struct page * ldv_check_alloc_flags_and_return_some_page(gfp_t flags);    42     void ldv_check_alloc_nonatomic();    63     int ldv_spin_trylock();           return ;         }        {       918     struct sk_buff *var_group1;   919     const struct tc_action *var_tcf_ife_act_27_p1;   920     struct tcf_result *var_tcf_ife_act_27_p2;   921     struct tc_action *var_group2;   922     int var_tcf_ife_dump_22_p2;   923     int var_tcf_ife_dump_22_p3;   924     int var_tcf_ife_cleanup_19_p1;   925     struct net *var_group3;   926     struct nlattr *var_group4;   927     struct nlattr *var_tcf_ife_init_21_p2;   928     struct tc_action *var_tcf_ife_init_21_p3;   929     int var_tcf_ife_init_21_p4;   930     int var_tcf_ife_init_21_p5;   931     struct netlink_callback *var_tcf_ife_walker_28_p2;   932     int var_tcf_ife_walker_28_p3;   933     struct tc_action *var_tcf_ife_walker_28_p4;   934     unsigned int var_tcf_ife_search_29_p2;   935     int tmp;   936     int tmp___0;   937     int tmp___1;  1014     LDV_IN_INTERRUPT = 1;  1023     ldv_initialize() { /* Function call is skipped due to function is undefined */}  1033     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {   864       int tmp;   864       tmp = tcf_register_action(&act_ife_ops, &ife_net_ops) { /* Function call is skipped due to function is undefined */}           } 1041     goto ldv_50003;  1041     tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}  1043     goto ldv_50002;  1042     ldv_50002:;  1044     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  1044     switch (tmp___0);  1121     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}  1122     -tcf_ife_init(var_group3, var_group4, var_tcf_ife_init_21_p2, var_tcf_ife_init_21_p3, var_tcf_ife_init_21_p4, var_tcf_ife_init_21_p5)           {         }  418       struct tc_action_net *tn;   419       void *tmp;   420       struct nlattr *tb[8U];   421       struct nlattr *tb2[5U];   422       struct 
tcf_ife_info *ife;   423       struct tc_ife *parm;   424       unsigned short ife_type;   425       u8 *daddr;   426       u8 *saddr;   427       int ret;   428       int exists;   429       int err;   430       void *tmp___0;   431       const struct tcf_common *__mptr;   432       void *tmp___1;   433       void *tmp___2;             {    36         struct net_generic *ng;    37         void *ptr;    38         struct net_generic *________p1;    39         struct net_generic *_________p1;    40         union __anonunion___u_447 __u;    41         _Bool __warned;    42         int tmp;    43         int tmp___0;               {   853           _Bool __warned;   854           int tmp;   855           _Bool tmp___0;   856           int tmp___1;                 {                   {   309             Ignored inline assembler code   69               int pao_ID__;    69               pao_ID__ = 0;    69               switch (4UL);    70               __case__[4UL == 4UL]69 skipped uneccesary edges    71               return ;;                   }}                 {   486             lock_acquire(map, 0U, 0, 2, 0, (struct lockdep_map *)0, 0UL) { /* Function call is skipped due to function is undefined */}                 }  856           tmp = debug_lockdep_rcu_enabled() { /* Function call is skipped due to function is undefined */}               }   40         _________p1 = __u.__val;    40         ________p1 = _________p1;    40         tmp = debug_lockdep_rcu_enabled() { /* Function call is skipped due to function is undefined */}    40         ng = ________p1;    41         ptr = (ng->ptr)[id + -1];               {             }  907           _Bool __warned;   908           int tmp;   909           _Bool tmp___0;   910           int tmp___1;   907           tmp = debug_lockdep_rcu_enabled() { /* Function call is skipped due to function is undefined */}                 {               }315 Ignored inline assembler code                   {                 }   74               int pao_ID__;    74               pao_ID__ = 0;    74               switch (4UL);    75               __case__[4UL == 4UL]74 skipped uneccesary edges    76               return ;;                   }  419       tn = (struct tc_action_net *)tmp;   424       ife_type = 0U;   425       daddr = (u8 *)0U;   426       saddr = (u8 *)0U;   427       ret = 0;   427       exists = 0;   430       -nla_parse_nested((struct nlattr **)(&tb), 7, (const struct nlattr *)nla, (const struct nla_policy *)(&ife_policy))             {   747         int tmp;   748         void *tmp___0;   749         int tmp___1;   748         tmp___1 = nla_parse(tb, maxtype, (const struct nlattr *)tmp___0, tmp, policy) { /* Function call is skipped due to function is undefined */}             }  437       parm = (struct tc_ife *)tmp___0;   439       exists = tcf_hash_check(tn, parm->index, a, bind) { /* Function call is skipped due to function is undefined */}   443       int __CPAchecker_TMP_0 = (int)(parm->flags);             {   158         int tmp;   158         tmp = __tcf_hash_release(a, (int)bind, 0) { /* Function call is skipped due to function is undefined */}             }  467       const struct tcf_common *__CPAchecker_TMP_1 = (const struct tcf_common *)(a->priv);   467       __mptr = __CPAchecker_TMP_1;   467       ife = (struct tcf_ife_info *)__mptr;   468       ife->flags = parm->flags;   470       int __CPAchecker_TMP_2 = (int)(parm->flags);             {               {             }  311           
_raw_spin_lock_bh(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */}               }  479       ife->common.tcfc_action = parm->action;   481       int __CPAchecker_TMP_3 = (int)(parm->flags);             {   249         __memset((void *)addr, 0, 6UL) { /* Function call is skipped due to function is undefined */}             }            {   249         __memset((void *)addr, 0, 6UL) { /* Function call is skipped due to function is undefined */}             }  492       ife->eth_type = ife_type;             {           }  313         struct tcf_meta_ops *o;   314         int rc;   315         int installed;   316         const struct list_head *__mptr;   317         const struct list_head *__mptr___0;   314         rc = 0;   315         installed = 0;   317         __mptr = (const struct list_head *)(ifeoplist.next);   317         o = ((struct tcf_meta_ops *)__mptr) + 18446744073709551592UL;   317         goto ldv_49742;   319         goto ldv_49741;   318         ldv_49741:;   318         u32 __CPAchecker_TMP_0 = (u32 )(o->metaid);               {             }  280           struct tcf_meta_info *mi;   281           struct tcf_meta_ops *ops;   282           struct tcf_meta_ops *tmp;   283           int ret;   284           void *tmp___0;   281           mi = (struct tcf_meta_info *)0;                 {   160             struct tcf_meta_ops *o;   161             const struct list_head *__mptr;   162             _Bool tmp;   163             int tmp___0;   164             const struct list_head *__mptr___0;   162             _raw_read_lock(&ife_mod_lock) { /* Function call is skipped due to function is undefined */}   163             __mptr = (const struct list_head *)(ifeoplist.next);   163             o = ((struct tcf_meta_ops *)__mptr) + 18446744073709551592UL;   163             goto ldv_49670;   165             goto ldv_49669;   164             ldv_49669:;   164             int __CPAchecker_TMP_0 = (int)(o->metaid);   165             tmp = try_module_get(o->owner) { /* Function call is skipped due to function is undefined */}   165             tmp___0 = 0;   167             _raw_read_unlock(&ife_mod_lock) { /* Function call is skipped due to function is undefined */}                 }  282           ops = tmp;   283           ret = 0;               } |              Source code         
     1 #ifndef __ASM_PREEMPT_H
    2 #define __ASM_PREEMPT_H
    3 
    4 #include <asm/rmwcc.h>
    5 #include <asm/percpu.h>
    6 #include <linux/thread_info.h>
    7 
    8 DECLARE_PER_CPU(int, __preempt_count);
    9 
   10 /*
   11  * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
   12  * that a decrement hitting 0 means we can and should reschedule.
   13  */
   14 #define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
   15 
   16 /*
   17  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
   18  * that think a non-zero value indicates we cannot preempt.
   19  */
   20 static __always_inline int preempt_count(void)
   21 {
   22 	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
   23 }
   24 
   25 static __always_inline void preempt_count_set(int pc)
   26 {
   27 	raw_cpu_write_4(__preempt_count, pc);
   28 }
   29 
   30 /*
   31  * must be macros to avoid header recursion hell
   32  */
   33 #define init_task_preempt_count(p) do { } while (0)
   34 
   35 #define init_idle_preempt_count(p, cpu) do { \
   36 	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
   37 } while (0)
   38 
   39 /*
   40  * We fold the NEED_RESCHED bit into the preempt count such that
   41  * preempt_enable() can decrement and test for needing to reschedule with a
   42  * single instruction.
   43  *
   44  * We invert the actual bit, so that when the decrement hits 0 we know we both
   45  * need to resched (the bit is cleared) and can resched (no preempt count).
   46  */
   47 
   48 static __always_inline void set_preempt_need_resched(void)
   49 {
   50 	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
   51 }
   52 
   53 static __always_inline void clear_preempt_need_resched(void)
   54 {
   55 	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
   56 }
   57 
   58 static __always_inline bool test_preempt_need_resched(void)
   59 {
   60 	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
   61 }
   62 
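/*
 * Editor's worked example (not part of the original header), assuming
 * PREEMPT_NEED_RESCHED is the 0x80000000 bit as defined in
 * <linux/preempt.h>. Because the bit is kept *inverted*:
 *
 *   __preempt_count == 0x80000001  ->  one preempt_disable(), no resched needed
 *   __preempt_count == 0x00000001  ->  one preempt_disable(), resched needed
 *                                      (the bit was cleared by
 *                                       set_preempt_need_resched() above)
 *   __preempt_count == 0x00000000  ->  preemptible AND resched needed; the
 *       decrement in __preempt_count_dec_and_test() hits zero exactly here,
 *       which is the single-instruction test the comments describe.
 */
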
   63 /*
   64  * The various preempt_count add/sub methods
   65  */
   66 
   67 static __always_inline void __preempt_count_add(int val)
   68 {
   69 	raw_cpu_add_4(__preempt_count, val);
   70 }
   71 
   72 static __always_inline void __preempt_count_sub(int val)
   73 {
   74 	raw_cpu_add_4(__preempt_count, -val);
   75 }
   76 
   77 /*
   78  * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
   79  * a decrement which hits zero means we have no preempt_count and should
   80  * reschedule.
   81  */
   82 static __always_inline bool __preempt_count_dec_and_test(void)
   83 {
   84 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
   85 }
   86 
   87 /*
   88  * Returns true when we need to resched and can (barring IRQ state).
   89  */
   90 static __always_inline bool should_resched(int preempt_offset)
   91 {
   92 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
   93 }
   94 
   95 #ifdef CONFIG_PREEMPT
   96   extern asmlinkage void ___preempt_schedule(void);
   97 # define __preempt_schedule()					\
   98 ({								\
   99 	register void *__sp asm(_ASM_SP);			\
  100 	asm volatile ("call ___preempt_schedule" : "+r"(__sp));	\
  101 })
  102 
  103   extern asmlinkage void preempt_schedule(void);
  104   extern asmlinkage void ___preempt_schedule_notrace(void);
  105 # define __preempt_schedule_notrace()					\
  106 ({									\
  107 	register void *__sp asm(_ASM_SP);				\
  108 	asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp));	\
  109 })
  110   extern asmlinkage void preempt_schedule_notrace(void);
  111 #endif
  112 
  113 #endif /* __ASM_PREEMPT_H */

     1 
    2 /*
    3  * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
    4  *
    5  *		Refer to:
    6  *		draft-ietf-forces-interfelfb-03
    7  *		and
    8  *		netdev01 paper:
    9  *		"Distributing Linux Traffic Control Classifier-Action
   10  *		Subsystem"
   11  *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
   12  *
   13  *		This program is free software; you can redistribute it and/or
   14  *		modify it under the terms of the GNU General Public License
   15  *		as published by the Free Software Foundation; either version
   16  *		2 of the License, or (at your option) any later version.
   17  *
   18  * copyright Jamal Hadi Salim (2015)
   19  *
   20 */
   21 
   22 #include <linux/types.h>
   23 #include <linux/kernel.h>
   24 #include <linux/string.h>
   25 #include <linux/errno.h>
   26 #include <linux/skbuff.h>
   27 #include <linux/rtnetlink.h>
   28 #include <linux/module.h>
   29 #include <linux/init.h>
   30 #include <net/net_namespace.h>
   31 #include <net/netlink.h>
   32 #include <net/pkt_sched.h>
   33 #include <uapi/linux/tc_act/tc_ife.h>
   34 #include <net/tc_act/tc_ife.h>
   35 #include <linux/etherdevice.h>
   36 
   37 #define IFE_TAB_MASK 15
   38 
   39 static int ife_net_id;
   40 static int max_metacnt = IFE_META_MAX + 1;
   41 
   42 static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
   43 	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
   44 	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
   45 	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
   46 	[TCA_IFE_TYPE] = { .type = NLA_U16},
   47 };
   48 
   49 /* Caller takes care of presenting data in network order
   50 */
   51 int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
   52 {
   53 	u32 *tlv = (u32 *)(skbdata);
   54 	u16 totlen = nla_total_size(dlen);	/*alignment + hdr */
   55 	char *dptr = (char *)tlv + NLA_HDRLEN;
   56 	u32 htlv = attrtype << 16 | totlen;
   57 
   58 	*tlv = htonl(htlv);
   59 	memset(dptr, 0, totlen - NLA_HDRLEN);
   60 	memcpy(dptr, dval, dlen);
   61 
   62 	return totlen;
   63 }
   64 EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
   65 
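/*
 * Editor's sketch (not part of the original file): how a caller is expected
 * to use ife_tlv_meta_encode(). The function name example_encode_u32() is
 * hypothetical; per the comment above, the caller must present the value in
 * network order.
 */
static int example_encode_u32(void *tlv_buf, u16 metaid, u32 host_val)
{
	__be32 nval = htonl(host_val);	/* present data in network order */

	/* writes a 4-byte header (metaid in the high 16 bits, total length
	 * in the low 16 bits, big endian) plus the zero-padded payload, and
	 * returns nla_total_size(4) == 8
	 */
	return ife_tlv_meta_encode(tlv_buf, metaid, sizeof(nval), &nval);
}
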
   66 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
   67 {
   68 	if (mi->metaval)
   69 		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
   70 	else
   71 		return nla_put(skb, mi->metaid, 0, NULL);
   72 }
   73 EXPORT_SYMBOL_GPL(ife_get_meta_u32);
   74 
   75 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
   76 {
   77 	if (metaval || mi->metaval)
   78 		return 8; /* T+L+V == 2+2+4 */
   79 
   80 	return 0;
   81 }
   82 EXPORT_SYMBOL_GPL(ife_check_meta_u32);
   83 
   84 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
   85 {
   86 	u32 edata = metaval;
   87 
   88 	if (mi->metaval)
   89 		edata = *(u32 *)mi->metaval;
   90 	else if (metaval)
   91 		edata = metaval;
   92 
   93 	if (!edata) /* will not encode */
   94 		return 0;
   95 
   96 	edata = htonl(edata);
   97 	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
   98 }
   99 EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
  100 
  101 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
  102 {
  103 	if (mi->metaval)
  104 		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
  105 	else
  106 		return nla_put(skb, mi->metaid, 0, NULL);
  107 }
  108 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
  109 
  110 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
  111 {
  112 	mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
  113 	if (!mi->metaval)
  114 		return -ENOMEM;
  115 
  116 	return 0;
  117 }
  118 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
  119 
  120 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
  121 {
  122 	mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
  123 	if (!mi->metaval)
  124 		return -ENOMEM;
  125 
  126 	return 0;
  127 }
  128 EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
  129 
  130 void ife_release_meta_gen(struct tcf_meta_info *mi)
  131 {
  132 	kfree(mi->metaval);
  133 }
  134 EXPORT_SYMBOL_GPL(ife_release_meta_gen);
  135 
  136 int ife_validate_meta_u32(void *val, int len)
  137 {
  138 	if (len == 4)
  139 		return 0;
  140 
  141 	return -EINVAL;
  142 }
  143 EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
  144 
  145 int ife_validate_meta_u16(void *val, int len)
  146 {
  147 	/* length will include padding */
  148 	if (len == NLA_ALIGN(2))
  149 		return 0;
  150 
  151 	return -EINVAL;
  152 }
  153 EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
  154 
  155 static LIST_HEAD(ifeoplist);
  156 static DEFINE_RWLOCK(ife_mod_lock);
  157 
  158 static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
  159 {
  160 	struct tcf_meta_ops *o;
  161 
  162 	read_lock(&ife_mod_lock);
  163 	list_for_each_entry(o, &ifeoplist, list) {
  164 		if (o->metaid == metaid) {
  165 			if (!try_module_get(o->owner))
  166 				o = NULL;
  167 			read_unlock(&ife_mod_lock);
  168 			return o;
  169 		}
  170 	}
  171 	read_unlock(&ife_mod_lock);
  172 
  173 	return NULL;
  174 }
  175 
  176 int register_ife_op(struct tcf_meta_ops *mops)
  177 {
  178 	struct tcf_meta_ops *m;
  179 
  180 	if (!mops->metaid || !mops->metatype || !mops->name ||
  181 	    !mops->check_presence || !mops->encode || !mops->decode ||
  182 	    !mops->get || !mops->alloc)
  183 		return -EINVAL;
  184 
  185 	write_lock(&ife_mod_lock);
  186 
  187 	list_for_each_entry(m, &ifeoplist, list) {
  188 		if (m->metaid == mops->metaid ||
  189 		    (strcmp(mops->name, m->name) == 0)) {
  190 			write_unlock(&ife_mod_lock);
  191 			return -EEXIST;
  192 		}
  193 	}
  194 
  195 	if (!mops->release)
  196 		mops->release = ife_release_meta_gen;
  197 
  198 	list_add_tail(&mops->list, &ifeoplist);
  199 	write_unlock(&ife_mod_lock);
  200 	return 0;
  201 }
  202 EXPORT_SYMBOL_GPL(register_ife_op);
  203 
  204 int unregister_ife_op(struct tcf_meta_ops *mops)
  205 {
  206 	struct tcf_meta_ops *m;
  207 	int err = -ENOENT;
  208 
  209 	write_lock(&ife_mod_lock);
  210 	list_for_each_entry(m, &ifeoplist, list) {
  211 		if (m->metaid == mops->metaid) {
  212 			list_del(&mops->list);
  213 			err = 0;
  214 			break;
  215 		}
  216 	}
  217 	write_unlock(&ife_mod_lock);
  218 
  219 	return err;
  220 }
  221 EXPORT_SYMBOL_GPL(unregister_ife_op);
  222 
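/*
 * Editor's sketch (not part of the original file): the minimal shape of a
 * metadata module built on the helpers above, mirroring how an skb->mark
 * style metadatum would be wired up. EXAMPLE_METAID, "exmark" and all the
 * example_* names are hypothetical; register_ife_op() rejects ops that lack
 * metaid, metatype, name, check_presence, encode, decode, get or alloc.
 */
#define EXAMPLE_METAID	0x7b	/* hypothetical id, for illustration only */

static int example_check(struct sk_buff *skb, struct tcf_meta_info *e)
{
	return ife_check_meta_u32(skb->mark, e);
}

static int example_encode(struct sk_buff *skb, void *skbdata,
			  struct tcf_meta_info *e)
{
	return ife_encode_meta_u32(skb->mark, skbdata, e);
}

static int example_decode(struct sk_buff *skb, void *data, u16 len)
{
	u32 mark = *(u32 *)data;	/* payload arrives in network order */

	skb->mark = ntohl(mark);
	return 0;
}

static struct tcf_meta_ops example_meta_ops = {
	.metaid		= EXAMPLE_METAID,
	.metatype	= NLA_U32,
	.name		= "exmark",
	.synopsis	= "illustrative skb mark metadatum",
	.check_presence	= example_check,
	.encode		= example_encode,
	.decode		= example_decode,
	.get		= ife_get_meta_u32,
	.alloc		= ife_alloc_meta_u32,
	.release	= ife_release_meta_gen,
	.validate	= ife_validate_meta_u32,
	.owner		= THIS_MODULE,
};
/* a module would call register_ife_op(&example_meta_ops) from its init and
 * unregister_ife_op(&example_meta_ops) from its exit
 */
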
  223 static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
  224 {
  225 	int ret = 0;
  226 	/* XXX: unfortunately we can't use nla_policy at this point,
  227 	 * because a length of 0 is valid in the case of
  228 	 * "allow". "use" semantics do enforce a proper
  229 	 * length, and nla_policy could have been used for that alone,
  230 	 * but it is hard to use it for just that.
  231 	 */
  232 	if (ops->validate)
  233 		return ops->validate(val, len);
  234 
  235 	if (ops->metatype == NLA_U32)
  236 		ret = ife_validate_meta_u32(val, len);
  237 	else if (ops->metatype == NLA_U16)
  238 		ret = ife_validate_meta_u16(val, len);
  239 
  240 	return ret;
  241 }
  242 
  243 /* called when adding new meta information
  244  * under ife->tcf_lock
  245 */
  246 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
  247 				void *val, int len)
  248 {
  249 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
  250 	int ret = 0;
  251 
  252 	if (!ops) {
  253 		ret = -ENOENT;
  254 #ifdef CONFIG_MODULES
  255 		spin_unlock_bh(&ife->tcf_lock);
  256 		rtnl_unlock();
  257 		request_module("ifemeta%u", metaid);
  258 		rtnl_lock();
  259 		spin_lock_bh(&ife->tcf_lock);
  260 		ops = find_ife_oplist(metaid);
  261 #endif
  262 	}
  263 
  264 	if (ops) {
  265 		ret = 0;
  266 		if (len)
  267 			ret = ife_validate_metatype(ops, val, len);
  268 
  269 		module_put(ops->owner);
  270 	}
  271 
  272 	return ret;
  273 }
  274 
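/*
 * Editor's note (not part of the original file): request_module() may sleep
 * while the usermode helper loads "ifemeta<N>", so the function above first
 * drops ife->tcf_lock (a BH spinlock must never be held across a sleep) and,
 * presumably to avoid stalling or deadlocking the netlink path, the RTNL
 * mutex as well; both are retaken before find_ife_oplist() is retried, since
 * the ops list may have changed in between. find_ife_oplist() takes a module
 * reference on success, which is why the ops branch ends with module_put().
 */
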
  275 /* called when adding new meta information
  276  * under ife->tcf_lock
  277 */
  278 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
  279 			int len)
  280 {
  281 	struct tcf_meta_info *mi = NULL;
  282 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
  283 	int ret = 0;
  284 
  285 	if (!ops)
  286 		return -ENOENT;
  287 
  288 	mi = kzalloc(sizeof(*mi), GFP_KERNEL);
  289 	if (!mi) {
  290 		/*put back what find_ife_oplist took */
  291 		module_put(ops->owner);
  292 		return -ENOMEM;
  293 	}
  294 
  295 	mi->metaid = metaid;
  296 	mi->ops = ops;
  297 	if (len > 0) {
  298 		ret = ops->alloc(mi, metaval);
  299 		if (ret != 0) {
  300 			kfree(mi);
  301 			module_put(ops->owner);
  302 			return ret;
  303 		}
  304 	}
  305 
  306 	list_add_tail(&mi->metalist, &ife->metalist);
  307 
  308 	return ret;
  309 }
  310 
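/*
 * Editor's note (not part of the original file): add_metainfo() uses
 * kzalloc(GFP_KERNEL), and its ops->alloc() helpers map to
 * kmemdup(..., GFP_KERNEL) above, yet tcf_ife_init() reaches here while
 * holding ife->tcf_lock, a BH spinlock -- the sleeping-allocation-under-
 * spinlock pattern that the verifier's ldv_check_alloc_flags() model tracks.
 * A minimal sketch of an atomic-safe variant (example_alloc_mi_atomic is a
 * hypothetical name):
 */
static struct tcf_meta_info *example_alloc_mi_atomic(void)
{
	/* GFP_ATOMIC never sleeps, so it is legal under a spinlock,
	 * though it fails more readily than GFP_KERNEL
	 */
	return kzalloc(sizeof(struct tcf_meta_info), GFP_ATOMIC);
}
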
  311 static int use_all_metadata(struct tcf_ife_info *ife)
  312 {
  313 	struct tcf_meta_ops *o;
  314 	int rc = 0;
  315 	int installed = 0;
  316 
  317 	list_for_each_entry(o, &ifeoplist, list) {
  318 		rc = add_metainfo(ife, o->metaid, NULL, 0);
  319 		if (rc == 0)
  320 			installed += 1;
  321 	}
  322 
  323 	if (installed)
  324 		return 0;
  325 	else
  326 		return -EINVAL;
  327 }
  328 
  329 static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
  330 {
  331 	struct tcf_meta_info *e;
  332 	struct nlattr *nest;
  333 	unsigned char *b = skb_tail_pointer(skb);
  334 	int total_encoded = 0;
  335 
  336 	/*can only happen on decode */
  337 	if (list_empty(&ife->metalist))
  338 		return 0;
  339 
  340 	nest = nla_nest_start(skb, TCA_IFE_METALST);
  341 	if (!nest)
  342 		goto out_nlmsg_trim;
  343 
  344 	list_for_each_entry(e, &ife->metalist, metalist) {
  345 		if (!e->ops->get(skb, e))
  346 			total_encoded += 1;
  347 	}
  348 
  349 	if (!total_encoded)
  350 		goto out_nlmsg_trim;
  351 
  352 	nla_nest_end(skb, nest);
  353 
  354 	return 0;
  355 
  356 out_nlmsg_trim:
  357 	nlmsg_trim(skb, b);
  358 	return -1;
  359 }
  360 
  361 /* under ife->tcf_lock */
  362 static void _tcf_ife_cleanup(struct tc_action *a, int bind)
  363 {
  364 	struct tcf_ife_info *ife = a->priv;
  365 	struct tcf_meta_info *e, *n;
  366 
  367 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
  368 		module_put(e->ops->owner);
  369 		list_del(&e->metalist);
  370 		if (e->metaval) {
  371 			if (e->ops->release)
  372 				e->ops->release(e);
  373 			else
  374 				kfree(e->metaval);
  375 		}
  376 		kfree(e);
  377 	}
  378 }
  379 
  380 static void tcf_ife_cleanup(struct tc_action *a, int bind)
  381 {
  382 	struct tcf_ife_info *ife = a->priv;
  383 
  384 	spin_lock_bh(&ife->tcf_lock);
  385 	_tcf_ife_cleanup(a, bind);
  386 	spin_unlock_bh(&ife->tcf_lock);
  387 }
  388 
  389 /* under ife->tcf_lock */
  390 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
  391 {
  392 	int len = 0;
  393 	int rc = 0;
  394 	int i = 0;
  395 	void *val;
  396 
  397 	for (i = 1; i < max_metacnt; i++) {
  398 		if (tb[i]) {
  399 			val = nla_data(tb[i]);
  400 			len = nla_len(tb[i]);
  401 
  402 			rc = load_metaops_and_vet(ife, i, val, len);
  403 			if (rc != 0)
  404 				return rc;
  405 
  406 			rc = add_metainfo(ife, i, val, len);
  407 			if (rc)
  408 				return rc;
  409 		}
  410 	}
  411 
  412 	return rc;
  413 }
  414 
  415 static int tcf_ife_init(struct net *net, struct nlattr *nla,
  416 			struct nlattr *est, struct tc_action *a,
  417 			int ovr, int bind)
  418 {
  419 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  420 	struct nlattr *tb[TCA_IFE_MAX + 1];
  421 	struct nlattr *tb2[IFE_META_MAX + 1];
  422 	struct tcf_ife_info *ife;
  423 	struct tc_ife *parm;
  424 	u16 ife_type = 0;
  425 	u8 *daddr = NULL;
  426 	u8 *saddr = NULL;
  427 	int ret = 0, exists = 0;
  428 	int err;
  429 
  430 	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
  431 	if (err < 0)
  432 		return err;
  433 
  434 	if (!tb[TCA_IFE_PARMS])
  435 		return -EINVAL;
  436 
  437 	parm = nla_data(tb[TCA_IFE_PARMS]);
  438 
  439 	exists = tcf_hash_check(tn, parm->index, a, bind);
  440 	if (exists && bind)
  441 		return 0;
  442 
  443 	if (parm->flags & IFE_ENCODE) {
  444 		/* Until we get issued the ethertype, we can't have
  445 		 * a default.
  446 		 */
  447 		if (!tb[TCA_IFE_TYPE]) {
  448 			if (exists)
  449 				tcf_hash_release(a, bind);
  450 			pr_info("You MUST pass etherype for encoding\n");
  451 			return -EINVAL;
  452 		}
  453 	}
  454 
  455 	if (!exists) {
  456 		ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife),
  457 				      bind, false);
  458 		if (ret)
  459 			return ret;
  460 		ret = ACT_P_CREATED;
  461 	} else {
  462 		tcf_hash_release(a, bind);
  463 		if (!ovr)
  464 			return -EEXIST;
  465 	}
  466 
  467 	ife = to_ife(a);
  468 	ife->flags = parm->flags;
  469 
  470 	if (parm->flags & IFE_ENCODE) {
  471 		ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
  472 		if (tb[TCA_IFE_DMAC])
  473 			daddr = nla_data(tb[TCA_IFE_DMAC]);
  474 		if (tb[TCA_IFE_SMAC])
  475 			saddr = nla_data(tb[TCA_IFE_SMAC]);
  476 	}
  477 
  478 	spin_lock_bh(&ife->tcf_lock);
  479 	ife->tcf_action = parm->action;
  480 
  481 	if (parm->flags & IFE_ENCODE) {
  482 		if (daddr)
  483 			ether_addr_copy(ife->eth_dst, daddr);
  484 		else
  485 			eth_zero_addr(ife->eth_dst);
  486 
  487 		if (saddr)
  488 			ether_addr_copy(ife->eth_src, saddr);
  489 		else
  490 			eth_zero_addr(ife->eth_src);
  491 
  492 		ife->eth_type = ife_type;
  493 	}
  494 
  495 	if (ret == ACT_P_CREATED)
  496 		INIT_LIST_HEAD(&ife->metalist);
  497 
  498 	if (tb[TCA_IFE_METALST]) {
  499 		err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
  500 				       NULL);
  501 		if (err) {
  502 metadata_parse_err:
  503 			if (exists)
  504 				tcf_hash_release(a, bind);
  505 			if (ret == ACT_P_CREATED)
  506 				_tcf_ife_cleanup(a, bind);
  507 
  508 			spin_unlock_bh(&ife->tcf_lock);
  509 			return err;
  510 		}
  511 
  512 		err = populate_metalist(ife, tb2);
  513 		if (err)
  514 			goto metadata_parse_err;
  515 
  516 	} else {
  517 		/* if no metadata allow-list was passed, or allow-all was
  518 		 * passed, then we add as many supported metadata as we
  519 		 * can; at least one must install successfully or we are
  520 		 * going to bail out
  521 		 */
  522 		err = use_all_metadata(ife);
  523 		if (err) {
  524 			if (ret == ACT_P_CREATED)
  525 				_tcf_ife_cleanup(a, bind);
  526 
  527 			spin_unlock_bh(&ife->tcf_lock);
  528 			return err;
  529 		}
  530 	}
  531 
  532 	spin_unlock_bh(&ife->tcf_lock);
  533 
  534 	if (ret == ACT_P_CREATED)
  535 		tcf_hash_insert(tn, a);
  536 
  537 	return ret;
  538 }
  539 
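/*
 * Editor's sketch (not part of the original file) of the nested netlink
 * layout tcf_ife_init() parses, reconstructed from ife_policy and the code
 * above:
 *
 *   TCA_IFE_PARMS    struct tc_ife (index/action/flags; IFE_ENCODE bit)
 *   TCA_IFE_TYPE     u16 ethertype, required when IFE_ENCODE is set
 *   TCA_IFE_DMAC     6-byte destination MAC (optional, encode only)
 *   TCA_IFE_SMAC     6-byte source MAC (optional, encode only)
 *   TCA_IFE_METALST  nested: one attribute per metaid, handed to
 *                    populate_metalist(); if absent, use_all_metadata()
 *                    installs every registered metadatum instead
 */
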
  540 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
  541 			int ref)
  542 {
  543 	unsigned char *b = skb_tail_pointer(skb);
  544 	struct tcf_ife_info *ife = a->priv;
  545 	struct tc_ife opt = {
  546 		.index = ife->tcf_index,
  547 		.refcnt = ife->tcf_refcnt - ref,
  548 		.bindcnt = ife->tcf_bindcnt - bind,
  549 		.action = ife->tcf_action,
  550 		.flags = ife->flags,
  551 	};
  552 	struct tcf_t t;
  553 
  554 	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
  555 		goto nla_put_failure;
  556 
  557 	t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install);
  558 	t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse);
  559 	t.expires = jiffies_to_clock_t(ife->tcf_tm.expires);
  560 	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
  561 		goto nla_put_failure;
  562 
  563 	if (!is_zero_ether_addr(ife->eth_dst)) {
  564 		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
  565 			goto nla_put_failure;
  566 	}
  567 
  568 	if (!is_zero_ether_addr(ife->eth_src)) {
  569 		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
  570 			goto nla_put_failure;
  571 	}
  572 
  573 	if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
  574 		goto nla_put_failure;
  575 
  576 	if (dump_metalist(skb, ife)) {
  577 		/*ignore failure to dump metalist */
  578 		pr_info("Failed to dump metalist\n");
  579 	}
  580 
  581 	return skb->len;
  582 
  583 nla_put_failure:
  584 	nlmsg_trim(skb, b);
  585 	return -1;
  586 }
  587 
  588 int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
  589 		       u16 metaid, u16 mlen, void *mdata)
  590 {
  591 	struct tcf_meta_info *e;
  592 
  593 	/* XXX: use hash to speed up */
  594 	list_for_each_entry(e, &ife->metalist, metalist) {
  595 		if (metaid == e->metaid) {
  596 			if (e->ops) {
  597 				/* We check for decode presence already */
  598 				return e->ops->decode(skb, mdata, mlen);
  599 			}
  600 		}
  601 	}
  602 
  603 	return 0;
  604 }
  605 
  606 struct ifeheadr {
  607 	__be16 metalen;
  608 	u8 tlv_data[];
  609 };
  610 
  611 struct meta_tlvhdr {
  612 	__be16 type;
  613 	__be16 len;
  614 };
  615 
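/*
 * Editor's sketch (not part of the original file): a standalone walk over
 * the wire format the two structures above describe, as consumed by
 * tcf_ife_decode() below -- a 2-byte network-order metalen, then a run of
 * TLVs whose len field covers the 4-byte header plus the payload, so
 * find_decode_metaid() is handed (mlen - 4) bytes at tlvdata + 4. The name
 * example_walk_tlvs() is hypothetical, and unlike the loop below this
 * sketch bound-checks the packet-supplied length before advancing.
 */
static int example_walk_tlvs(const u8 *buf)
{
	const struct ifeheadr *ifehdr = (const struct ifeheadr *)buf;
	u16 left = ntohs(ifehdr->metalen) - IFE_METAHDRLEN;
	const u8 *p = ifehdr->tlv_data;

	while (left >= sizeof(struct meta_tlvhdr)) {
		const struct meta_tlvhdr *tlv = (const struct meta_tlvhdr *)p;
		u16 mlen = ntohs(tlv->len);  /* total: 4-byte hdr + payload */

		if (mlen < sizeof(struct meta_tlvhdr) || mlen > left)
			return -1;	/* malformed TLV: would spin or overrun */

		/* payload is at p + 4, length mlen - 4 */
		p += mlen;
		left -= mlen;
	}
	return 0;
}
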
  616 static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
  617 			  struct tcf_result *res)
  618 {
  619 	struct tcf_ife_info *ife = a->priv;
  620 	int action = ife->tcf_action;
  621 	struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
  622 	u16 ifehdrln = ifehdr->metalen;
  623 	struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
  624 
  625 	spin_lock(&ife->tcf_lock);
  626 	bstats_update(&ife->tcf_bstats, skb);
  627 	ife->tcf_tm.lastuse = jiffies;
  628 	spin_unlock(&ife->tcf_lock);
  629 
  630 	ifehdrln = ntohs(ifehdrln);
  631 	if (unlikely(!pskb_may_pull(skb, ifehdrln))) {
  632 		spin_lock(&ife->tcf_lock);
  633 		ife->tcf_qstats.drops++;
  634 		spin_unlock(&ife->tcf_lock);
  635 		return TC_ACT_SHOT;
  636 	}
  637 
  638 	skb_set_mac_header(skb, ifehdrln);
  639 	__skb_pull(skb, ifehdrln);
  640 	skb->protocol = eth_type_trans(skb, skb->dev);
  641 	ifehdrln -= IFE_METAHDRLEN;
  642 
  643 	while (ifehdrln > 0) {
  644 		u8 *tlvdata = (u8 *)tlv;
  645 		u16 mtype = tlv->type;
  646 		u16 mlen = tlv->len;
  647 
  648 		mtype = ntohs(mtype);
  649 		mlen = ntohs(mlen);
  650 
  651 		if (find_decode_metaid(skb, ife, mtype, (mlen - 4),
  652 				       (void *)(tlvdata + 4))) {
  653 			/* abuse overlimits to count when we receive metadata
  654 			 * but don't have an ops for it
  655 			 */
  656 			pr_info_ratelimited("Unknown metaid %d alnlen %d\n",
  657 					    mtype, mlen);
  658 			ife->tcf_qstats.overlimits++;
  659 		}
  660 
  661 		tlvdata += mlen;
  662 		ifehdrln -= mlen;
  663 		tlv = (struct meta_tlvhdr *)tlvdata;
  664 	}
  665 
  666 	skb_reset_network_header(skb);
  667 	return action;
  668 }
  669 
  670 /* XXX: check if we can do this at install time instead of on the
  671  * current send data path
  672  */
  673 static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
  674 {
  675 	struct tcf_meta_info *e, *n;
  676 	int tot_run_sz = 0, run_sz = 0;
  677 
  678 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
  679 		if (e->ops->check_presence) {
  680 			run_sz = e->ops->check_presence(skb, e);
  681 			tot_run_sz += run_sz;
  682 		}
  683 	}
  684 
  685 	return tot_run_sz;
  686 }
  687 
  688 static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
  689 			  struct tcf_result *res)
  690 {
  691 	struct tcf_ife_info *ife = a->priv;
  692 	int action = ife->tcf_action;
  693 	struct ethhdr *oethh;	/* outer ether header */
  694 	struct ethhdr *iethh;	/* inner eth header */
  695 	struct tcf_meta_info *e;
  696 	/*
  697 	   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
  698 	   where ORIGDATA = original ethernet header ...
  699 	 */
  700 	u16 metalen = ife_get_sz(skb, ife);
  701 	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
  702 	unsigned int skboff = skb->dev->hard_header_len;
  703 	u32 at = G_TC_AT(skb->tc_verd);
  704 	int new_len = skb->len + hdrm;
  705 	bool exceed_mtu = false;
  706 	int err;
  707 
  708 	if (at & AT_EGRESS) {
  709 		if (new_len > skb->dev->mtu)
  710 			exceed_mtu = true;
  711 	}
  712 
  713 	spin_lock(&ife->tcf_lock);
  714 	bstats_update(&ife->tcf_bstats, skb);
  715 	ife->tcf_tm.lastuse = jiffies;
  716 
  717 	if (!metalen) {		/* no metadata to send */
  718 		/* abuse overlimits to count when we allow packet
  719 		 * with no metadata
  720 		 */
  721 		ife->tcf_qstats.overlimits++;
  722 		spin_unlock(&ife->tcf_lock);
  723 		return action;
  724 	}
  725 	/* could be a bad policy setup or MTU config,
  726 	 * so let's be conservative. */
  727 	if ((action == TC_ACT_SHOT) || exceed_mtu) {
  728 		ife->tcf_qstats.drops++;
  729 		spin_unlock(&ife->tcf_lock);
  730 		return TC_ACT_SHOT;
  731 	}
  732 
  733 	iethh = eth_hdr(skb);
  734 
  735 	err = skb_cow_head(skb, hdrm);
  736 	if (unlikely(err)) {
  737 		ife->tcf_qstats.drops++;
  738 		spin_unlock(&ife->tcf_lock);
  739 		return TC_ACT_SHOT;
  740 	}
  741 
  742 	if (!(at & AT_EGRESS))
  743 		skb_push(skb, skb->dev->hard_header_len);
  744 
  745 	__skb_push(skb, hdrm);
  746 	memcpy(skb->data, iethh, skb->mac_len);
  747 	skb_reset_mac_header(skb);
  748 	oethh = eth_hdr(skb);
  749 
  750 	/*total metadata length */
  751 	metalen += IFE_METAHDRLEN;
  752 	metalen = htons(metalen);
  753 	memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN);
  754 	skboff += IFE_METAHDRLEN;
  755 
  756 	/* XXX: we don't have a clever way of telling encode to
  757 	 * not repeat some of the computations that are done by
  758 	 * ops->check_presence...
  759 	 */
  760 	list_for_each_entry(e, &ife->metalist, metalist) {
  761 		if (e->ops->encode) {
  762 			err = e->ops->encode(skb, (void *)(skb->data + skboff),
  763 					     e);
  764 		}
  765 		if (err < 0) {
  766 			/* too corrupt to keep around if overwritten */
  767 			ife->tcf_qstats.drops++;
  768 			spin_unlock(&ife->tcf_lock);
  769 			return TC_ACT_SHOT;
  770 		}
  771 		skboff += err;
  772 	}
  773 
  774 	if (!is_zero_ether_addr(ife->eth_src))
  775 		ether_addr_copy(oethh->h_source, ife->eth_src);
  776 	else
  777 		ether_addr_copy(oethh->h_source, iethh->h_source);
  778 	if (!is_zero_ether_addr(ife->eth_dst))
  779 		ether_addr_copy(oethh->h_dest, ife->eth_dst);
  780 	else
  781 		ether_addr_copy(oethh->h_dest, iethh->h_dest);
  782 	oethh->h_proto = htons(ife->eth_type);
  783 
  784 	if (!(at & AT_EGRESS))
  785 		skb_pull(skb, skb->dev->hard_header_len);
  786 
  787 	spin_unlock(&ife->tcf_lock);
  788 
  789 	return action;
  790 }
  791 
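/*
 * Editor's worked example (not part of the original file), assuming a plain
 * Ethernet device (hard_header_len == 14) carrying one u32 metadatum, with
 * IFE_METAHDRLEN being the 2-byte metalen field copied above:
 *
 *   metalen = ife_get_sz()               =  8   (4-byte TLV header + 4 data)
 *   hdrm    = 8 + 14 + 2                 = 24   bytes of extra headroom
 *   new_len = skb->len + 24; on egress this must still fit skb->dev->mtu,
 *   otherwise exceed_mtu is set and the packet is dropped as TC_ACT_SHOT.
 */
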
  792 static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
  793 		       struct tcf_result *res)
  794 {
  795 	struct tcf_ife_info *ife = a->priv;
  796 
  797 	if (ife->flags & IFE_ENCODE)
  798 		return tcf_ife_encode(skb, a, res);
  799 
  800 	if (!(ife->flags & IFE_ENCODE))
  801 		return tcf_ife_decode(skb, a, res);
  802 
  803 	pr_info_ratelimited("unknown failure(policy neither de/encode\n");
  804 	spin_lock(&ife->tcf_lock);
  805 	bstats_update(&ife->tcf_bstats, skb);
  806 	ife->tcf_tm.lastuse = jiffies;
  807 	ife->tcf_qstats.drops++;
  808 	spin_unlock(&ife->tcf_lock);
  809 
  810 	return TC_ACT_SHOT;
  811 }
  812 
  813 static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
  814 			  struct netlink_callback *cb, int type,
  815 			  struct tc_action *a)
  816 {
  817 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  818 
  819 	return tcf_generic_walker(tn, skb, cb, type, a);
  820 }
  821 
  822 static int tcf_ife_search(struct net *net, struct tc_action *a, u32 index)
  823 {
  824 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  825 
  826 	return tcf_hash_search(tn, a, index);
  827 }
  828 
  829 static struct tc_action_ops act_ife_ops = {
  830 	.kind = "ife",
  831 	.type = TCA_ACT_IFE,
  832 	.owner = THIS_MODULE,
  833 	.act = tcf_ife_act,
  834 	.dump = tcf_ife_dump,
  835 	.cleanup = tcf_ife_cleanup,
  836 	.init = tcf_ife_init,
  837 	.walk = tcf_ife_walker,
  838 	.lookup = tcf_ife_search,
  839 };
  840 
  841 static __net_init int ife_init_net(struct net *net)
  842 {
  843 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  844 
  845 	return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK);
  846 }
  847 
  848 static void __net_exit ife_exit_net(struct net *net)
  849 {
  850 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  851 
  852 	tc_action_net_exit(tn);
  853 }
  854 
  855 static struct pernet_operations ife_net_ops = {
  856 	.init = ife_init_net,
  857 	.exit = ife_exit_net,
  858 	.id   = &ife_net_id,
  859 	.size = sizeof(struct tc_action_net),
  860 };
  861 
  862 static int __init ife_init_module(void)
  863 {
  864 	return tcf_register_action(&act_ife_ops, &ife_net_ops);
  865 }
  866 
  867 static void __exit ife_cleanup_module(void)
  868 {
  869 	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
  870 }
  871 
  872 module_init(ife_init_module);
  873 module_exit(ife_cleanup_module);
  874 
  875 MODULE_AUTHOR("Jamal Hadi Salim(2015)");
  876 MODULE_DESCRIPTION("Inter-FE LFB action");
  877 MODULE_LICENSE("GPL");
  878 
  879 
  880 
  881 
  882 
  883 /* LDV_COMMENT_BEGIN_MAIN */
  884 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
  885 
  886 /*###########################################################################*/
  887 
  888 /*############## Driver Environment Generator 0.2 output ####################*/
  889 
  890 /*###########################################################################*/
  891 
  892 
  893 
  894 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
  895 void ldv_check_final_state(void);
  896 
  897 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
  898 void ldv_check_return_value(int res);
  899 
  900 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
  901 void ldv_check_return_value_probe(int res);
  902 
  903 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
  904 void ldv_initialize(void);
  905 
  906 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
  907 void ldv_handler_precall(void);
  908 
  909 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
  910 int nondet_int(void);
  911 
  912 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
  913 int LDV_IN_INTERRUPT;
  914 
  915 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
  916 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
  917 
  918 
  919 
  920 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
  921 	/*============================= VARIABLE DECLARATION PART   =============================*/
  922 	/** STRUCT: struct type: tc_action_ops, struct name: act_ife_ops **/
  923 	/* content: static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res)*/
  924 	/* LDV_COMMENT_BEGIN_PREP */
  925 	#define IFE_TAB_MASK 15
  926 	#ifdef CONFIG_MODULES
  927 	#endif
  928 	/* LDV_COMMENT_END_PREP */
  929 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_act" */
  930 	struct sk_buff * var_group1;
  931 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_act" */
  932 	const struct tc_action * var_tcf_ife_act_27_p1;
  933 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_act" */
  934 	struct tcf_result * var_tcf_ife_act_27_p2;
  935 	/* content: static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)*/
  936 	/* LDV_COMMENT_BEGIN_PREP */
  937 	#define IFE_TAB_MASK 15
  938 	#ifdef CONFIG_MODULES
  939 	#endif
  940 	/* LDV_COMMENT_END_PREP */
  941 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_dump" */
  942 	struct tc_action * var_group2;
  943 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_dump" */
  944 	int  var_tcf_ife_dump_22_p2;
  945 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_dump" */
  946 	int  var_tcf_ife_dump_22_p3;
  947 	/* content: static void tcf_ife_cleanup(struct tc_action *a, int bind)*/
  948 	/* LDV_COMMENT_BEGIN_PREP */
  949 	#define IFE_TAB_MASK 15
  950 	#ifdef CONFIG_MODULES
  951 	#endif
  952 	/* LDV_COMMENT_END_PREP */
  953 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_cleanup" */
  954 	int  var_tcf_ife_cleanup_19_p1;
  955 	/* content: static int tcf_ife_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind)*/
  956 	/* LDV_COMMENT_BEGIN_PREP */
  957 	#define IFE_TAB_MASK 15
  958 	#ifdef CONFIG_MODULES
  959 	#endif
  960 	/* LDV_COMMENT_END_PREP */
  961 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_init" */
  962 	struct net * var_group3;
  963 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_init" */
  964 	struct nlattr * var_group4;
  965 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_init" */
  966 	struct nlattr * var_tcf_ife_init_21_p2;
  967 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_init" */
  968 	struct tc_action * var_tcf_ife_init_21_p3;
  969 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_init" */
  970 	int  var_tcf_ife_init_21_p4;
  971 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_init" */
  972 	int  var_tcf_ife_init_21_p5;
  973 	/* content: static int tcf_ife_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, struct tc_action *a)*/
  974 	/* LDV_COMMENT_BEGIN_PREP */
  975 	#define IFE_TAB_MASK 15
  976 	#ifdef CONFIG_MODULES
  977 	#endif
  978 	/* LDV_COMMENT_END_PREP */
  979 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_walker" */
  980 	struct netlink_callback * var_tcf_ife_walker_28_p2;
  981 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_walker" */
  982 	int  var_tcf_ife_walker_28_p3;
  983 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_walker" */
  984 	struct tc_action * var_tcf_ife_walker_28_p4;
  985 	/* content: static int tcf_ife_search(struct net *net, struct tc_action *a, u32 index)*/
  986 	/* LDV_COMMENT_BEGIN_PREP */
  987 	#define IFE_TAB_MASK 15
  988 	#ifdef CONFIG_MODULES
  989 	#endif
  990 	/* LDV_COMMENT_END_PREP */
  991 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "tcf_ife_search" */
  992 	u32  var_tcf_ife_search_29_p2;
  993 
  994 	/** STRUCT: struct type: pernet_operations, struct name: ife_net_ops **/
  995 	/* content: static __net_init int ife_init_net(struct net *net)*/
  996 	/* LDV_COMMENT_BEGIN_PREP */
  997 	#define IFE_TAB_MASK 15
  998 	#ifdef CONFIG_MODULES
  999 	#endif
 1000 	/* LDV_COMMENT_END_PREP */
 1001 	/* content: static void __net_exit ife_exit_net(struct net *net)*/
 1002 	/* LDV_COMMENT_BEGIN_PREP */
 1003 	#define IFE_TAB_MASK 15
 1004 	#ifdef CONFIG_MODULES
 1005 	#endif
 1006 	/* LDV_COMMENT_END_PREP */
 1007 
 1008 
 1009 
 1010 
 1011 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 1012 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 1013 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 1014 	LDV_IN_INTERRUPT=1;
 1015 
 1016 
 1017 
 1018 
 1019 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 1020 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 1021 	/*============================= FUNCTION CALL SECTION       =============================*/
 1022 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 1023 	ldv_initialize();
 1024 
 1025 	/** INIT: init_type: ST_MODULE_INIT **/
 1026 	/* content: static int __init ife_init_module(void)*/
 1027 	/* LDV_COMMENT_BEGIN_PREP */
 1028 	#define IFE_TAB_MASK 15
 1029 	#ifdef CONFIG_MODULES
 1030 	#endif
 1031 	/* LDV_COMMENT_END_PREP */
 1032 	/* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
 1033 	ldv_handler_precall();
 1034 	 if(ife_init_module()) 
 1035 		goto ldv_final;
 1036 	
 1037 
 1038 	
 1039 
 1040 
 1041 	while(  nondet_int()
 1042 	) {
 1043 
 1044 		switch(nondet_int()) {
 1045 
 1046 			case 0: {
 1047 
 1048 				/** STRUCT: struct type: tc_action_ops, struct name: act_ife_ops **/
 1049 				
 1050 
 1051 				/* content: static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res)*/
 1052 				/* LDV_COMMENT_BEGIN_PREP */
 1053 				#define IFE_TAB_MASK 15
 1054 				#ifdef CONFIG_MODULES
 1055 				#endif
 1056 				/* LDV_COMMENT_END_PREP */
 1057 				/* LDV_COMMENT_FUNCTION_CALL Function from field "act" from driver structure with callbacks "act_ife_ops" */
 1058 				ldv_handler_precall();
 1059 				tcf_ife_act( var_group1, var_tcf_ife_act_27_p1, var_tcf_ife_act_27_p2);
 1060 				
 1061 
 1062 				
 1063 
 1064 			}
 1065 
 1066 			break;
 1067 			case 1: {
 1068 
 1069 				/** STRUCT: struct type: tc_action_ops, struct name: act_ife_ops **/
 1070 				
 1071 
 1072 				/* content: static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)*/
 1073 				/* LDV_COMMENT_BEGIN_PREP */
 1074 				#define IFE_TAB_MASK 15
 1075 				#ifdef CONFIG_MODULES
 1076 				#endif
 1077 				/* LDV_COMMENT_END_PREP */
 1078 				/* LDV_COMMENT_FUNCTION_CALL Function from field "dump" from driver structure with callbacks "act_ife_ops" */
 1079 				ldv_handler_precall();
 1080 				tcf_ife_dump( var_group1, var_group2, var_tcf_ife_dump_22_p2, var_tcf_ife_dump_22_p3);
 1081 				
 1082 
 1083 				
 1084 
 1085 			}
 1086 
 1087 			break;
 1088 			case 2: {
 1089 
 1090 				/** STRUCT: struct type: tc_action_ops, struct name: act_ife_ops **/
 1091 				
 1092 
 1093 				/* content: static void tcf_ife_cleanup(struct tc_action *a, int bind)*/
 1094 				/* LDV_COMMENT_BEGIN_PREP */
 1095 				#define IFE_TAB_MASK 15
 1096 				#ifdef CONFIG_MODULES
 1097 				#endif
 1098 				/* LDV_COMMENT_END_PREP */
 1099 				/* LDV_COMMENT_FUNCTION_CALL Function from field "cleanup" from driver structure with callbacks "act_ife_ops" */
 1100 				ldv_handler_precall();
 1101 				tcf_ife_cleanup( var_group2, var_tcf_ife_cleanup_19_p1);
 1102 				
 1103 
 1104 				
 1105 
 1106 			}
 1107 
 1108 			break;
 1109 			case 3: {
 1110 
 1111 				/** STRUCT: struct type: tc_action_ops, struct name: act_ife_ops **/
 1112 				
 1113 
 1114 				/* content: static int tcf_ife_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind)*/
 1115 				/* LDV_COMMENT_BEGIN_PREP */
 1116 				#define IFE_TAB_MASK 15
 1117 				#ifdef CONFIG_MODULES
 1118 				#endif
 1119 				/* LDV_COMMENT_END_PREP */
 1120 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "act_ife_ops" */
 1121 				ldv_handler_precall();
 1122 				tcf_ife_init( var_group3, var_group4, var_tcf_ife_init_21_p2, var_tcf_ife_init_21_p3, var_tcf_ife_init_21_p4, var_tcf_ife_init_21_p5);
 1123 				
 1124 
 1125 				
 1126 
 1127 			}
 1128 
 1129 			break;
 1130 			case 4: {
 1131 
 1132 				/** STRUCT: struct type: tc_action_ops, struct name: act_ife_ops **/
 1133 				
 1134 
 1135 				/* content: static int tcf_ife_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, struct tc_action *a)*/
 1136 				/* LDV_COMMENT_BEGIN_PREP */
 1137 				#define IFE_TAB_MASK 15
 1138 				#ifdef CONFIG_MODULES
 1139 				#endif
 1140 				/* LDV_COMMENT_END_PREP */
 1141 				/* LDV_COMMENT_FUNCTION_CALL Function from field "walk" from driver structure with callbacks "act_ife_ops" */
 1142 				ldv_handler_precall();
 1143 				tcf_ife_walker( var_group3, var_group1, var_tcf_ife_walker_28_p2, var_tcf_ife_walker_28_p3, var_tcf_ife_walker_28_p4);
 1144 				
 1145 
 1146 				
 1147 
 1148 			}
 1149 
 1150 			break;
 1151 			case 5: {
 1152 
 1153 				/** STRUCT: struct type: tc_action_ops, struct name: act_ife_ops **/
 1154 				
 1155 
 1156 				/* content: static int tcf_ife_search(struct net *net, struct tc_action *a, u32 index)*/
 1157 				/* LDV_COMMENT_BEGIN_PREP */
 1158 				#define IFE_TAB_MASK 15
 1159 				#ifdef CONFIG_MODULES
 1160 				#endif
 1161 				/* LDV_COMMENT_END_PREP */
 1162 				/* LDV_COMMENT_FUNCTION_CALL Function from field "lookup" from driver structure with callbacks "act_ife_ops" */
 1163 				ldv_handler_precall();
 1164 				tcf_ife_search( var_group3, var_group2, var_tcf_ife_search_29_p2);
 1165 				
 1166 
 1167 				
 1168 
 1169 			}
 1170 
 1171 			break;
 1172 			case 6: {
 1173 
 1174 				/** STRUCT: struct type: pernet_operations, struct name: ife_net_ops **/
 1175 				
 1176 
 1177 				/* content: static __net_init int ife_init_net(struct net *net)*/
 1178 				/* LDV_COMMENT_BEGIN_PREP */
 1179 				#define IFE_TAB_MASK 15
 1180 				#ifdef CONFIG_MODULES
 1181 				#endif
 1182 				/* LDV_COMMENT_END_PREP */
 1183 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "ife_net_ops" */
 1184 				ldv_handler_precall();
 1185 				ife_init_net( var_group3);
 1186 				
 1187 
 1188 				
 1189 
 1190 			}
 1191 
 1192 			break;
 1193 			case 7: {
 1194 
 1195 				/** STRUCT: struct type: pernet_operations, struct name: ife_net_ops **/
 1196 				
 1197 
 1198 				/* content: static void __net_exit ife_exit_net(struct net *net)*/
 1199 				/* LDV_COMMENT_BEGIN_PREP */
 1200 				#define IFE_TAB_MASK 15
 1201 				#ifdef CONFIG_MODULES
 1202 				#endif
 1203 				/* LDV_COMMENT_END_PREP */
 1204 				/* LDV_COMMENT_FUNCTION_CALL Function from field "exit" from driver structure with callbacks "ife_net_ops" */
 1205 				ldv_handler_precall();
 1206 				ife_exit_net( var_group3);
 1207 				
 1208 
 1209 				
 1210 
 1211 			}
 1212 
 1213 			break;
 1214 			default: break;
 1215 
 1216 		}
 1217 
 1218 	}
 1219 
 1220 	ldv_module_exit: 
 1221 
 1222 	/** INIT: init_type: ST_MODULE_EXIT **/
 1223 	/* content: static void __exit ife_cleanup_module(void)*/
 1224 	/* LDV_COMMENT_BEGIN_PREP */
 1225 	#define IFE_TAB_MASK 15
 1226 	#ifdef CONFIG_MODULES
 1227 	#endif
 1228 	/* LDV_COMMENT_END_PREP */
 1229 	/* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
 1230 	ldv_handler_precall();
 1231 	ife_cleanup_module();
 1232 
 1233 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
 1234 	ldv_final: ldv_check_final_state();
 1235 
 1236 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 1237 	return;
 1238 
 1239 }
 1240 #endif
 1241 
 1242 /* LDV_COMMENT_END_MAIN */
    1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 extern void ldv_spin_lock(void);
    9 extern void ldv_spin_unlock(void);
   10 extern int ldv_spin_trylock(void);
   11 
   12 #include <linux/kernel.h>
   13 #include <verifier/rcv.h>
   14 #include <linux/module.h>
   15 #include <linux/slab.h>
   16 
   17 extern void *ldv_undefined_pointer(void);
   18 extern void ldv_check_alloc_flags(gfp_t flags);
   19 extern void ldv_check_alloc_nonatomic(void);
   20 /* Returns an arbitrary page in addition to checking flags */
   21 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
   22 #line 1 "/home/ubuntu/launches/work/current--X--net--X--defaultlinux-4.7-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.7-rc1.tar.xz/csd_deg_dscv/1526/dscv_tempdir/dscv/ri/43_1a/net/sched/act_ife.c"
   23 
   24 /*
   25  * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
   26  *
   27  *		Refer to:
   28  *		draft-ietf-forces-interfelfb-03
   29  *		and
   30  *		netdev01 paper:
   31  *		"Distributing Linux Traffic Control Classifier-Action
   32  *		Subsystem"
   33  *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
   34  *
   35  *		This program is free software; you can redistribute it and/or
   36  *		modify it under the terms of the GNU General Public License
   37  *		as published by the Free Software Foundation; either version
   38  *		2 of the License, or (at your option) any later version.
   39  *
   40  * copyright Jamal Hadi Salim (2015)
   41  *
   42 */
   43 
   44 #include <linux/types.h>
   45 #include <linux/kernel.h>
   46 #include <linux/string.h>
   47 #include <linux/errno.h>
   48 #include <linux/skbuff.h>
   49 #include <linux/rtnetlink.h>
   50 #include <linux/module.h>
   51 #include <linux/init.h>
   52 #include <net/net_namespace.h>
   53 #include <net/netlink.h>
   54 #include <net/pkt_sched.h>
   55 #include <uapi/linux/tc_act/tc_ife.h>
   56 #include <net/tc_act/tc_ife.h>
   57 #include <linux/etherdevice.h>
   58 
   59 #define IFE_TAB_MASK 15
   60 
   61 static int ife_net_id;
   62 static int max_metacnt = IFE_META_MAX + 1;
   63 
   64 static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
   65 	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
   66 	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
   67 	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
   68 	[TCA_IFE_TYPE] = { .type = NLA_U16},
   69 };
   70 
   71 /* Caller takes care of presenting data in network order
   72 */
   73 int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
   74 {
   75 	u32 *tlv = (u32 *)(skbdata);
   76 	u16 totlen = nla_total_size(dlen);	/*alignment + hdr */
   77 	char *dptr = (char *)tlv + NLA_HDRLEN;
   78 	u32 htlv = attrtype << 16 | totlen;
   79 
   80 	*tlv = htonl(htlv);
   81 	memset(dptr, 0, totlen - NLA_HDRLEN);
   82 	memcpy(dptr, dval, dlen);
   83 
   84 	return totlen;
   85 }
   86 EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
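/* Worked example (editorial sketch, not in the original source; "buf" is an
 * assumed caller-provided buffer of at least nla_total_size(4) bytes):
 *
 *	u32 v = htonl(0xdeadbeef);	// caller supplies network order
 *	int n = ife_tlv_meta_encode(buf, IFE_META_SKBMARK, 4, &v);
 *
 * Afterwards n == 8 and buf holds [type:2][totlen:2][value:4] in network
 * byte order, where totlen == nla_total_size(4) == 8 counts the 4-byte
 * header: the same T+L+V == 2+2+4 accounting as ife_check_meta_u32() below.
 */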
   87 
   88 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
   89 {
   90 	if (mi->metaval)
   91 		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
   92 	else
   93 		return nla_put(skb, mi->metaid, 0, NULL);
   94 }
   95 EXPORT_SYMBOL_GPL(ife_get_meta_u32);
   96 
   97 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
   98 {
   99 	if (metaval || mi->metaval)
  100 		return 8; /* T+L+V == 2+2+4 */
  101 
  102 	return 0;
  103 }
  104 EXPORT_SYMBOL_GPL(ife_check_meta_u32);
  105 
  106 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
  107 {
  108 	u32 edata = metaval;
  109 
  110 	if (mi->metaval)
  111 		edata = *(u32 *)mi->metaval;
  112 	else if (metaval)
  113 		edata = metaval;
  114 
  115 	if (!edata) /* will not encode */
  116 		return 0;
  117 
  118 	edata = htonl(edata);
  119 	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
  120 }
  121 EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
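/* Editorial note: in ife_encode_meta_u32() a value configured at install
 * time (mi->metaval) takes precedence over the runtime value passed in, and
 * a resulting value of zero is never encoded at all.
 */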
  122 
  123 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
  124 {
  125 	if (mi->metaval)
  126 		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
  127 	else
  128 		return nla_put(skb, mi->metaid, 0, NULL);
  129 }
  130 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
  131 
  132 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
  133 {
  134 	mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
  135 	if (!mi->metaval)
  136 		return -ENOMEM;
  137 
  138 	return 0;
  139 }
  140 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
  141 
  142 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
  143 {
  144 	mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
  145 	if (!mi->metaval)
  146 		return -ENOMEM;
  147 
  148 	return 0;
  149 }
  150 EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
  151 
  152 void ife_release_meta_gen(struct tcf_meta_info *mi)
  153 {
  154 	kfree(mi->metaval);
  155 }
  156 EXPORT_SYMBOL_GPL(ife_release_meta_gen);
  157 
  158 int ife_validate_meta_u32(void *val, int len)
  159 {
  160 	if (len == 4)
  161 		return 0;
  162 
  163 	return -EINVAL;
  164 }
  165 EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
  166 
  167 int ife_validate_meta_u16(void *val, int len)
  168 {
  169 	/* length will include padding */
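	/* (NLA_ALIGN(2) == 4, since netlink pads attribute payloads to 4 bytes) */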
  170 	if (len == NLA_ALIGN(2))
  171 		return 0;
  172 
  173 	return -EINVAL;
  174 }
  175 EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
  176 
  177 static LIST_HEAD(ifeoplist);
  178 static DEFINE_RWLOCK(ife_mod_lock);
  179 
  180 static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
  181 {
  182 	struct tcf_meta_ops *o;
  183 
  184 	read_lock(&ife_mod_lock);
  185 	list_for_each_entry(o, &ifeoplist, list) {
  186 		if (o->metaid == metaid) {
  187 			if (!try_module_get(o->owner))
  188 				o = NULL;
  189 			read_unlock(&ife_mod_lock);
  190 			return o;
  191 		}
  192 	}
  193 	read_unlock(&ife_mod_lock);
  194 
  195 	return NULL;
  196 }
  197 
  198 int register_ife_op(struct tcf_meta_ops *mops)
  199 {
  200 	struct tcf_meta_ops *m;
  201 
  202 	if (!mops->metaid || !mops->metatype || !mops->name ||
  203 	    !mops->check_presence || !mops->encode || !mops->decode ||
  204 	    !mops->get || !mops->alloc)
  205 		return -EINVAL;
  206 
  207 	write_lock(&ife_mod_lock);
  208 
  209 	list_for_each_entry(m, &ifeoplist, list) {
  210 		if (m->metaid == mops->metaid ||
  211 		    (strcmp(mops->name, m->name) == 0)) {
  212 			write_unlock(&ife_mod_lock);
  213 			return -EEXIST;
  214 		}
  215 	}
  216 
  217 	if (!mops->release)
  218 		mops->release = ife_release_meta_gen;
  219 
  220 	list_add_tail(&mops->list, &ifeoplist);
  221 	write_unlock(&ife_mod_lock);
  222 	return 0;
  223 }
  224 EXPORT_SYMBOL_GPL(register_ife_op);
  225 
  226 int unregister_ife_op(struct tcf_meta_ops *mops)
  227 {
  228 	struct tcf_meta_ops *m;
  229 	int err = -ENOENT;
  230 
  231 	write_lock(&ife_mod_lock);
  232 	list_for_each_entry(m, &ifeoplist, list) {
  233 		if (m->metaid == mops->metaid) {
  234 			list_del(&mops->list);
  235 			err = 0;
  236 			break;
  237 		}
  238 	}
  239 	write_unlock(&ife_mod_lock);
  240 
  241 	return err;
  242 }
  243 EXPORT_SYMBOL_GPL(unregister_ife_op);
  244 
  245 static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
  246 {
  247 	int ret = 0;
  248 	/* XXX: unfortunately we can't use nla_policy at this point
  249 	* because a length of 0 is valid in the case of
  250 	* "allow". "use" semantics do enforce a proper
  251 	* length, and nla_policy could have been used, but it makes it
  252 	* hard to use it just for that..
  253 	*/
  254 	if (ops->validate)
  255 		return ops->validate(val, len);
  256 
  257 	if (ops->metatype == NLA_U32)
  258 		ret = ife_validate_meta_u32(val, len);
  259 	else if (ops->metatype == NLA_U16)
  260 		ret = ife_validate_meta_u16(val, len);
  261 
  262 	return ret;
  263 }
  264 
  265 /* called when adding new meta information
  266  * under ife->tcf_lock
  267 */
  268 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
  269 				void *val, int len)
  270 {
  271 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
  272 	int ret = 0;
  273 
  274 	if (!ops) {
  275 		ret = -ENOENT;
  276 #ifdef CONFIG_MODULES
  277 		spin_unlock_bh(&ife->tcf_lock);
  278 		rtnl_unlock();
  279 		request_module("ifemeta%u", metaid);
  280 		rtnl_lock();
  281 		spin_lock_bh(&ife->tcf_lock);
  282 		ops = find_ife_oplist(metaid);
  283 #endif
  284 	}
  285 
  286 	if (ops) {
  287 		ret = 0;
  288 		if (len)
  289 			ret = ife_validate_metatype(ops, val, len);
  290 
  291 		module_put(ops->owner);
  292 	}
  293 
  294 	return ret;
  295 }
  296 
  297 /* called when adding new meta information
  298  * under ife->tcf_lock
  299 */
  300 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
  301 			int len)
  302 {
  303 	struct tcf_meta_info *mi = NULL;
  304 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
  305 	int ret = 0;
  306 
  307 	if (!ops)
  308 		return -ENOENT;
  309 
  310 	mi = kzalloc(sizeof(*mi), GFP_KERNEL);
  311 	if (!mi) {
  312 		/*put back what find_ife_oplist took */
  313 		module_put(ops->owner);
  314 		return -ENOMEM;
  315 	}
  316 
  317 	mi->metaid = metaid;
  318 	mi->ops = ops;
  319 	if (len > 0) {
  320 		ret = ops->alloc(mi, metaval);
  321 		if (ret != 0) {
  322 			kfree(mi);
  323 			module_put(ops->owner);
  324 			return ret;
  325 		}
  326 	}
  327 
  328 	list_add_tail(&mi->metalist, &ife->metalist);
  329 
  330 	return ret;
  331 }
  332 
  333 static int use_all_metadata(struct tcf_ife_info *ife)
  334 {
  335 	struct tcf_meta_ops *o;
  336 	int rc = 0;
  337 	int installed = 0;
  338 
  339 	list_for_each_entry(o, &ifeoplist, list) {
  340 		rc = add_metainfo(ife, o->metaid, NULL, 0);
  341 		if (rc == 0)
  342 			installed += 1;
  343 	}
  344 
  345 	if (installed)
  346 		return 0;
  347 	else
  348 		return -EINVAL;
  349 }
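/* Editorial note: this implements the "allow all" default used by
 * tcf_ife_init() when no TCA_IFE_METALST attribute is given; every currently
 * registered metadata op is added with no value, and the call fails only if
 * none could be installed.
 */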
  350 
  351 static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
  352 {
  353 	struct tcf_meta_info *e;
  354 	struct nlattr *nest;
  355 	unsigned char *b = skb_tail_pointer(skb);
  356 	int total_encoded = 0;
  357 
  358 	/*can only happen on decode */
  359 	if (list_empty(&ife->metalist))
  360 		return 0;
  361 
  362 	nest = nla_nest_start(skb, TCA_IFE_METALST);
  363 	if (!nest)
  364 		goto out_nlmsg_trim;
  365 
  366 	list_for_each_entry(e, &ife->metalist, metalist) {
  367 		if (!e->ops->get(skb, e))
  368 			total_encoded += 1;
  369 	}
  370 
  371 	if (!total_encoded)
  372 		goto out_nlmsg_trim;
  373 
  374 	nla_nest_end(skb, nest);
  375 
  376 	return 0;
  377 
  378 out_nlmsg_trim:
  379 	nlmsg_trim(skb, b);
  380 	return -1;
  381 }
  382 
  383 /* under ife->tcf_lock */
  384 static void _tcf_ife_cleanup(struct tc_action *a, int bind)
  385 {
  386 	struct tcf_ife_info *ife = a->priv;
  387 	struct tcf_meta_info *e, *n;
  388 
  389 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
  390 		module_put(e->ops->owner);
  391 		list_del(&e->metalist);
  392 		if (e->metaval) {
  393 			if (e->ops->release)
  394 				e->ops->release(e);
  395 			else
  396 				kfree(e->metaval);
  397 		}
  398 		kfree(e);
  399 	}
  400 }
  401 
  402 static void tcf_ife_cleanup(struct tc_action *a, int bind)
  403 {
  404 	struct tcf_ife_info *ife = a->priv;
  405 
  406 	spin_lock_bh(&ife->tcf_lock);
  407 	_tcf_ife_cleanup(a, bind);
  408 	spin_unlock_bh(&ife->tcf_lock);
  409 }
  410 
  411 /* under ife->tcf_lock */
  412 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
  413 {
  414 	int len = 0;
  415 	int rc = 0;
  416 	int i = 0;
  417 	void *val;
  418 
  419 	for (i = 1; i < max_metacnt; i++) {
  420 		if (tb[i]) {
  421 			val = nla_data(tb[i]);
  422 			len = nla_len(tb[i]);
  423 
  424 			rc = load_metaops_and_vet(ife, i, val, len);
  425 			if (rc != 0)
  426 				return rc;
  427 
  428 			rc = add_metainfo(ife, i, val, len);
  429 			if (rc)
  430 				return rc;
  431 		}
  432 	}
  433 
  434 	return rc;
  435 }
  436 
  437 static int tcf_ife_init(struct net *net, struct nlattr *nla,
  438 			struct nlattr *est, struct tc_action *a,
  439 			int ovr, int bind)
  440 {
  441 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  442 	struct nlattr *tb[TCA_IFE_MAX + 1];
  443 	struct nlattr *tb2[IFE_META_MAX + 1];
  444 	struct tcf_ife_info *ife;
  445 	struct tc_ife *parm;
  446 	u16 ife_type = 0;
  447 	u8 *daddr = NULL;
  448 	u8 *saddr = NULL;
  449 	int ret = 0, exists = 0;
  450 	int err;
  451 
  452 	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
  453 	if (err < 0)
  454 		return err;
  455 
  456 	if (!tb[TCA_IFE_PARMS])
  457 		return -EINVAL;
  458 
  459 	parm = nla_data(tb[TCA_IFE_PARMS]);
  460 
  461 	exists = tcf_hash_check(tn, parm->index, a, bind);
  462 	if (exists && bind)
  463 		return 0;
  464 
  465 	if (parm->flags & IFE_ENCODE) {
  466 		/* Until we get issued the ethertype, we can't have
  467 		 * a default..
  468 		 */
  469 		if (!tb[TCA_IFE_TYPE]) {
  470 			if (exists)
  471 				tcf_hash_release(a, bind);
  472 			pr_info("You MUST pass ethertype for encoding\n");
  473 			return -EINVAL;
  474 		}
  475 	}
  476 
  477 	if (!exists) {
  478 		ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife),
  479 				      bind, false);
  480 		if (ret)
  481 			return ret;
  482 		ret = ACT_P_CREATED;
  483 	} else {
  484 		tcf_hash_release(a, bind);
  485 		if (!ovr)
  486 			return -EEXIST;
  487 	}
  488 
  489 	ife = to_ife(a);
  490 	ife->flags = parm->flags;
  491 
  492 	if (parm->flags & IFE_ENCODE) {
  493 		ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
  494 		if (tb[TCA_IFE_DMAC])
  495 			daddr = nla_data(tb[TCA_IFE_DMAC]);
  496 		if (tb[TCA_IFE_SMAC])
  497 			saddr = nla_data(tb[TCA_IFE_SMAC]);
  498 	}
  499 
  500 	spin_lock_bh(&ife->tcf_lock);
  501 	ife->tcf_action = parm->action;
  502 
  503 	if (parm->flags & IFE_ENCODE) {
  504 		if (daddr)
  505 			ether_addr_copy(ife->eth_dst, daddr);
  506 		else
  507 			eth_zero_addr(ife->eth_dst);
  508 
  509 		if (saddr)
  510 			ether_addr_copy(ife->eth_src, saddr);
  511 		else
  512 			eth_zero_addr(ife->eth_src);
  513 
  514 		ife->eth_type = ife_type;
  515 	}
  516 
  517 	if (ret == ACT_P_CREATED)
  518 		INIT_LIST_HEAD(&ife->metalist);
  519 
  520 	if (tb[TCA_IFE_METALST]) {
  521 		err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
  522 				       NULL);
  523 		if (err) {
  524 metadata_parse_err:
  525 			if (exists)
  526 				tcf_hash_release(a, bind);
  527 			if (ret == ACT_P_CREATED)
  528 				_tcf_ife_cleanup(a, bind);
  529 
  530 			spin_unlock_bh(&ife->tcf_lock);
  531 			return err;
  532 		}
  533 
  534 		err = populate_metalist(ife, tb2);
  535 		if (err)
  536 			goto metadata_parse_err;
  537 
  538 	} else {
  539 		/* if no metadata allow list was passed, or allow-all was
  540 		 * passed, then we process by adding as many supported metadata
  541 		 * as we can. There had better be at least one, else we are
  542 		 * going to bail out
  543 		 */
  544 		err = use_all_metadata(ife);
  545 		if (err) {
  546 			if (ret == ACT_P_CREATED)
  547 				_tcf_ife_cleanup(a, bind);
  548 
  549 			spin_unlock_bh(&ife->tcf_lock);
  550 			return err;
  551 		}
  552 	}
  553 
  554 	spin_unlock_bh(&ife->tcf_lock);
  555 
  556 	if (ret == ACT_P_CREATED)
  557 		tcf_hash_insert(tn, a);
  558 
  559 	return ret;
  560 }
  561 
  562 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
  563 			int ref)
  564 {
  565 	unsigned char *b = skb_tail_pointer(skb);
  566 	struct tcf_ife_info *ife = a->priv;
  567 	struct tc_ife opt = {
  568 		.index = ife->tcf_index,
  569 		.refcnt = ife->tcf_refcnt - ref,
  570 		.bindcnt = ife->tcf_bindcnt - bind,
  571 		.action = ife->tcf_action,
  572 		.flags = ife->flags,
  573 	};
  574 	struct tcf_t t;
  575 
  576 	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
  577 		goto nla_put_failure;
  578 
  579 	t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install);
  580 	t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse);
  581 	t.expires = jiffies_to_clock_t(ife->tcf_tm.expires);
  582 	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
  583 		goto nla_put_failure;
  584 
  585 	if (!is_zero_ether_addr(ife->eth_dst)) {
  586 		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
  587 			goto nla_put_failure;
  588 	}
  589 
  590 	if (!is_zero_ether_addr(ife->eth_src)) {
  591 		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
  592 			goto nla_put_failure;
  593 	}
  594 
  595 	if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
  596 		goto nla_put_failure;
  597 
  598 	if (dump_metalist(skb, ife)) {
  599 		/*ignore failure to dump metalist */
  600 		pr_info("Failed to dump metalist\n");
  601 	}
  602 
  603 	return skb->len;
  604 
  605 nla_put_failure:
  606 	nlmsg_trim(skb, b);
  607 	return -1;
  608 }
  609 
  610 int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
  611 		       u16 metaid, u16 mlen, void *mdata)
  612 {
  613 	struct tcf_meta_info *e;
  614 
  615 	/* XXX: use hash to speed up */
  616 	list_for_each_entry(e, &ife->metalist, metalist) {
  617 		if (metaid == e->metaid) {
  618 			if (e->ops) {
  619 				/* We check for decode presence already */
  620 				return e->ops->decode(skb, mdata, mlen);
  621 			}
  622 		}
  623 	}
  624 
  625 	return 0;
  626 }
  627 
  628 struct ifeheadr {
  629 	__be16 metalen;
  630 	u8 tlv_data[];
  631 };
  632 
  633 struct meta_tlvhdr {
  634 	__be16 type;
  635 	__be16 len;
  636 };
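/* Editorial sketch of the on-wire layout consumed by tcf_ife_decode(),
 * mirroring the OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
 * comment in the encode path:
 *
 *	| metalen (2B, network order, counts itself) | TLV ... TLV | L2 frame |
 *
 * Each TLV starts with a meta_tlvhdr whose len includes the 4-byte header,
 * so the payload handed to find_decode_metaid() is (len - 4) bytes long.
 */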
  637 
  638 static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
  639 			  struct tcf_result *res)
  640 {
  641 	struct tcf_ife_info *ife = a->priv;
  642 	int action = ife->tcf_action;
  643 	struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
  644 	u16 ifehdrln = ifehdr->metalen;
  645 	struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
  646 
  647 	spin_lock(&ife->tcf_lock);
  648 	bstats_update(&ife->tcf_bstats, skb);
  649 	ife->tcf_tm.lastuse = jiffies;
  650 	spin_unlock(&ife->tcf_lock);
  651 
  652 	ifehdrln = ntohs(ifehdrln);
  653 	if (unlikely(!pskb_may_pull(skb, ifehdrln))) {
  654 		spin_lock(&ife->tcf_lock);
  655 		ife->tcf_qstats.drops++;
  656 		spin_unlock(&ife->tcf_lock);
  657 		return TC_ACT_SHOT;
  658 	}
  659 
  660 	skb_set_mac_header(skb, ifehdrln);
  661 	__skb_pull(skb, ifehdrln);
  662 	skb->protocol = eth_type_trans(skb, skb->dev);
  663 	ifehdrln -= IFE_METAHDRLEN;
  664 
  665 	while (ifehdrln > 0) {
  666 		u8 *tlvdata = (u8 *)tlv;
  667 		u16 mtype = tlv->type;
  668 		u16 mlen = tlv->len;
  669 
  670 		mtype = ntohs(mtype);
  671 		mlen = ntohs(mlen);
  672 
  673 		if (find_decode_metaid(skb, ife, mtype, (mlen - 4),
  674 				       (void *)(tlvdata + 4))) {
  675 			/* abuse overlimits to count when we receive metadata
  676 			 * but don't have an ops for it
  677 			 */
  678 			pr_info_ratelimited("Unknown metaid %d alnlen %d\n",
  679 					    mtype, mlen);
  680 			ife->tcf_qstats.overlimits++;
  681 		}
  682 
  683 		tlvdata += mlen;
  684 		ifehdrln -= mlen;
  685 		tlv = (struct meta_tlvhdr *)tlvdata;
  686 	}
  687 
  688 	skb_reset_network_header(skb);
  689 	return action;
  690 }
  691 
  692 /* XXX: check if we can do this at install time instead of on the
  693  * current send data path
  694  */
  695 static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
  696 {
  697 	struct tcf_meta_info *e, *n;
  698 	int tot_run_sz = 0, run_sz = 0;
  699 
  700 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
  701 		if (e->ops->check_presence) {
  702 			run_sz = e->ops->check_presence(skb, e);
  703 			tot_run_sz += run_sz;
  704 		}
  705 	}
  706 
  707 	return tot_run_sz;
  708 }
  709 
  710 static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
  711 			  struct tcf_result *res)
  712 {
  713 	struct tcf_ife_info *ife = a->priv;
  714 	int action = ife->tcf_action;
  715 	struct ethhdr *oethh;	/* outer ether header */
  716 	struct ethhdr *iethh;	/* inner eth header */
  717 	struct tcf_meta_info *e;
  718 	/*
  719 	   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
  720 	   where ORIGDATA = original ethernet header ...
  721 	 */
  722 	u16 metalen = ife_get_sz(skb, ife);
  723 	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
  724 	unsigned int skboff = skb->dev->hard_header_len;
  725 	u32 at = G_TC_AT(skb->tc_verd);
  726 	int new_len = skb->len + hdrm;
  727 	bool exceed_mtu = false;
  728 	int err;
  729 
  730 	if (at & AT_EGRESS) {
  731 		if (new_len > skb->dev->mtu)
  732 			exceed_mtu = true;
  733 	}
  734 
  735 	spin_lock(&ife->tcf_lock);
  736 	bstats_update(&ife->tcf_bstats, skb);
  737 	ife->tcf_tm.lastuse = jiffies;
  738 
  739 	if (!metalen) {		/* no metadata to send */
  740 		/* abuse overlimits to count when we allow packet
  741 		 * with no metadata
  742 		 */
  743 		ife->tcf_qstats.overlimits++;
  744 		spin_unlock(&ife->tcf_lock);
  745 		return action;
  746 	}
  747 	/* could be stupid policy setup or mtu config
  748 	 * so let's be conservative.. */
  749 	if ((action == TC_ACT_SHOT) || exceed_mtu) {
  750 		ife->tcf_qstats.drops++;
  751 		spin_unlock(&ife->tcf_lock);
  752 		return TC_ACT_SHOT;
  753 	}
  754 
  755 	iethh = eth_hdr(skb);
  756 
  757 	err = skb_cow_head(skb, hdrm);
  758 	if (unlikely(err)) {
  759 		ife->tcf_qstats.drops++;
  760 		spin_unlock(&ife->tcf_lock);
  761 		return TC_ACT_SHOT;
  762 	}
  763 
  764 	if (!(at & AT_EGRESS))
  765 		skb_push(skb, skb->dev->hard_header_len);
  766 
  767 	__skb_push(skb, hdrm);
  768 	memcpy(skb->data, iethh, skb->mac_len);
  769 	skb_reset_mac_header(skb);
  770 	oethh = eth_hdr(skb);
  771 
  772 	/*total metadata length */
  773 	metalen += IFE_METAHDRLEN;
  774 	metalen = htons(metalen);
  775 	memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN);
  776 	skboff += IFE_METAHDRLEN;
  777 
  778 	/* XXX: we don't have a clever way of telling encode to
  779 	 * not repeat some of the computations that are done by
  780 	 * ops->check_presence...
  781 	 */
  782 	list_for_each_entry(e, &ife->metalist, metalist) {
  783 		if (e->ops->encode) {
  784 			err = e->ops->encode(skb, (void *)(skb->data + skboff),
  785 					     e);
  786 		}
  787 		if (err < 0) {
  788 			/* too corrupt to keep around if overwritten */
  789 			ife->tcf_qstats.drops++;
  790 			spin_unlock(&ife->tcf_lock);
  791 			return TC_ACT_SHOT;
  792 		}
  793 		skboff += err;
  794 	}
  795 
  796 	if (!is_zero_ether_addr(ife->eth_src))
  797 		ether_addr_copy(oethh->h_source, ife->eth_src);
  798 	else
  799 		ether_addr_copy(oethh->h_source, iethh->h_source);
  800 	if (!is_zero_ether_addr(ife->eth_dst))
  801 		ether_addr_copy(oethh->h_dest, ife->eth_dst);
  802 	else
  803 		ether_addr_copy(oethh->h_dest, iethh->h_dest);
  804 	oethh->h_proto = htons(ife->eth_type);
  805 
  806 	if (!(at & AT_EGRESS))
  807 		skb_pull(skb, skb->dev->hard_header_len);
  808 
  809 	spin_unlock(&ife->tcf_lock);
  810 
  811 	return action;
  812 }
  813 
  814 static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
  815 		       struct tcf_result *res)
  816 {
  817 	struct tcf_ife_info *ife = a->priv;
  818 
  819 	if (ife->flags & IFE_ENCODE)
  820 		return tcf_ife_encode(skb, a, res);
  821 
  822 	if (!(ife->flags & IFE_ENCODE))
  823 		return tcf_ife_decode(skb, a, res);
  824 
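	/* Editorial note: the two checks above are complementary, so this
	 * fallback is effectively unreachable and kept as defensive code. */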
  825 	pr_info_ratelimited("unknown failure(policy neither de/encode\n");
  826 	spin_lock(&ife->tcf_lock);
  827 	bstats_update(&ife->tcf_bstats, skb);
  828 	ife->tcf_tm.lastuse = jiffies;
  829 	ife->tcf_qstats.drops++;
  830 	spin_unlock(&ife->tcf_lock);
  831 
  832 	return TC_ACT_SHOT;
  833 }
  834 
  835 static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
  836 			  struct netlink_callback *cb, int type,
  837 			  struct tc_action *a)
  838 {
  839 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  840 
  841 	return tcf_generic_walker(tn, skb, cb, type, a);
  842 }
  843 
  844 static int tcf_ife_search(struct net *net, struct tc_action *a, u32 index)
  845 {
  846 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  847 
  848 	return tcf_hash_search(tn, a, index);
  849 }
  850 
  851 static struct tc_action_ops act_ife_ops = {
  852 	.kind = "ife",
  853 	.type = TCA_ACT_IFE,
  854 	.owner = THIS_MODULE,
  855 	.act = tcf_ife_act,
  856 	.dump = tcf_ife_dump,
  857 	.cleanup = tcf_ife_cleanup,
  858 	.init = tcf_ife_init,
  859 	.walk = tcf_ife_walker,
  860 	.lookup = tcf_ife_search,
  861 };
  862 
  863 static __net_init int ife_init_net(struct net *net)
  864 {
  865 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  866 
  867 	return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK);
  868 }
  869 
  870 static void __net_exit ife_exit_net(struct net *net)
  871 {
  872 	struct tc_action_net *tn = net_generic(net, ife_net_id);
  873 
  874 	tc_action_net_exit(tn);
  875 }
  876 
  877 static struct pernet_operations ife_net_ops = {
  878 	.init = ife_init_net,
  879 	.exit = ife_exit_net,
  880 	.id   = &ife_net_id,
  881 	.size = sizeof(struct tc_action_net),
  882 };
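/* Editorial note: with .id and .size set, the pernet machinery allocates a
 * struct tc_action_net for every network namespace and stores it under
 * ife_net_id, which net_generic(net, ife_net_id) looks up in the callbacks
 * above.
 */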
  883 
  884 static int __init ife_init_module(void)
  885 {
  886 	return tcf_register_action(&act_ife_ops, &ife_net_ops);
  887 }
  888 
  889 static void __exit ife_cleanup_module(void)
  890 {
  891 	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
  892 }
  893 
  894 module_init(ife_init_module);
  895 module_exit(ife_cleanup_module);
  896 
  897 MODULE_AUTHOR("Jamal Hadi Salim(2015)");
  898 MODULE_DESCRIPTION("Inter-FE LFB action");
  899 MODULE_LICENSE("GPL");
  900 
  901 
  902 
  903 
  904 
 1265 
 1266 #line 22 "/home/ubuntu/launches/work/current--X--net--X--defaultlinux-4.7-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.7-rc1.tar.xz/csd_deg_dscv/1526/dscv_tempdir/dscv/ri/43_1a/net/sched/act_ife.o.c.prepared"
    1 
    2 
    3 /* Here is the definition of CHECK_WAIT_FLAGS(flags) macro. */
    4 #include "include/gfp.h"
    5 #include <linux/gfp.h>
    6 #include <verifier/rcv.h>
    7 #include <kernel-model/ERR.inc>
    8 
    9 #define LDV_ZERO_STATE 0
   10 
   11 
   12 /* There are 2 possible states of spin lock. */
   13 enum {
   14   LDV_SPIN_UNLOCKED = LDV_ZERO_STATE, /* Spin isn't locked. */
   15   LDV_SPIN_LOCKED /* Spin is locked. */
   16 };
   17 
   18 
   19 /* Spin isn't locked at the beginning. */
   20 int ldv_spin = LDV_SPIN_UNLOCKED;
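/* Editorial note: the model collapses every kernel spinlock into this single
 * flag, so holding any modelled lock marks the whole state as atomic; an
 * over-approximation that is adequate for the allocation-flags rule checked
 * here.
 */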
   21 
   22 
   23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags') Check that a memory allocating function is called with a correct value of flags while a spin lock may be held. */
   24 void ldv_check_alloc_flags(gfp_t flags)
   25 {
   26   /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
   27   ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
   28 }
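/* Editorial sketch of what the assertion above rejects (hypothetical driver
 * code, not taken from act_ife.c):
 *
 *	ldv_spin_lock();
 *	p = kmalloc(len, GFP_KERNEL);	// __GFP_WAIT set under a spin lock: reported
 *	q = kmalloc(len, GFP_ATOMIC);	// atomic allocation: passes the check
 */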
   29 
   30 extern struct page *ldv_some_page(void);
   31 
   32 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags_and_return_some_page') Check that a memory allocating function is called with a correct value of flags while a spin lock may be held. */
   33 struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags)
   34 {
   35   /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
   36   ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
   37   /* LDV_COMMENT_RETURN Return a page pointer (maybe NULL). */
   38   return ldv_some_page();
   39 }
   40 
   41 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_nonatomic') Check that a memory allocating function that may sleep is not called while a spin lock is held. */
   42 void ldv_check_alloc_nonatomic(void)
   43 {
   44   /* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then this memory allocating function must not be called, because it implicitly uses the GFP_KERNEL flag. */
   45   ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED);
   46 }
   47 
   48 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock') Lock spin. */
   49 void ldv_spin_lock(void)
   50 {
   51   /* LDV_COMMENT_CHANGE_STATE Lock spin. */
   52   ldv_spin = LDV_SPIN_LOCKED;
   53 }
   54 
   55 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock') Unlock spin. */
   56 void ldv_spin_unlock(void)
   57 {
   58   /* LDV_COMMENT_CHANGE_STATE Unlock spin. */
   59   ldv_spin = LDV_SPIN_UNLOCKED;
   60 }
   61 
   62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock') Try to lock spin. Returns 1 and locks the spin on success, or returns 0 and leaves it unlocked. */
   63 int ldv_spin_trylock(void)
   64 {
   65   int is_lock;
   66 
   67   /* LDV_COMMENT_OTHER Do this to make a nondeterministic choice. */
   68   is_lock = ldv_undef_int();
   69 
   70   if (is_lock)
   71   {
   72     /* LDV_COMMENT_RETURN Don't lock spin and return 0. */
   73     return 0;
   74   }
   75   else
   76   {
   77     /* LDV_COMMENT_CHANGE_STATE Lock spin. */
   78     ldv_spin = LDV_SPIN_LOCKED;
   79     /* LDV_COMMENT_RETURN Return 1 since spin was locked. */
   80     return 1;
   81   }
   82 }
    1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because of some static verifiers (like
    9    BLAST) don't accept multiple error labels through a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   19 /* Infinite loop, that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
   47 
   48 /* Add explicit model for the __builtin_expect GCC function. Without the model a
   49    return value will be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
   65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */
    1 #ifndef __LINUX_COMPILER_H
    2 #define __LINUX_COMPILER_H
    3 
    4 #ifndef __ASSEMBLY__
    5 
    6 #ifdef __CHECKER__
    7 # define __user		__attribute__((noderef, address_space(1)))
    8 # define __kernel	__attribute__((address_space(0)))
    9 # define __safe		__attribute__((safe))
   10 # define __force	__attribute__((force))
   11 # define __nocast	__attribute__((nocast))
   12 # define __iomem	__attribute__((noderef, address_space(2)))
   13 # define __must_hold(x)	__attribute__((context(x,1,1)))
   14 # define __acquires(x)	__attribute__((context(x,0,1)))
   15 # define __releases(x)	__attribute__((context(x,1,0)))
   16 # define __acquire(x)	__context__(x,1)
   17 # define __release(x)	__context__(x,-1)
   18 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
   19 # define __percpu	__attribute__((noderef, address_space(3)))
   20 # define __pmem		__attribute__((noderef, address_space(5)))
   21 #ifdef CONFIG_SPARSE_RCU_POINTER
   22 # define __rcu		__attribute__((noderef, address_space(4)))
   23 #else /* CONFIG_SPARSE_RCU_POINTER */
   24 # define __rcu
   25 #endif /* CONFIG_SPARSE_RCU_POINTER */
   26 # define __private	__attribute__((noderef))
   27 extern void __chk_user_ptr(const volatile void __user *);
   28 extern void __chk_io_ptr(const volatile void __iomem *);
   29 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
   30 #else /* __CHECKER__ */
   31 # define __user
   32 # define __kernel
   33 # define __safe
   34 # define __force
   35 # define __nocast
   36 # define __iomem
   37 # define __chk_user_ptr(x) (void)0
   38 # define __chk_io_ptr(x) (void)0
   39 # define __builtin_warning(x, y...) (1)
   40 # define __must_hold(x)
   41 # define __acquires(x)
   42 # define __releases(x)
   43 # define __acquire(x) (void)0
   44 # define __release(x) (void)0
   45 # define __cond_lock(x,c) (c)
   46 # define __percpu
   47 # define __rcu
   48 # define __pmem
   49 # define __private
   50 # define ACCESS_PRIVATE(p, member) ((p)->member)
   51 #endif /* __CHECKER__ */
   52 
   53 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
   54 #define ___PASTE(a,b) a##b
   55 #define __PASTE(a,b) ___PASTE(a,b)
   56 
   57 #ifdef __KERNEL__
   58 
   59 #ifdef __GNUC__
   60 #include <linux/compiler-gcc.h>
   61 #endif
   62 
   63 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
   64 #define notrace __attribute__((hotpatch(0,0)))
   65 #else
   66 #define notrace __attribute__((no_instrument_function))
   67 #endif
   68 
   69 /* Intel compiler defines __GNUC__. So we will overwrite implementations
   70  * coming from above header files here
   71  */
   72 #ifdef __INTEL_COMPILER
   73 # include <linux/compiler-intel.h>
   74 #endif
   75 
   76 /* Clang compiler defines __GNUC__. So we will overwrite implementations
   77  * coming from above header files here
   78  */
   79 #ifdef __clang__
   80 #include <linux/compiler-clang.h>
   81 #endif
   82 
   83 /*
   84  * Generic compiler-dependent macros required for kernel
   85  * build go below this comment. Actual compiler/compiler version
   86  * specific implementations come from the above header files
   87  */
   88 
   89 struct ftrace_branch_data {
   90 	const char *func;
   91 	const char *file;
   92 	unsigned line;
   93 	union {
   94 		struct {
   95 			unsigned long correct;
   96 			unsigned long incorrect;
   97 		};
   98 		struct {
   99 			unsigned long miss;
  100 			unsigned long hit;
  101 		};
  102 		unsigned long miss_hit[2];
  103 	};
  104 };
  105 
  106 /*
  107  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  108  * to disable branch tracing on a per file basis.
  109  */
  110 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
  111     && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
  112 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  113 
  114 #define likely_notrace(x)	__builtin_expect(!!(x), 1)
  115 #define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
  116 
  117 #define __branch_check__(x, expect) ({					\
  118 			int ______r;					\
  119 			static struct ftrace_branch_data		\
  120 				__attribute__((__aligned__(4)))		\
  121 				__attribute__((section("_ftrace_annotated_branch"))) \
  122 				______f = {				\
  123 				.func = __func__,			\
  124 				.file = __FILE__,			\
  125 				.line = __LINE__,			\
  126 			};						\
  127 			______r = likely_notrace(x);			\
  128 			ftrace_likely_update(&______f, ______r, expect); \
  129 			______r;					\
  130 		})
  131 
  132 /*
  133  * Using __builtin_constant_p(x) to ignore cases where the return
  134  * value is always the same.  This idea is taken from a similar patch
  135  * written by Daniel Walker.
  136  */
  137 # ifndef likely
  138 #  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
  139 # endif
  140 # ifndef unlikely
  141 #  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
  142 # endif
  143 
  144 #ifdef CONFIG_PROFILE_ALL_BRANCHES
  145 /*
  146  * "Define 'is'", Bill Clinton
  147  * "Define 'if'", Steven Rostedt
  148  */
  149 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
  150 #define __trace_if(cond) \
  151 	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
  152 	({								\
  153 		int ______r;						\
  154 		static struct ftrace_branch_data			\
  155 			__attribute__((__aligned__(4)))			\
  156 			__attribute__((section("_ftrace_branch")))	\
  157 			______f = {					\
  158 				.func = __func__,			\
  159 				.file = __FILE__,			\
  160 				.line = __LINE__,			\
  161 			};						\
  162 		______r = !!(cond);					\
  163 		______f.miss_hit[______r]++;					\
  164 		______r;						\
  165 	}))
  166 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
  167 
  168 #else
  169 # define likely(x)	__builtin_expect(!!(x), 1)
  170 # define unlikely(x)	__builtin_expect(!!(x), 0)
  171 #endif
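
/* Illustrative sketch (not part of compiler.h): annotating a cold error
 * branch on a hot path; -EINVAL assumes linux/errno.h is available. */
static inline int hypothetical_process(const void *p)
{
	if (unlikely(!p))
		return -EINVAL; /* rarely taken branch */
	/* fast path continues here */
	return 0;
}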
  172 
  173 /* Optimization barrier */
  174 #ifndef barrier
  175 # define barrier() __memory_barrier()
  176 #endif
  177 
  178 #ifndef barrier_data
  179 # define barrier_data(ptr) barrier()
  180 #endif
  181 
  182 /* Unreachable code */
  183 #ifndef unreachable
  184 # define unreachable() do { } while (1)
  185 #endif
  186 
  187 #ifndef RELOC_HIDE
  188 # define RELOC_HIDE(ptr, off)					\
  189   ({ unsigned long __ptr;					\
  190      __ptr = (unsigned long) (ptr);				\
  191     (typeof(ptr)) (__ptr + (off)); })
  192 #endif
  193 
  194 #ifndef OPTIMIZER_HIDE_VAR
  195 #define OPTIMIZER_HIDE_VAR(var) barrier()
  196 #endif
  197 
  198 /* Not-quite-unique ID. */
  199 #ifndef __UNIQUE_ID
  200 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
  201 #endif
  202 
  203 #include <uapi/linux/types.h>
  204 
  205 #define __READ_ONCE_SIZE						\
  206 ({									\
  207 	switch (size) {							\
  208 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
  209 	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
  210 	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
  211 	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
  212 	default:							\
  213 		barrier();						\
  214 		__builtin_memcpy((void *)res, (const void *)p, size);	\
  215 		barrier();						\
  216 	}								\
  217 })
  218 
  219 static __always_inline
  220 void __read_once_size(const volatile void *p, void *res, int size)
  221 {
  222 	__READ_ONCE_SIZE;
  223 }
  224 
  225 #ifdef CONFIG_KASAN
  226 /*
  227  * This function is not 'inline' because __no_sanitize_address conflicts
  228  * with inlining. Attempt to inline it may cause a build failure.
  229  * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
  230  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  231  */
  232 static __no_sanitize_address __maybe_unused
  233 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  234 {
  235 	__READ_ONCE_SIZE;
  236 }
  237 #else
  238 static __always_inline
  239 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
  240 {
  241 	__READ_ONCE_SIZE;
  242 }
  243 #endif
  244 
  245 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  246 {
  247 	switch (size) {
  248 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
  249 	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
  250 	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
  251 	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
  252 	default:
  253 		barrier();
  254 		__builtin_memcpy((void *)p, (const void *)res, size);
  255 		barrier();
  256 	}
  257 }
  258 
  259 /*
  260  * Prevent the compiler from merging or refetching reads or writes. The
  261  * compiler is also forbidden from reordering successive instances of
  262  * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  263  * compiler is aware of some particular ordering.  One way to make the
  264  * compiler aware of ordering is to put the two invocations of READ_ONCE,
  265  * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  266  *
  267  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  268  * data types like structs or unions. If the size of the accessed data
  269  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
  270  * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
  271  * least two memcpy()s: one for the __builtin_memcpy() and one for the
  272  * macro copying the variable into '__u' allocated on the stack.
  273  *
  274  * Their two major use cases are: (1) Mediating communication between
  275  * process-level code and irq/NMI handlers, all running on the same CPU,
  276  * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
  277  * mutilate accesses that either do not require ordering or that interact
  278  * with an explicit memory barrier or atomic instruction that provides the
  279  * required ordering.
  280  */
  281 
  282 #define __READ_ONCE(x, check)						\
  283 ({									\
  284 	union { typeof(x) __val; char __c[1]; } __u;			\
  285 	if (check)							\
  286 		__read_once_size(&(x), __u.__c, sizeof(x));		\
  287 	else								\
  288 		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
  289 	__u.__val;							\
  290 })
  291 #define READ_ONCE(x) __READ_ONCE(x, 1)
  292 
  293 /*
  294  * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
  295  * to hide memory access from KASAN.
  296  */
  297 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
  298 
  299 #define WRITE_ONCE(x, val) \
  300 ({							\
  301 	union { typeof(x) __val; char __c[1]; } __u =	\
  302 		{ .__val = (__force typeof(x)) (val) }; \
  303 	__write_once_size(&(x), __u.__c, sizeof(x));	\
  304 	__u.__val;					\
  305 })
  306 
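/* Illustrative usage sketch (not part of compiler.h): a flag shared between
 * process context and an interrupt handler. READ_ONCE()/WRITE_ONCE() prevent
 * the compiler from tearing, caching, or refetching the accesses; cpu_relax()
 * is assumed from asm/processor.h. */
static int hypothetical_shared_flag;

static void hypothetical_publish(void)
{
	WRITE_ONCE(hypothetical_shared_flag, 1); /* store once, no tearing */
}

static void hypothetical_wait(void)
{
	while (!READ_ONCE(hypothetical_shared_flag)) /* fresh load each pass */
		cpu_relax();
}
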
  307 /**
  308  * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
  309  * @cond: boolean expression to wait for
  310  *
  311  * Equivalent to using smp_load_acquire() on the condition variable but employs
  312  * the control dependency of the wait to reduce the barrier on many platforms.
  313  *
  314  * The control dependency provides a LOAD->STORE order, the additional RMB
  315  * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
  316  * aka. ACQUIRE.
  317  */
  318 #define smp_cond_acquire(cond)	do {		\
  319 	while (!(cond))				\
  320 		cpu_relax();			\
  321 	smp_rmb(); /* ctrl + rmb := acquire */	\
  322 } while (0)
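
/* Sketch (assumption): waiting for a flag with acquire semantics. Reads
 * performed after the macro cannot be reordered before the flag check, so
 * data published before another CPU set 'ready' is safe to read. */
static int hypothetical_ready;

static void hypothetical_wait_acquire(void)
{
	smp_cond_acquire(READ_ONCE(hypothetical_ready));
	/* ... data written before 'ready' was set is now visible ... */
}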
  323 
  324 #endif /* __KERNEL__ */
  325 
  326 #endif /* __ASSEMBLY__ */
  327 
  328 #ifdef __KERNEL__
  329 /*
  330  * Allow us to mark functions as 'deprecated' and have gcc emit a nice
  331  * warning for each use, in hopes of speeding the functions removal.
  332  * Usage is:
  333  * 		int __deprecated foo(void)
  334  */
  335 #ifndef __deprecated
  336 # define __deprecated		/* unimplemented */
  337 #endif
  338 
  339 #ifdef MODULE
  340 #define __deprecated_for_modules __deprecated
  341 #else
  342 #define __deprecated_for_modules
  343 #endif
  344 
  345 #ifndef __must_check
  346 #define __must_check
  347 #endif
  348 
  349 #ifndef CONFIG_ENABLE_MUST_CHECK
  350 #undef __must_check
  351 #define __must_check
  352 #endif
  353 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
  354 #undef __deprecated
  355 #undef __deprecated_for_modules
  356 #define __deprecated
  357 #define __deprecated_for_modules
  358 #endif
  359 
  360 #ifndef __malloc
  361 #define __malloc
  362 #endif
  363 
  364 /*
  365  * Allow us to avoid 'defined but not used' warnings on functions and data,
  366  * as well as force them to be emitted to the assembly file.
  367  *
  368  * As of gcc 3.4, static functions that are not marked with attribute((used))
  369  * may be elided from the assembly file.  As of gcc 3.4, static data not so
  370  * marked will not be elided, but this may change in a future gcc version.
  371  *
  372  * NOTE: Because distributions shipped with a backported unit-at-a-time
  373  * compiler in gcc 3.3, we must define __used to be __attribute__((used))
  374  * for gcc >=3.3 instead of 3.4.
  375  *
  376  * In prior versions of gcc, such functions and data would be emitted, but
  377  * would be warned about except with attribute((unused)).
  378  *
  379  * Mark functions that are referenced only in inline assembly as __used so
  380  * the code is emitted even though it appears to be unreferenced.
  381  */
  382 #ifndef __used
  383 # define __used			/* unimplemented */
  384 #endif
  385 
  386 #ifndef __maybe_unused
  387 # define __maybe_unused		/* unimplemented */
  388 #endif
  389 
  390 #ifndef __always_unused
  391 # define __always_unused	/* unimplemented */
  392 #endif
  393 
  394 #ifndef noinline
  395 #define noinline
  396 #endif
  397 
  398 /*
  399  * Rather than using noinline to prevent stack consumption, use
  400  * noinline_for_stack instead.  For documentation reasons.
  401  */
  402 #define noinline_for_stack noinline
  403 
  404 #ifndef __always_inline
  405 #define __always_inline inline
  406 #endif
  407 
  408 #endif /* __KERNEL__ */
  409 
  410 /*
  411  * From the GCC manual:
  412  *
  413  * Many functions do not examine any values except their arguments,
  414  * and have no effects except the return value.  Basically this is
  415  * just slightly more strict class than the `pure' attribute above,
  416  * since function is not allowed to read global memory.
  417  *
  418  * Note that a function that has pointer arguments and examines the
  419  * data pointed to must _not_ be declared `const'.  Likewise, a
  420  * function that calls a non-`const' function usually must not be
  421  * `const'.  It does not make sense for a `const' function to return
  422  * `void'.
  423  */
  424 #ifndef __attribute_const__
  425 # define __attribute_const__	/* unimplemented */
  426 #endif
  427 
  428 /*
  429  * Tell gcc if a function is cold. The compiler will assume any path
  430  * directly leading to the call is unlikely.
  431  */
  432 
  433 #ifndef __cold
  434 #define __cold
  435 #endif
  436 
  437 /* Simple shorthand for a section definition */
  438 #ifndef __section
  439 # define __section(S) __attribute__ ((__section__(#S)))
  440 #endif
  441 
  442 #ifndef __visible
  443 #define __visible
  444 #endif
  445 
  446 /*
  447  * Assume alignment of return value.
  448  */
  449 #ifndef __assume_aligned
  450 #define __assume_aligned(a, ...)
  451 #endif
  452 
  453 
  454 /* Are two types/vars the same type (ignoring qualifiers)? */
  455 #ifndef __same_type
  456 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
  457 #endif
  458 
  459 /* Is this type a native word size -- useful for atomic operations */
  460 #ifndef __native_word
  461 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
  462 #endif
  463 
  464 /* Compile time object size, -1 for unknown */
  465 #ifndef __compiletime_object_size
  466 # define __compiletime_object_size(obj) -1
  467 #endif
  468 #ifndef __compiletime_warning
  469 # define __compiletime_warning(message)
  470 #endif
  471 #ifndef __compiletime_error
  472 # define __compiletime_error(message)
  473 /*
  474  * Sparse complains of variable sized arrays due to the temporary variable in
  475  * __compiletime_assert. Unfortunately we can't just expand it out to make
  476  * sparse see a constant array size without breaking compiletime_assert on old
  477  * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
  478  */
  479 # ifndef __CHECKER__
  480 #  define __compiletime_error_fallback(condition) \
  481 	do {  } while (0)
  482 # endif
  483 #endif
  484 #ifndef __compiletime_error_fallback
  485 # define __compiletime_error_fallback(condition) do { } while (0)
  486 #endif
  487 
  488 #define __compiletime_assert(condition, msg, prefix, suffix)		\
  489 	do {								\
  490 		bool __cond = !(condition);				\
  491 		extern void prefix ## suffix(void) __compiletime_error(msg); \
  492 		if (__cond)						\
  493 			prefix ## suffix();				\
  494 		__compiletime_error_fallback(__cond);			\
  495 	} while (0)
  496 
  497 #define _compiletime_assert(condition, msg, prefix, suffix) \
  498 	__compiletime_assert(condition, msg, prefix, suffix)
  499 
  500 /**
  501  * compiletime_assert - break build and emit msg if condition is false
  502  * @condition: a compile-time constant condition to check
  503  * @msg:       a message to emit if condition is false
  504  *
  505  * In tradition of POSIX assert, this macro will break the build if the
  506  * supplied condition is *false*, emitting the supplied error message if the
  507  * compiler has support to do so.
  508  */
  509 #define compiletime_assert(condition, msg) \
  510 	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
  511 
  512 #define compiletime_assert_atomic_type(t)				\
  513 	compiletime_assert(__native_word(t),				\
  514 		"Need native word sized stores/loads for atomicity.")
  515 
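/* Illustrative sketch: compiletime_assert() turns a violated compile-time
 * constant condition into a build error. The structure and the 8-byte
 * expectation are hypothetical examples. */
struct hypothetical_hdr {
	__u32 magic;
	__u16 len;
	__u16 flags;
};

static inline void hypothetical_check_layout(void)
{
	compiletime_assert(sizeof(struct hypothetical_hdr) == 8,
			   "hypothetical_hdr must stay exactly 8 bytes");
	compiletime_assert_atomic_type(__u32);
}
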
  516 /*
  517  * Prevent the compiler from merging or refetching accesses.  The compiler
  518  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
  519  * but only when the compiler is aware of some particular ordering.  One way
  520  * to make the compiler aware of ordering is to put the two invocations of
  521  * ACCESS_ONCE() in different C statements.
  522  *
  523  * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
  524  * on a union member will work as long as the size of the member matches the
  525  * size of the union and the size is smaller than word size.
  526  *
  527  * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
  528  * between process-level code and irq/NMI handlers, all running on the same CPU,
  529  * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
  530  * mutilate accesses that either do not require ordering or that interact
  531  * with an explicit memory barrier or atomic instruction that provides the
  532  * required ordering.
  533  *
  534  * If possible use READ_ONCE()/WRITE_ONCE() instead.
  535  */
  536 #define __ACCESS_ONCE(x) ({ \
  537 	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
  538 	(volatile typeof(x) *)&(x); })
  539 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
  540 
  541 /**
  542  * lockless_dereference() - safely load a pointer for later dereference
  543  * @p: The pointer to load
  544  *
  545  * Similar to rcu_dereference(), but for situations where the pointed-to
  546  * object's lifetime is managed by something other than RCU.  That
  547  * "something other" might be reference counting or simple immortality.
  548  */
  549 #define lockless_dereference(p) \
  550 ({ \
  551 	typeof(p) _________p1 = READ_ONCE(p); \
  552 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
  553 	(_________p1); \
  554 })
  555 
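/* Sketch (assumption): loading a pointer published by another thread whose
 * lifetime is managed by reference counting rather than RCU. The dependency
 * barrier in lockless_dereference() orders the dereference after the load. */
struct hypothetical_obj {
	int data;
};
static struct hypothetical_obj *hypothetical_global;

static int hypothetical_read_data(void)
{
	struct hypothetical_obj *p = lockless_dereference(hypothetical_global);

	return p ? p->data : -1;
}
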
  556 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
  557 #ifdef CONFIG_KPROBES
  558 # define __kprobes	__attribute__((__section__(".kprobes.text")))
  559 # define nokprobe_inline	__always_inline
  560 #else
  561 # define __kprobes
  562 # define nokprobe_inline	inline
  563 #endif
  564 #endif /* __LINUX_COMPILER_H */
    1 /*
    2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
    3  *		operating system.  NET  is implemented using the  BSD Socket
    4  *		interface as the means of communication with the user level.
    5  *
    6  *		Definitions for the Ethernet handlers.
    7  *
    8  * Version:	@(#)eth.h	1.0.4	05/13/93
    9  *
   10  * Authors:	Ross Biro
   11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   12  *
   13  *		Relocated to include/linux where it belongs by Alan Cox 
   14  *							<gw4pts@gw4pts.ampr.org>
   15  *
   16  *		This program is free software; you can redistribute it and/or
   17  *		modify it under the terms of the GNU General Public License
   18  *		as published by the Free Software Foundation; either version
   19  *		2 of the License, or (at your option) any later version.
   20  *
   21  */
   22 #ifndef _LINUX_ETHERDEVICE_H
   23 #define _LINUX_ETHERDEVICE_H
   24 
   25 #include <linux/if_ether.h>
   26 #include <linux/netdevice.h>
   27 #include <linux/random.h>
   28 #include <asm/unaligned.h>
   29 #include <asm/bitsperlong.h>
   30 
   31 #ifdef __KERNEL__
   32 struct device;
   33 int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
   34 unsigned char *arch_get_platform_get_mac_address(void);
   35 u32 eth_get_headlen(void *data, unsigned int max_len);
   36 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
   37 extern const struct header_ops eth_header_ops;
   38 
   39 int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
   40 	       const void *daddr, const void *saddr, unsigned len);
   41 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
   42 int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
   43 		     __be16 type);
   44 void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
   45 			     const unsigned char *haddr);
   46 int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
   47 void eth_commit_mac_addr_change(struct net_device *dev, void *p);
   48 int eth_mac_addr(struct net_device *dev, void *p);
   49 int eth_change_mtu(struct net_device *dev, int new_mtu);
   50 int eth_validate_addr(struct net_device *dev);
   51 
   52 struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
   53 					    unsigned int rxqs);
   54 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
   55 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
   56 
   57 struct sk_buff **eth_gro_receive(struct sk_buff **head,
   58 				 struct sk_buff *skb);
   59 int eth_gro_complete(struct sk_buff *skb, int nhoff);
   60 
   61 /* Reserved Ethernet Addresses per IEEE 802.1Q */
   62 static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
   63 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
   64 
   65 /**
   66  * is_link_local_ether_addr - Determine if given Ethernet address is link-local
   67  * @addr: Pointer to a six-byte array containing the Ethernet address
   68  *
   69  * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
   70  * IEEE 802.1Q 8.6.3 Frame filtering.
   71  *
   72  * Please note: addr must be aligned to u16.
   73  */
   74 static inline bool is_link_local_ether_addr(const u8 *addr)
   75 {
   76 	__be16 *a = (__be16 *)addr;
   77 	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
   78 	static const __be16 m = cpu_to_be16(0xfff0);
   79 
   80 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
   81 	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
   82 		(__force int)((a[2] ^ b[2]) & m)) == 0;
   83 #else
   84 	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
   85 #endif
   86 }
   87 
   88 /**
   89  * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
   90  * @addr: Pointer to a six-byte array containing the Ethernet address
   91  *
   92  * Return true if the address is all zeroes.
   93  *
   94  * Please note: addr must be aligned to u16.
   95  */
   96 static inline bool is_zero_ether_addr(const u8 *addr)
   97 {
   98 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
   99 	return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
  100 #else
  101 	return (*(const u16 *)(addr + 0) |
  102 		*(const u16 *)(addr + 2) |
  103 		*(const u16 *)(addr + 4)) == 0;
  104 #endif
  105 }
  106 
  107 /**
  108  * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
  109  * @addr: Pointer to a six-byte array containing the Ethernet address
  110  *
  111  * Return true if the address is a multicast address.
  112  * By definition the broadcast address is also a multicast address.
  113  */
  114 static inline bool is_multicast_ether_addr(const u8 *addr)
  115 {
  116 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  117 	u32 a = *(const u32 *)addr;
  118 #else
  119 	u16 a = *(const u16 *)addr;
  120 #endif
  121 #ifdef __BIG_ENDIAN
  122 	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
  123 #else
  124 	return 0x01 & a;
  125 #endif
  126 }
  127 
  128 static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
  129 {
  130 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
  131 #ifdef __BIG_ENDIAN
  132 	return 0x01 & ((*(const u64 *)addr) >> 56);
  133 #else
  134 	return 0x01 & (*(const u64 *)addr);
  135 #endif
  136 #else
  137 	return is_multicast_ether_addr(addr);
  138 #endif
  139 }
  140 
  141 /**
  142  * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802).
  143  * @addr: Pointer to a six-byte array containing the Ethernet address
  144  *
  145  * Return true if the address is a local address.
  146  */
  147 static inline bool is_local_ether_addr(const u8 *addr)
  148 {
  149 	return 0x02 & addr[0];
  150 }
  151 
  152 /**
  153  * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
  154  * @addr: Pointer to a six-byte array containing the Ethernet address
  155  *
  156  * Return true if the address is the broadcast address.
  157  *
  158  * Please note: addr must be aligned to u16.
  159  */
  160 static inline bool is_broadcast_ether_addr(const u8 *addr)
  161 {
  162 	return (*(const u16 *)(addr + 0) &
  163 		*(const u16 *)(addr + 2) &
  164 		*(const u16 *)(addr + 4)) == 0xffff;
  165 }
  166 
  167 /**
  168  * is_unicast_ether_addr - Determine if the Ethernet address is unicast
  169  * @addr: Pointer to a six-byte array containing the Ethernet address
  170  *
  171  * Return true if the address is a unicast address.
  172  */
  173 static inline bool is_unicast_ether_addr(const u8 *addr)
  174 {
  175 	return !is_multicast_ether_addr(addr);
  176 }
  177 
  178 /**
  179  * is_valid_ether_addr - Determine if the given Ethernet address is valid
  180  * @addr: Pointer to a six-byte array containing the Ethernet address
  181  *
  182  * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
  183  * a multicast address, and is not FF:FF:FF:FF:FF:FF.
  184  *
  185  * Return true if the address is valid.
  186  *
  187  * Please note: addr must be aligned to u16.
  188  */
  189 static inline bool is_valid_ether_addr(const u8 *addr)
  190 {
  191 	/* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to
  192 	 * explicitly check for it here. */
  193 	return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
  194 }
  195 
  196 /**
  197  * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
  198  * @proto: Ethertype/length value to be tested
  199  *
  200  * Check that the value from the Ethertype/length field is a valid Ethertype.
  201  *
  202  * Return true if the value is an 802.3 supported Ethertype.
  203  */
  204 static inline bool eth_proto_is_802_3(__be16 proto)
  205 {
  206 #ifndef __BIG_ENDIAN
  207 	/* if CPU is little endian mask off bits representing LSB */
  208 	proto &= htons(0xFF00);
  209 #endif
  210 	/* cast both to u16 and compare since LSB can be ignored */
  211 	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
  212 }
  213 
  214 /**
  215  * eth_random_addr - Generate software assigned random Ethernet address
  216  * @addr: Pointer to a six-byte array containing the Ethernet address
  217  *
  218  * Generate a random Ethernet address (MAC) that is not multicast
  219  * and has the local assigned bit set.
  220  */
  221 static inline void eth_random_addr(u8 *addr)
  222 {
  223 	get_random_bytes(addr, ETH_ALEN);
  224 	addr[0] &= 0xfe;	/* clear multicast bit */
  225 	addr[0] |= 0x02;	/* set local assignment bit (IEEE802) */
  226 }
  227 
  228 #define random_ether_addr(addr) eth_random_addr(addr)
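
/* Illustrative sketch (not part of etherdevice.h): validate a MAC read from
 * hardware and fall back to a random locally-administered address. */
static inline void hypothetical_fixup_mac(u8 *mac)
{
	if (!is_valid_ether_addr(mac))
		eth_random_addr(mac); /* random, unicast, locally assigned */
}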
  229 
  230 /**
  231  * eth_broadcast_addr - Assign broadcast address
  232  * @addr: Pointer to a six-byte array containing the Ethernet address
  233  *
  234  * Assign the broadcast address to the given address array.
  235  */
  236 static inline void eth_broadcast_addr(u8 *addr)
  237 {
  238 	memset(addr, 0xff, ETH_ALEN);
  239 }
  240 
  241 /**
  242  * eth_zero_addr - Assign zero address
  243  * @addr: Pointer to a six-byte array containing the Ethernet address
  244  *
  245  * Assign the zero address to the given address array.
  246  */
  247 static inline void eth_zero_addr(u8 *addr)
  248 {
  249 	memset(addr, 0x00, ETH_ALEN);
  250 }
  251 
  252 /**
  253  * eth_hw_addr_random - Generate a software-assigned random Ethernet address
  254  * and set the device flag
  255  * @dev: pointer to net_device structure
  256  *
  257  * Generate a random Ethernet address (MAC) to be used by a net device
  258  * and set addr_assign_type so the state can be read by sysfs and be
  259  * used by userspace.
  260  */
  261 static inline void eth_hw_addr_random(struct net_device *dev)
  262 {
  263 	dev->addr_assign_type = NET_ADDR_RANDOM;
  264 	eth_random_addr(dev->dev_addr);
  265 }
  266 
  267 /**
  268  * ether_addr_copy - Copy an Ethernet address
  269  * @dst: Pointer to a six-byte array Ethernet address destination
  270  * @src: Pointer to a six-byte array Ethernet address source
  271  *
  272  * Please note: dst & src must both be aligned to u16.
  273  */
  274 static inline void ether_addr_copy(u8 *dst, const u8 *src)
  275 {
  276 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  277 	*(u32 *)dst = *(const u32 *)src;
  278 	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
  279 #else
  280 	u16 *a = (u16 *)dst;
  281 	const u16 *b = (const u16 *)src;
  282 
  283 	a[0] = b[0];
  284 	a[1] = b[1];
  285 	a[2] = b[2];
  286 #endif
  287 }
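
/* Sketch (assumption): a common probe-time pattern combining the helpers
 * above: keep a valid hardware-provided MAC, otherwise assign a random one.
 * A real driver would also set dev->addr_assign_type when copying. */
static inline void hypothetical_init_mac(struct net_device *dev,
					 const u8 *hw_mac)
{
	if (is_valid_ether_addr(hw_mac))
		ether_addr_copy(dev->dev_addr, hw_mac);
	else
		eth_hw_addr_random(dev);
}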
  288 
  289 /**
  290  * eth_hw_addr_inherit - Copy dev_addr from another net_device
  291  * @dst: pointer to net_device to copy dev_addr to
  292  * @src: pointer to net_device to copy dev_addr from
  293  *
  294  * Copy the Ethernet address from one net_device to another along with
  295  * the address attributes (addr_assign_type).
  296  */
  297 static inline void eth_hw_addr_inherit(struct net_device *dst,
  298 				       struct net_device *src)
  299 {
  300 	dst->addr_assign_type = src->addr_assign_type;
  301 	ether_addr_copy(dst->dev_addr, src->dev_addr);
  302 }
  303 
  304 /**
  305  * ether_addr_equal - Compare two Ethernet addresses
  306  * @addr1: Pointer to a six-byte array containing the Ethernet address
  307  * @addr2: Pointer other six-byte array containing the Ethernet address
  308  *
  309  * Compare two Ethernet addresses, returns true if equal
  310  *
  311  * Please note: addr1 & addr2 must both be aligned to u16.
  312  */
  313 static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
  314 {
  315 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  316 	u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
  317 		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));
  318 
  319 	return fold == 0;
  320 #else
  321 	const u16 *a = (const u16 *)addr1;
  322 	const u16 *b = (const u16 *)addr2;
  323 
  324 	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
  325 #endif
  326 }
  327 
  328 /**
  329  * ether_addr_equal_64bits - Compare two Ethernet addresses
  330  * @addr1: Pointer to an array of 8 bytes
  331  * @addr2: Pointer to an other array of 8 bytes
  332  *
  333  * Compare two Ethernet addresses, returns true if equal, false otherwise.
  334  *
  335  * The function doesn't need any conditional branches and possibly uses
  336  * word memory accesses on CPU allowing cheap unaligned memory reads.
  337  * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
  338  *
  339  * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
  340  */
  341 
  342 static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
  343 					   const u8 addr2[6+2])
  344 {
  345 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
  346 	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
  347 
  348 #ifdef __BIG_ENDIAN
  349 	return (fold >> 16) == 0;
  350 #else
  351 	return (fold << 16) == 0;
  352 #endif
  353 #else
  354 	return ether_addr_equal(addr1, addr2);
  355 #endif
  356 }
  357 
  358 /**
  359  * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
  360  * @addr1: Pointer to a six-byte array containing the Ethernet address
  361  * @addr2: Pointer other six-byte array containing the Ethernet address
  362  *
  363  * Compare two Ethernet addresses, returns true if equal
  364  *
  365  * Please note: Use only when any Ethernet address may not be u16 aligned.
  366  */
  367 static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
  368 {
  369 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  370 	return ether_addr_equal(addr1, addr2);
  371 #else
  372 	return memcmp(addr1, addr2, ETH_ALEN) == 0;
  373 #endif
  374 }
  375 
  376 /**
  377  * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
  378  * @dev: Pointer to a device structure
  379  * @addr: Pointer to a six-byte array containing the Ethernet address
  380  *
  381  * Compare the passed address with all addresses of the device. Return true
  382  * if the address is one of the device addresses.
  383  *
  384  * Note that this function calls ether_addr_equal_64bits() so take care of
  385  * the right padding.
  386  */
  387 static inline bool is_etherdev_addr(const struct net_device *dev,
  388 				    const u8 addr[6 + 2])
  389 {
  390 	struct netdev_hw_addr *ha;
  391 	bool res = false;
  392 
  393 	rcu_read_lock();
  394 	for_each_dev_addr(dev, ha) {
  395 		res = ether_addr_equal_64bits(addr, ha->addr);
  396 		if (res)
  397 			break;
  398 	}
  399 	rcu_read_unlock();
  400 	return res;
  401 }
  402 #endif	/* __KERNEL__ */
  403 
  404 /**
  405  * compare_ether_header - Compare two Ethernet headers
  406  * @a: Pointer to Ethernet header
  407  * @b: Pointer to Ethernet header
  408  *
  409  * Compare two Ethernet headers, returns 0 if equal.
  410  * This assumes that the network header (i.e., IP header) is 4-byte
  411  * aligned OR the platform can handle unaligned access.  This is the
  412  * case for all packets coming into netif_receive_skb or similar
  413  * entry points.
  414  */
  415 
  416 static inline unsigned long compare_ether_header(const void *a, const void *b)
  417 {
  418 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
  419 	unsigned long fold;
  420 
  421 	/*
  422 	 * We want to compare 14 bytes:
  423 	 *  [a0 ... a13] ^ [b0 ... b13]
  424 	 * Use two long XOR, ORed together, with an overlap of two bytes.
  425 	 *  [a0  a1  a2  a3  a4  a5  a6  a7 ] ^ [b0  b1  b2  b3  b4  b5  b6  b7 ] |
  426 	 *  [a6  a7  a8  a9  a10 a11 a12 a13] ^ [b6  b7  b8  b9  b10 b11 b12 b13]
  427 	 * This means the [a6 a7] ^ [b6 b7] part is done two times.
  428 	*/
  429 	fold = *(unsigned long *)a ^ *(unsigned long *)b;
  430 	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
  431 	return fold;
  432 #else
  433 	u32 *a32 = (u32 *)((u8 *)a + 2);
  434 	u32 *b32 = (u32 *)((u8 *)b + 2);
  435 
  436 	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
  437 	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
  438 #endif
  439 }
  440 
  441 /**
  442  * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
  443  * @skb: Buffer to pad
  444  *
  445  * An Ethernet frame should have a minimum size of 60 bytes.  This function
  446  * takes short frames and pads them with zeros up to the 60 byte limit.
  447  */
  448 static inline int eth_skb_pad(struct sk_buff *skb)
  449 {
  450 	return skb_put_padto(skb, ETH_ZLEN);
  451 }
  452 
  453 #endif	/* _LINUX_ETHERDEVICE_H */
    1 /*
    2  * Read-Copy Update mechanism for mutual exclusion
    3  *
    4  * This program is free software; you can redistribute it and/or modify
    5  * it under the terms of the GNU General Public License as published by
    6  * the Free Software Foundation; either version 2 of the License, or
    7  * (at your option) any later version.
    8  *
    9  * This program is distributed in the hope that it will be useful,
   10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  * GNU General Public License for more details.
   13  *
   14  * You should have received a copy of the GNU General Public License
   15  * along with this program; if not, you can access it online at
   16  * http://www.gnu.org/licenses/gpl-2.0.html.
   17  *
   18  * Copyright IBM Corporation, 2001
   19  *
   20  * Author: Dipankar Sarma <dipankar@in.ibm.com>
   21  *
   22  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
   23  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
   24  * Papers:
   25  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
   26  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
   27  *
   28  * For detailed explanation of Read-Copy Update mechanism see -
   29  *		http://lse.sourceforge.net/locking/rcupdate.html
   30  *
   31  */
   32 
   33 #ifndef __LINUX_RCUPDATE_H
   34 #define __LINUX_RCUPDATE_H
   35 
   36 #include <linux/types.h>
   37 #include <linux/cache.h>
   38 #include <linux/spinlock.h>
   39 #include <linux/threads.h>
   40 #include <linux/cpumask.h>
   41 #include <linux/seqlock.h>
   42 #include <linux/lockdep.h>
   43 #include <linux/completion.h>
   44 #include <linux/debugobjects.h>
   45 #include <linux/bug.h>
   46 #include <linux/compiler.h>
   47 #include <linux/ktime.h>
   48 
   49 #include <asm/barrier.h>
   50 
   51 #ifndef CONFIG_TINY_RCU
   52 extern int rcu_expedited; /* for sysctl */
   53 extern int rcu_normal;    /* also for sysctl */
   54 #endif /* #ifndef CONFIG_TINY_RCU */
   55 
   56 #ifdef CONFIG_TINY_RCU
   57 /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
   58 static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
   59 {
   60 	return true;
   61 }
   62 static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
   63 {
   64 	return false;
   65 }
   66 
   67 static inline void rcu_expedite_gp(void)
   68 {
   69 }
   70 
   71 static inline void rcu_unexpedite_gp(void)
   72 {
   73 }
   74 #else /* #ifdef CONFIG_TINY_RCU */
   75 bool rcu_gp_is_normal(void);     /* Internal RCU use. */
   76 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
   77 void rcu_expedite_gp(void);
   78 void rcu_unexpedite_gp(void);
   79 #endif /* #else #ifdef CONFIG_TINY_RCU */
   80 
   81 enum rcutorture_type {
   82 	RCU_FLAVOR,
   83 	RCU_BH_FLAVOR,
   84 	RCU_SCHED_FLAVOR,
   85 	RCU_TASKS_FLAVOR,
   86 	SRCU_FLAVOR,
   87 	INVALID_RCU_FLAVOR
   88 };
   89 
   90 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
   91 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
   92 			    unsigned long *gpnum, unsigned long *completed);
   93 void rcutorture_record_test_transition(void);
   94 void rcutorture_record_progress(unsigned long vernum);
   95 void do_trace_rcu_torture_read(const char *rcutorturename,
   96 			       struct rcu_head *rhp,
   97 			       unsigned long secs,
   98 			       unsigned long c_old,
   99 			       unsigned long c);
  100 #else
  101 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
  102 					  int *flags,
  103 					  unsigned long *gpnum,
  104 					  unsigned long *completed)
  105 {
  106 	*flags = 0;
  107 	*gpnum = 0;
  108 	*completed = 0;
  109 }
  110 static inline void rcutorture_record_test_transition(void)
  111 {
  112 }
  113 static inline void rcutorture_record_progress(unsigned long vernum)
  114 {
  115 }
  116 #ifdef CONFIG_RCU_TRACE
  117 void do_trace_rcu_torture_read(const char *rcutorturename,
  118 			       struct rcu_head *rhp,
  119 			       unsigned long secs,
  120 			       unsigned long c_old,
  121 			       unsigned long c);
  122 #else
  123 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
  124 	do { } while (0)
  125 #endif
  126 #endif
  127 
  128 #define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
  129 #define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
  130 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
  131 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
  132 #define ulong2long(a)		(*(long *)(&(a)))
  133 
  134 /* Exported common interfaces */
  135 
  136 #ifdef CONFIG_PREEMPT_RCU
  137 
  138 /**
  139  * call_rcu() - Queue an RCU callback for invocation after a grace period.
  140  * @head: structure to be used for queueing the RCU updates.
  141  * @func: actual callback function to be invoked after the grace period
  142  *
  143  * The callback function will be invoked some time after a full grace
  144  * period elapses, in other words after all pre-existing RCU read-side
  145  * critical sections have completed.  However, the callback function
  146  * might well execute concurrently with RCU read-side critical sections
  147  * that started after call_rcu() was invoked.  RCU read-side critical
  148  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  149  * and may be nested.
  150  *
  151  * Note that all CPUs must agree that the grace period extended beyond
  152  * all pre-existing RCU read-side critical section.  On systems with more
  153  * than one CPU, this means that when "func()" is invoked, each CPU is
  154  * guaranteed to have executed a full memory barrier since the end of its
  155  * last RCU read-side critical section whose beginning preceded the call
  156  * to call_rcu().  It also means that each CPU executing an RCU read-side
  157  * critical section that continues beyond the start of "func()" must have
  158  * executed a memory barrier after the call_rcu() but before the beginning
  159  * of that RCU read-side critical section.  Note that these guarantees
  160  * include CPUs that are offline, idle, or executing in user mode, as
  161  * well as CPUs that are executing in the kernel.
  162  *
  163  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
  164  * resulting RCU callback function "func()", then both CPU A and CPU B are
  165  * guaranteed to execute a full memory barrier during the time interval
  166  * between the call to call_rcu() and the invocation of "func()" -- even
  167  * if CPU A and CPU B are the same CPU (but again only if the system has
  168  * more than one CPU).
  169  */
  170 void call_rcu(struct rcu_head *head,
  171 	      rcu_callback_t func);
  172 
  173 #else /* #ifdef CONFIG_PREEMPT_RCU */
  174 
  175 /* In classic RCU, call_rcu() is just call_rcu_sched(). */
  176 #define	call_rcu	call_rcu_sched
  177 
  178 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
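
/* Illustrative sketch (not part of rcupdate.h): the canonical call_rcu()
 * pattern, deferring kfree() of an element until all pre-existing readers
 * are done. Assumes linux/slab.h for kfree() and container_of(). */
struct hypothetical_foo {
	int data;
	struct rcu_head rcu;
};

static void hypothetical_foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct hypothetical_foo, rcu));
}

static void hypothetical_foo_release(struct hypothetical_foo *fp)
{
	call_rcu(&fp->rcu, hypothetical_foo_reclaim);
}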
  179 
  180 /**
  181  * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
  182  * @head: structure to be used for queueing the RCU updates.
  183  * @func: actual callback function to be invoked after the grace period
  184  *
  185  * The callback function will be invoked some time after a full grace
  186  * period elapses, in other words after all currently executing RCU
  187  * read-side critical sections have completed. call_rcu_bh() assumes
  188  * that the read-side critical sections end on completion of a softirq
  189  * handler. This means that read-side critical sections in process
  190  * context must not be interrupted by softirqs. This interface is to be
  191  * used when most of the read-side critical sections are in softirq context.
  192  * RCU read-side critical sections are delimited by :
  193  *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
  194  *  OR
  195  *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  196  *  These may be nested.
  197  *
  198  * See the description of call_rcu() for more detailed information on
  199  * memory ordering guarantees.
  200  */
  201 void call_rcu_bh(struct rcu_head *head,
  202 		 rcu_callback_t func);
  203 
  204 /**
  205  * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
  206  * @head: structure to be used for queueing the RCU updates.
  207  * @func: actual callback function to be invoked after the grace period
  208  *
  209  * The callback function will be invoked some time after a full grace
  210  * period elapses, in other words after all currently executing RCU
  211  * read-side critical sections have completed. call_rcu_sched() assumes
  212  * that the read-side critical sections end on enabling of preemption
  213  * or on voluntary preemption.
  214  * RCU read-side critical sections are delimited by :
  215  *  - rcu_read_lock_sched() and  rcu_read_unlock_sched(),
  216  *  OR
  217  *  anything that disables preemption.
  218  *  These may be nested.
  219  *
  220  * See the description of call_rcu() for more detailed information on
  221  * memory ordering guarantees.
  222  */
  223 void call_rcu_sched(struct rcu_head *head,
  224 		    rcu_callback_t func);
  225 
  226 void synchronize_sched(void);
  227 
  228 /*
  229  * Structure allowing asynchronous waiting on RCU.
  230  */
  231 struct rcu_synchronize {
  232 	struct rcu_head head;
  233 	struct completion completion;
  234 };
  235 void wakeme_after_rcu(struct rcu_head *head);
  236 
  237 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
  238 		   struct rcu_synchronize *rs_array);
  239 
  240 #define _wait_rcu_gp(checktiny, ...) \
  241 do {									\
  242 	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };		\
  243 	struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];	\
  244 	__wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),		\
  245 			__crcu_array, __rs_array);			\
  246 } while (0)
  247 
  248 #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
  249 
  250 /**
  251  * synchronize_rcu_mult - Wait concurrently for multiple grace periods
  252  * @...: List of call_rcu() functions for the flavors to wait on.
  253  *
  254  * This macro waits concurrently for multiple flavors of RCU grace periods.
  255  * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
  256  * on concurrent RCU and RCU-bh grace periods.  Waiting on a given SRCU
  257  * domain requires you to write a wrapper function for that SRCU domain's
  258  * call_srcu() function, supplying the corresponding srcu_struct.
  259  *
  260  * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
  261  * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
  262  * is automatically a grace period.
  263  */
  264 #define synchronize_rcu_mult(...) \
  265 	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
  266 
  267 /**
  268  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
  269  * @head: structure to be used for queueing the RCU updates.
  270  * @func: actual callback function to be invoked after the grace period
  271  *
  272  * The callback function will be invoked some time after a full grace
  273  * period elapses, in other words after all currently executing RCU
  274  * read-side critical sections have completed. call_rcu_tasks() assumes
  275  * that the read-side critical sections end at a voluntary context
  276  * switch (not a preemption!), entry into idle, or transition to usermode
  277  * execution.  As such, there are no read-side primitives analogous to
  278  * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
  279  * to determine that all tasks have passed through a safe state, not so
  280  * much for data-structure synchronization.
  281  *
  282  * See the description of call_rcu() for more detailed information on
  283  * memory ordering guarantees.
  284  */
  285 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
  286 void synchronize_rcu_tasks(void);
  287 void rcu_barrier_tasks(void);
  288 
  289 #ifdef CONFIG_PREEMPT_RCU
  290 
  291 void __rcu_read_lock(void);
  292 void __rcu_read_unlock(void);
  293 void rcu_read_unlock_special(struct task_struct *t);
  294 void synchronize_rcu(void);
  295 
  296 /*
  297  * Defined as a macro as it is a very low level header included from
  298  * areas that don't even know about current.  This gives the rcu_read_lock()
  299  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
  300  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  301  */
  302 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
  303 
  304 #else /* #ifdef CONFIG_PREEMPT_RCU */
  305 
  306 static inline void __rcu_read_lock(void)
  307 {
  308 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
  309 		preempt_disable();
  310 }
  311 
  312 static inline void __rcu_read_unlock(void)
  313 {
  314 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
  315 		preempt_enable();
  316 }
  317 
  318 static inline void synchronize_rcu(void)
  319 {
  320 	synchronize_sched();
  321 }
  322 
  323 static inline int rcu_preempt_depth(void)
  324 {
  325 	return 0;
  326 }
  327 
  328 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
  329 
  330 /* Internal to kernel */
  331 void rcu_init(void);
  332 void rcu_sched_qs(void);
  333 void rcu_bh_qs(void);
  334 void rcu_check_callbacks(int user);
  335 void rcu_report_dead(unsigned int cpu);
  336 
  337 #ifndef CONFIG_TINY_RCU
  338 void rcu_end_inkernel_boot(void);
  339 #else /* #ifndef CONFIG_TINY_RCU */
  340 static inline void rcu_end_inkernel_boot(void) { }
  341 #endif /* #ifndef CONFIG_TINY_RCU */
  342 
  343 #ifdef CONFIG_RCU_STALL_COMMON
  344 void rcu_sysrq_start(void);
  345 void rcu_sysrq_end(void);
  346 #else /* #ifdef CONFIG_RCU_STALL_COMMON */
  347 static inline void rcu_sysrq_start(void)
  348 {
  349 }
  350 static inline void rcu_sysrq_end(void)
  351 {
  352 }
  353 #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
  354 
  355 #ifdef CONFIG_NO_HZ_FULL
  356 void rcu_user_enter(void);
  357 void rcu_user_exit(void);
  358 #else
  359 static inline void rcu_user_enter(void) { }
  360 static inline void rcu_user_exit(void) { }
  361 #endif /* CONFIG_NO_HZ_FULL */
  362 
  363 #ifdef CONFIG_RCU_NOCB_CPU
  364 void rcu_init_nohz(void);
  365 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
  366 static inline void rcu_init_nohz(void)
  367 {
  368 }
  369 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
  370 
  371 /**
  372  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  373  * @a: Code that RCU needs to pay attention to.
  374  *
  375  * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
  376  * in the inner idle loop, that is, between the rcu_idle_enter() and
  377  * the rcu_idle_exit() -- RCU will happily ignore any such read-side
  378  * critical sections.  However, things like powertop need tracepoints
  379  * in the inner idle loop.
  380  *
  381  * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
  382  * will tell RCU that it needs to pay attention, invoke its argument
  383  * (in this example, a call to the do_something_with_RCU() function),
  384  * and then tell RCU to go back to ignoring this CPU.  It is permissible
  385  * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
  386  * quite limited.  If deeper nesting is required, it will be necessary
  387  * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
  388  */
  389 #define RCU_NONIDLE(a) \
  390 	do { \
  391 		rcu_irq_enter_irqson(); \
  392 		do { a; } while (0); \
  393 		rcu_irq_exit_irqson(); \
  394 	} while (0)
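
/* Sketch (assumption): wrapping an RCU-using call made from the inner idle
 * loop, where RCU would otherwise ignore the CPU. */
static void hypothetical_idle_trace(void)
{
	/* ... may contain rcu_read_lock()/rcu_read_unlock() readers ... */
}

static void hypothetical_idle_step(void)
{
	RCU_NONIDLE(hypothetical_idle_trace());
}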
  395 
  396 /*
  397  * Note a voluntary context switch for RCU-tasks benefit.  This is a
  398  * macro rather than an inline function to avoid #include hell.
  399  */
  400 #ifdef CONFIG_TASKS_RCU
  401 #define TASKS_RCU(x) x
  402 extern struct srcu_struct tasks_rcu_exit_srcu;
  403 #define rcu_note_voluntary_context_switch(t) \
  404 	do { \
  405 		rcu_all_qs(); \
  406 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
  407 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
  408 	} while (0)
  409 #else /* #ifdef CONFIG_TASKS_RCU */
  410 #define TASKS_RCU(x) do { } while (0)
  411 #define rcu_note_voluntary_context_switch(t)	rcu_all_qs()
  412 #endif /* #else #ifdef CONFIG_TASKS_RCU */
  413 
  414 /**
  415  * cond_resched_rcu_qs - Report potential quiescent states to RCU
  416  *
  417  * This macro resembles cond_resched(), except that it is defined to
  418  * report potential quiescent states to RCU-tasks even if the cond_resched()
  419  * machinery were to be shut off, as some advocate for PREEMPT kernels.
  420  */
  421 #define cond_resched_rcu_qs() \
  422 do { \
  423 	if (!cond_resched()) \
  424 		rcu_note_voluntary_context_switch(current); \
  425 } while (0)
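
/* Illustrative sketch: a long-running loop reporting quiescent states so
 * that RCU-tasks grace periods keep making progress even if cond_resched()
 * does not actually reschedule. */
static void hypothetical_process_items(int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* ... per-item work ... */
		cond_resched_rcu_qs();
	}
}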
  426 
  427 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
  428 bool __rcu_is_watching(void);
  429 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
  430 
  431 /*
  432  * Infrastructure to implement the synchronize_() primitives in
  433  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  434  */
  435 
  436 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
  437 #include <linux/rcutree.h>
  438 #elif defined(CONFIG_TINY_RCU)
  439 #include <linux/rcutiny.h>
  440 #else
  441 #error "Unknown RCU implementation specified to kernel configuration"
  442 #endif
  443 
  444 /*
  445  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  446  * initialization and destruction of rcu_head on the stack. rcu_head structures
  447  * allocated dynamically in the heap or defined statically don't need any
  448  * initialization.
  449  */
  450 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
  451 void init_rcu_head(struct rcu_head *head);
  452 void destroy_rcu_head(struct rcu_head *head);
  453 void init_rcu_head_on_stack(struct rcu_head *head);
  454 void destroy_rcu_head_on_stack(struct rcu_head *head);
  455 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
  456 static inline void init_rcu_head(struct rcu_head *head)
  457 {
  458 }
  459 
  460 static inline void destroy_rcu_head(struct rcu_head *head)
  461 {
  462 }
  463 
  464 static inline void init_rcu_head_on_stack(struct rcu_head *head)
  465 {
  466 }
  467 
  468 static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
  469 {
  470 }
  471 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
  472 
  473 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
  474 bool rcu_lockdep_current_cpu_online(void);
  475 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
  476 static inline bool rcu_lockdep_current_cpu_online(void)
  477 {
  478 	return true;
  479 }
  480 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
  481 
  482 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  483 
  484 static inline void rcu_lock_acquire(struct lockdep_map *map)
  485 {
  486 	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
  487 }
  488 
  489 static inline void rcu_lock_release(struct lockdep_map *map)
  490 {
  491 	lock_release(map, 1, _THIS_IP_);
  492 }
  493 
  494 extern struct lockdep_map rcu_lock_map;
  495 extern struct lockdep_map rcu_bh_lock_map;
  496 extern struct lockdep_map rcu_sched_lock_map;
  497 extern struct lockdep_map rcu_callback_map;
  498 int debug_lockdep_rcu_enabled(void);
  499 
  500 int rcu_read_lock_held(void);
  501 int rcu_read_lock_bh_held(void);
  502 
  503 /**
  504  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
  505  *
  506  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
  507  * RCU-sched read-side critical section.  In absence of
  508  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
  509  * critical section unless it can prove otherwise.
  510  */
  511 int rcu_read_lock_sched_held(void);
  512 
  513 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  514 
  515 # define rcu_lock_acquire(a)		do { } while (0)
  516 # define rcu_lock_release(a)		do { } while (0)
  517 
  518 static inline int rcu_read_lock_held(void)
  519 {
  520 	return 1;
  521 }
  522 
  523 static inline int rcu_read_lock_bh_held(void)
  524 {
  525 	return 1;
  526 }
  527 
  528 static inline int rcu_read_lock_sched_held(void)
  529 {
  530 	return !preemptible();
  531 }
  532 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  533 
  534 #ifdef CONFIG_PROVE_RCU
  535 
  536 /**
  537  * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
  538  * @c: condition to check
  539  * @s: informative message
  540  */
  541 #define RCU_LOCKDEP_WARN(c, s)						\
  542 	do {								\
  543 		static bool __section(.data.unlikely) __warned;		\
  544 		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
  545 			__warned = true;				\
  546 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
  547 		}							\
  548 	} while (0)
  549 
  550 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
  551 static inline void rcu_preempt_sleep_check(void)
  552 {
  553 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
  554 			 "Illegal context switch in RCU read-side critical section");
  555 }
  556 #else /* #ifdef CONFIG_PROVE_RCU */
  557 static inline void rcu_preempt_sleep_check(void)
  558 {
  559 }
  560 #endif /* #else #ifdef CONFIG_PROVE_RCU */
  561 
  562 #define rcu_sleep_check()						\
  563 	do {								\
  564 		rcu_preempt_sleep_check();				\
  565 		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),	\
  566 				 "Illegal context switch in RCU-bh read-side critical section"); \
  567 		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
  568 				 "Illegal context switch in RCU-sched read-side critical section"); \
  569 	} while (0)
  570 
  571 #else /* #ifdef CONFIG_PROVE_RCU */
  572 
  573 #define RCU_LOCKDEP_WARN(c, s) do { } while (0)
  574 #define rcu_sleep_check() do { } while (0)
  575 
  576 #endif /* #else #ifdef CONFIG_PROVE_RCU */
  577 
  578 /*
  579  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
  580  * and rcu_assign_pointer().  Some of these could be folded into their
  581  * callers, but they are left separate in order to ease introduction of
  582  * multiple flavors of pointers to match the multiple flavors of RCU
   583  * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
  584  * the future.
  585  */
  586 
  587 #ifdef __CHECKER__
  588 #define rcu_dereference_sparse(p, space) \
  589 	((void)(((typeof(*p) space *)p) == p))
  590 #else /* #ifdef __CHECKER__ */
  591 #define rcu_dereference_sparse(p, space)
  592 #endif /* #else #ifdef __CHECKER__ */
  593 
  594 #define __rcu_access_pointer(p, space) \
  595 ({ \
  596 	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
  597 	rcu_dereference_sparse(p, space); \
  598 	((typeof(*p) __force __kernel *)(_________p1)); \
  599 })
  600 #define __rcu_dereference_check(p, c, space) \
  601 ({ \
  602 	/* Dependency order vs. p above. */ \
  603 	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
  604 	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
  605 	rcu_dereference_sparse(p, space); \
  606 	((typeof(*p) __force __kernel *)(________p1)); \
  607 })
  608 #define __rcu_dereference_protected(p, c, space) \
  609 ({ \
  610 	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
  611 	rcu_dereference_sparse(p, space); \
  612 	((typeof(*p) __force __kernel *)(p)); \
  613 })
  614 
  615 /**
  616  * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
  617  * @v: The value to statically initialize with.
  618  */
  619 #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
  620 
  621 /**
  622  * rcu_assign_pointer() - assign to RCU-protected pointer
  623  * @p: pointer to assign to
  624  * @v: value to assign (publish)
  625  *
  626  * Assigns the specified value to the specified RCU-protected
  627  * pointer, ensuring that any concurrent RCU readers will see
  628  * any prior initialization.
  629  *
  630  * Inserts memory barriers on architectures that require them
  631  * (which is most of them), and also prevents the compiler from
  632  * reordering the code that initializes the structure after the pointer
  633  * assignment.  More importantly, this call documents which pointers
  634  * will be dereferenced by RCU read-side code.
  635  *
  636  * In some special cases, you may use RCU_INIT_POINTER() instead
  637  * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
  638  * to the fact that it does not constrain either the CPU or the compiler.
  639  * That said, using RCU_INIT_POINTER() when you should have used
  640  * rcu_assign_pointer() is a very bad thing that results in
  641  * impossible-to-diagnose memory corruption.  So please be careful.
  642  * See the RCU_INIT_POINTER() comment header for details.
  643  *
  644  * Note that rcu_assign_pointer() evaluates each of its arguments only
  645  * once, appearances notwithstanding.  One of the "extra" evaluations
  646  * is in typeof() and the other visible only to sparse (__CHECKER__),
   647  * neither of which actually executes the argument.  As with most cpp
  648  * macros, this execute-arguments-only-once property is important, so
  649  * please be careful when making changes to rcu_assign_pointer() and the
  650  * other macros that it invokes.
  651  */
  652 #define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
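/*
 * A minimal publication sketch (not part of the original header): the
 * struct foo, gp, and foo_lock names below are hypothetical, introduced
 * only to illustrate rcu_assign_pointer().  Initialization of the new
 * element must complete before the pointer is published.
 */
struct foo {
	int a;
	struct rcu_head rcu;
};

static struct foo __rcu *gp;		/* RCU-protected global pointer */
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters */

static int publish_foo(int a)
{
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	p->a = a;			/* initialize before publishing */
	spin_lock(&foo_lock);
	rcu_assign_pointer(gp, p);	/* orders the init before the store */
	spin_unlock(&foo_lock);
	return 0;
}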
  653 
  654 /**
  655  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
  656  * @p: The pointer to read
  657  *
  658  * Return the value of the specified RCU-protected pointer, but omit the
  659  * smp_read_barrier_depends() and keep the READ_ONCE().  This is useful
  660  * when the value of this pointer is accessed, but the pointer is not
  661  * dereferenced, for example, when testing an RCU-protected pointer against
  662  * NULL.  Although rcu_access_pointer() may also be used in cases where
  663  * update-side locks prevent the value of the pointer from changing, you
  664  * should instead use rcu_dereference_protected() for this use case.
  665  *
  666  * It is also permissible to use rcu_access_pointer() when read-side
  667  * access to the pointer was removed at least one grace period ago, as
  668  * is the case in the context of the RCU callback that is freeing up
  669  * the data, or after a synchronize_rcu() returns.  This can be useful
  670  * when tearing down multi-linked structures after a grace period
  671  * has elapsed.
  672  */
  673 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
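/*
 * A minimal sketch (reusing the hypothetical gp from the sketch above):
 * testing an RCU-protected pointer against NULL without dereferencing
 * it, so no read-side critical section or ordering is required.
 */
static bool foo_is_published(void)
{
	return rcu_access_pointer(gp) != NULL;
}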
  674 
  675 /**
  676  * rcu_dereference_check() - rcu_dereference with debug checking
  677  * @p: The pointer to read, prior to dereferencing
  678  * @c: The conditions under which the dereference will take place
  679  *
  680  * Do an rcu_dereference(), but check that the conditions under which the
  681  * dereference will take place are correct.  Typically the conditions
  682  * indicate the various locking conditions that should be held at that
  683  * point.  The check should return true if the conditions are satisfied.
  684  * An implicit check for being in an RCU read-side critical section
  685  * (rcu_read_lock()) is included.
  686  *
  687  * For example:
  688  *
  689  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
  690  *
  691  * could be used to indicate to lockdep that foo->bar may only be dereferenced
  692  * if either rcu_read_lock() is held, or that the lock required to replace
  693  * the bar struct at foo->bar is held.
  694  *
  695  * Note that the list of conditions may also include indications of when a lock
  696  * need not be held, for example during initialisation or destruction of the
  697  * target struct:
  698  *
  699  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
  700  *					      atomic_read(&foo->usage) == 0);
  701  *
  702  * Inserts memory barriers on architectures that require them
  703  * (currently only the Alpha), prevents the compiler from refetching
  704  * (and from merging fetches), and, more importantly, documents exactly
  705  * which pointers are protected by RCU and checks that the pointer is
  706  * annotated as __rcu.
  707  */
  708 #define rcu_dereference_check(p, c) \
  709 	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
  710 
  711 /**
  712  * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
  713  * @p: The pointer to read, prior to dereferencing
  714  * @c: The conditions under which the dereference will take place
  715  *
  716  * This is the RCU-bh counterpart to rcu_dereference_check().
  717  */
  718 #define rcu_dereference_bh_check(p, c) \
  719 	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
  720 
  721 /**
  722  * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
  723  * @p: The pointer to read, prior to dereferencing
  724  * @c: The conditions under which the dereference will take place
  725  *
  726  * This is the RCU-sched counterpart to rcu_dereference_check().
  727  */
  728 #define rcu_dereference_sched_check(p, c) \
  729 	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
  730 				__rcu)
  731 
  732 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
  733 
  734 /*
  735  * The tracing infrastructure traces RCU (we want that), but unfortunately
   736  * some of the RCU checks cause tracing to lock up the system.
  737  *
  738  * The no-tracing version of rcu_dereference_raw() must not call
  739  * rcu_read_lock_held().
  740  */
  741 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
  742 
  743 /**
  744  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
  745  * @p: The pointer to read, prior to dereferencing
  746  * @c: The conditions under which the dereference will take place
  747  *
  748  * Return the value of the specified RCU-protected pointer, but omit
  749  * both the smp_read_barrier_depends() and the READ_ONCE().  This
  750  * is useful in cases where update-side locks prevent the value of the
  751  * pointer from changing.  Please note that this primitive does -not-
  752  * prevent the compiler from repeating this reference or combining it
  753  * with other references, so it should not be used without protection
  754  * of appropriate locks.
  755  *
  756  * This function is only for update-side use.  Using this function
  757  * when protected only by rcu_read_lock() will result in infrequent
  758  * but very ugly failures.
  759  */
  760 #define rcu_dereference_protected(p, c) \
  761 	__rcu_dereference_protected((p), (c), __rcu)
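/*
 * An update-side sketch (hypothetical names as in the earlier sketches):
 * rcu_dereference_protected() is legal here because foo_lock excludes
 * concurrent updaters, so the pointer cannot change underfoot.
 */
static int foo_read_locked(void)
{
	struct foo *p;
	int ret = -ENOENT;

	spin_lock(&foo_lock);
	p = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
	if (p)
		ret = p->a;
	spin_unlock(&foo_lock);
	return ret;
}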
  762 
  763 
  764 /**
  765  * rcu_dereference() - fetch RCU-protected pointer for dereferencing
  766  * @p: The pointer to read, prior to dereferencing
  767  *
  768  * This is a simple wrapper around rcu_dereference_check().
  769  */
  770 #define rcu_dereference(p) rcu_dereference_check(p, 0)
  771 
  772 /**
  773  * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
  774  * @p: The pointer to read, prior to dereferencing
  775  *
  776  * Makes rcu_dereference_check() do the dirty work.
  777  */
  778 #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
  779 
  780 /**
  781  * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
  782  * @p: The pointer to read, prior to dereferencing
  783  *
  784  * Makes rcu_dereference_check() do the dirty work.
  785  */
  786 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
  787 
  788 /**
  789  * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
  790  * @p: The pointer to hand off
  791  *
  792  * This is simply an identity function, but it documents where a pointer
  793  * is handed off from RCU to some other synchronization mechanism, for
  794  * example, reference counting or locking.  In C11, it would map to
  795  * kill_dependency().  It could be used as follows:
  796  *
  797  *	rcu_read_lock();
  798  *	p = rcu_dereference(gp);
  799  *	long_lived = is_long_lived(p);
  800  *	if (long_lived) {
  801  *		if (!atomic_inc_not_zero(p->refcnt))
  802  *			long_lived = false;
  803  *		else
  804  *			p = rcu_pointer_handoff(p);
  805  *	}
  806  *	rcu_read_unlock();
  807  */
  808 #define rcu_pointer_handoff(p) (p)
  809 
  810 /**
  811  * rcu_read_lock() - mark the beginning of an RCU read-side critical section
  812  *
  813  * When synchronize_rcu() is invoked on one CPU while other CPUs
  814  * are within RCU read-side critical sections, then the
  815  * synchronize_rcu() is guaranteed to block until after all the other
  816  * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
  817  * on one CPU while other CPUs are within RCU read-side critical
  818  * sections, invocation of the corresponding RCU callback is deferred
   819  * until after all the other CPUs exit their critical sections.
  820  *
  821  * Note, however, that RCU callbacks are permitted to run concurrently
  822  * with new RCU read-side critical sections.  One way that this can happen
  823  * is via the following sequence of events: (1) CPU 0 enters an RCU
  824  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
  825  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
   826  * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
  827  * callback is invoked.  This is legal, because the RCU read-side critical
  828  * section that was running concurrently with the call_rcu() (and which
  829  * therefore might be referencing something that the corresponding RCU
  830  * callback would free up) has completed before the corresponding
  831  * RCU callback is invoked.
  832  *
  833  * RCU read-side critical sections may be nested.  Any deferred actions
  834  * will be deferred until the outermost RCU read-side critical section
  835  * completes.
  836  *
  837  * You can avoid reading and understanding the next paragraph by
  838  * following this rule: don't put anything in an rcu_read_lock() RCU
  839  * read-side critical section that would block in a !PREEMPT kernel.
  840  * But if you want the full story, read on!
  841  *
  842  * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
  843  * it is illegal to block while in an RCU read-side critical section.
  844  * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
  845  * kernel builds, RCU read-side critical sections may be preempted,
  846  * but explicit blocking is illegal.  Finally, in preemptible RCU
  847  * implementations in real-time (with -rt patchset) kernel builds, RCU
  848  * read-side critical sections may be preempted and they may also block, but
  849  * only when acquiring spinlocks that are subject to priority inheritance.
  850  */
  851 static inline void rcu_read_lock(void)
  852 {
  853 	__rcu_read_lock();
  854 	__acquire(RCU);
  855 	rcu_lock_acquire(&rcu_lock_map);
  856 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
  857 			 "rcu_read_lock() used illegally while idle");
  858 }
  859 
  860 /*
  861  * So where is rcu_write_lock()?  It does not exist, as there is no
  862  * way for writers to lock out RCU readers.  This is a feature, not
  863  * a bug -- this property is what provides RCU's performance benefits.
  864  * Of course, writers must coordinate with each other.  The normal
  865  * spinlock primitives work well for this, but any other technique may be
  866  * used as well.  RCU does not care how the writers keep out of each
  867  * others' way, as long as they do so.
  868  */
  869 
  870 /**
  871  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  872  *
  873  * In most situations, rcu_read_unlock() is immune from deadlock.
  874  * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
  875  * is responsible for deboosting, which it does via rt_mutex_unlock().
  876  * Unfortunately, this function acquires the scheduler's runqueue and
  877  * priority-inheritance spinlocks.  This means that deadlock could result
  878  * if the caller of rcu_read_unlock() already holds one of these locks or
  879  * any lock that is ever acquired while holding them; or any lock which
  880  * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
  881  * does not disable irqs while taking ->wait_lock.
  882  *
  883  * That said, RCU readers are never priority boosted unless they were
  884  * preempted.  Therefore, one way to avoid deadlock is to make sure
  885  * that preemption never happens within any RCU read-side critical
  886  * section whose outermost rcu_read_unlock() is called with one of
  887  * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
  888  * a number of ways, for example, by invoking preempt_disable() before
  889  * critical section's outermost rcu_read_lock().
  890  *
  891  * Given that the set of locks acquired by rt_mutex_unlock() might change
  892  * at any time, a somewhat more future-proofed approach is to make sure
   893  * that preemption never happens within any RCU read-side critical
  894  * section whose outermost rcu_read_unlock() is called with irqs disabled.
  895  * This approach relies on the fact that rt_mutex_unlock() currently only
  896  * acquires irq-disabled locks.
  897  *
  898  * The second of these two approaches is best in most situations,
  899  * however, the first approach can also be useful, at least to those
  900  * developers willing to keep abreast of the set of locks acquired by
  901  * rt_mutex_unlock().
  902  *
  903  * See rcu_read_lock() for more information.
  904  */
  905 static inline void rcu_read_unlock(void)
  906 {
  907 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
  908 			 "rcu_read_unlock() used illegally while idle");
  909 	__release(RCU);
  910 	__rcu_read_unlock();
  911 	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
  912 }
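/*
 * A read-side sketch (hypothetical names as above): the critical section
 * guarantees that the object returned by rcu_dereference() is not freed
 * before the matching rcu_read_unlock().
 */
static int foo_get_a(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		a = p->a;
	rcu_read_unlock();
	return a;		/* p must not be used past rcu_read_unlock() */
}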
  913 
  914 /**
  915  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  916  *
   917  * This is equivalent to rcu_read_lock(), but is to be used when updates
   918  * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
   919  * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
   920  * softirq handler to be a quiescent state, a process in an RCU read-side
   921  * critical section must be protected by disabling softirqs. Read-side
  922  * critical sections in interrupt context can use just rcu_read_lock(),
  923  * though this should at least be commented to avoid confusing people
  924  * reading the code.
  925  *
  926  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
  927  * must occur in the same context, for example, it is illegal to invoke
  928  * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
  929  * was invoked from some other task.
  930  */
  931 static inline void rcu_read_lock_bh(void)
  932 {
  933 	local_bh_disable();
  934 	__acquire(RCU_BH);
  935 	rcu_lock_acquire(&rcu_bh_lock_map);
  936 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
  937 			 "rcu_read_lock_bh() used illegally while idle");
  938 }
  939 
  940 /*
  941  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
  942  *
  943  * See rcu_read_lock_bh() for more information.
  944  */
  945 static inline void rcu_read_unlock_bh(void)
  946 {
  947 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
  948 			 "rcu_read_unlock_bh() used illegally while idle");
  949 	rcu_lock_release(&rcu_bh_lock_map);
  950 	__release(RCU_BH);
  951 	local_bh_enable();
  952 }
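/*
 * The corresponding RCU-bh reader sketch (hypothetical gp as above),
 * appropriate when the update side uses call_rcu_bh() or
 * synchronize_rcu_bh():
 */
static int foo_get_a_bh(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock_bh();
	p = rcu_dereference_bh(gp);
	if (p)
		a = p->a;
	rcu_read_unlock_bh();
	return a;
}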
  953 
  954 /**
   955  * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
   956  *
   957  * This is equivalent to rcu_read_lock(), but is to be used when updates
  958  * are being done using call_rcu_sched() or synchronize_rcu_sched().
  959  * Read-side critical sections can also be introduced by anything that
  960  * disables preemption, including local_irq_disable() and friends.
  961  *
  962  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
  963  * must occur in the same context, for example, it is illegal to invoke
  964  * rcu_read_unlock_sched() from process context if the matching
  965  * rcu_read_lock_sched() was invoked from an NMI handler.
  966  */
  967 static inline void rcu_read_lock_sched(void)
  968 {
  969 	preempt_disable();
  970 	__acquire(RCU_SCHED);
  971 	rcu_lock_acquire(&rcu_sched_lock_map);
  972 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
  973 			 "rcu_read_lock_sched() used illegally while idle");
  974 }
  975 
  976 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
  977 static inline notrace void rcu_read_lock_sched_notrace(void)
  978 {
  979 	preempt_disable_notrace();
  980 	__acquire(RCU_SCHED);
  981 }
  982 
  983 /*
   984  * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
  985  *
  986  * See rcu_read_lock_sched for more information.
  987  */
  988 static inline void rcu_read_unlock_sched(void)
  989 {
  990 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
  991 			 "rcu_read_unlock_sched() used illegally while idle");
  992 	rcu_lock_release(&rcu_sched_lock_map);
  993 	__release(RCU_SCHED);
  994 	preempt_enable();
  995 }
  996 
  997 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
  998 static inline notrace void rcu_read_unlock_sched_notrace(void)
  999 {
 1000 	__release(RCU_SCHED);
 1001 	preempt_enable_notrace();
 1002 }
 1003 
 1004 /**
  1005  * RCU_INIT_POINTER() - initialize an RCU-protected pointer
 1006  *
 1007  * Initialize an RCU-protected pointer in special cases where readers
 1008  * do not need ordering constraints on the CPU or the compiler.  These
 1009  * special cases are:
 1010  *
 1011  * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 1012  * 2.	The caller has taken whatever steps are required to prevent
 1013  *	RCU readers from concurrently accessing this pointer -or-
 1014  * 3.	The referenced data structure has already been exposed to
 1015  *	readers either at compile time or via rcu_assign_pointer() -and-
 1016  *	a.	You have not made -any- reader-visible changes to
 1017  *		this structure since then -or-
 1018  *	b.	It is OK for readers accessing this structure from its
 1019  *		new location to see the old state of the structure.  (For
 1020  *		example, the changes were to statistical counters or to
 1021  *		other state where exact synchronization is not required.)
 1022  *
 1023  * Failure to follow these rules governing use of RCU_INIT_POINTER() will
  1024  * result in impossible-to-diagnose memory corruption.  That is, the
  1025  * structures will look fine in crash dumps, but any concurrent RCU readers
  1026  * might see pre-initialization values of the referenced data structure.  So
 1027  * please be very careful how you use RCU_INIT_POINTER()!!!
 1028  *
 1029  * If you are creating an RCU-protected linked structure that is accessed
 1030  * by a single external-to-structure RCU-protected pointer, then you may
 1031  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 1032  * pointers, but you must use rcu_assign_pointer() to initialize the
 1033  * external-to-structure pointer -after- you have completely initialized
 1034  * the reader-accessible portions of the linked structure.
 1035  *
 1036  * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 1037  * ordering guarantees for either the CPU or the compiler.
 1038  */
 1039 #define RCU_INIT_POINTER(p, v) \
 1040 	do { \
 1041 		rcu_dereference_sparse(p, __rcu); \
 1042 		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
 1043 	} while (0)
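/*
 * A teardown sketch (hypothetical names as above): NULLing out the
 * pointer is case 1 of the list above, so RCU_INIT_POINTER() suffices
 * and no ordering is needed.
 */
static void unpublish_foo(void)
{
	spin_lock(&foo_lock);
	RCU_INIT_POINTER(gp, NULL);
	spin_unlock(&foo_lock);
}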
 1044 
 1045 /**
  1046  * RCU_POINTER_INITIALIZER() - statically initialize an RCU-protected pointer
 1047  *
 1048  * GCC-style initialization for an RCU-protected pointer in a structure field.
 1049  */
 1050 #define RCU_POINTER_INITIALIZER(p, v) \
 1051 		.p = RCU_INITIALIZER(v)
 1052 
 1053 /*
 1054  * Does the specified offset indicate that the corresponding rcu_head
 1055  * structure can be handled by kfree_rcu()?
 1056  */
 1057 #define __is_kfree_rcu_offset(offset) ((offset) < 4096)
 1058 
 1059 /*
 1060  * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 1061  */
 1062 #define __kfree_rcu(head, offset) \
 1063 	do { \
 1064 		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
 1065 		kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
 1066 	} while (0)
 1067 
 1068 /**
 1069  * kfree_rcu() - kfree an object after a grace period.
 1070  * @ptr:	pointer to kfree
 1071  * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 1072  *
  1073  * Many RCU callback functions just call kfree() on the base structure.
 1074  * These functions are trivial, but their size adds up, and furthermore
 1075  * when they are used in a kernel module, that module must invoke the
 1076  * high-latency rcu_barrier() function at module-unload time.
 1077  *
 1078  * The kfree_rcu() function handles this issue.  Rather than encoding a
 1079  * function address in the embedded rcu_head structure, kfree_rcu() instead
 1080  * encodes the offset of the rcu_head structure within the base structure.
 1081  * Because the functions are not allowed in the low-order 4096 bytes of
 1082  * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 1083  * If the offset is larger than 4095 bytes, a compile-time error will
 1084  * be generated in __kfree_rcu().  If this error is triggered, you can
 1085  * either fall back to use of call_rcu() or rearrange the structure to
 1086  * position the rcu_head structure into the first 4096 bytes.
 1087  *
 1088  * Note that the allowable offset might decrease in the future, for example,
 1089  * to allow something like kmem_cache_free_rcu().
 1090  *
 1091  * The BUILD_BUG_ON check must not involve any function calls, hence the
 1092  * checks are done in macros here.
 1093  */
 1094 #define kfree_rcu(ptr, rcu_head)					\
 1095 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
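/*
 * A replace-and-free sketch (hypothetical names as above): the old
 * element is passed to kfree_rcu(), which kfree()s it after a grace
 * period via the rcu_head embedded in struct foo, with no hand-written
 * callback and no rcu_barrier() needed at module-unload time.
 */
static int replace_foo(int a)
{
	struct foo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return -ENOMEM;
	newp->a = a;
	spin_lock(&foo_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&foo_lock);
	if (oldp)
		kfree_rcu(oldp, rcu);	/* "rcu" names the rcu_head field */
	return 0;
}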
 1096 
 1097 #ifdef CONFIG_TINY_RCU
 1098 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 1099 {
 1100 	*nextevt = KTIME_MAX;
 1101 	return 0;
 1102 }
 1103 #endif /* #ifdef CONFIG_TINY_RCU */
 1104 
 1105 #if defined(CONFIG_RCU_NOCB_CPU_ALL)
 1106 static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
 1107 #elif defined(CONFIG_RCU_NOCB_CPU)
 1108 bool rcu_is_nocb_cpu(int cpu);
 1109 #else
 1110 static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
 1111 #endif
 1112 
 1113 
 1114 /* Only for use by adaptive-ticks code. */
 1115 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 1116 bool rcu_sys_is_idle(void);
 1117 void rcu_sysidle_force_exit(void);
 1118 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 1119 
 1120 static inline bool rcu_sys_is_idle(void)
 1121 {
 1122 	return false;
 1123 }
 1124 
 1125 static inline void rcu_sysidle_force_exit(void)
 1126 {
 1127 }
 1128 
 1129 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 1130 
 1131 
 1132 /*
 1133  * Dump the ftrace buffer, but only one time per callsite per boot.
 1134  */
 1135 #define rcu_ftrace_dump(oops_dump_mode) \
 1136 do { \
 1137 	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
 1138 	\
 1139 	if (!atomic_read(&___rfd_beenhere) && \
 1140 	    !atomic_xchg(&___rfd_beenhere, 1)) \
 1141 		ftrace_dump(oops_dump_mode); \
 1142 } while (0)
 1143 
 1144 
  1145 #endif /* __LINUX_RCUPDATE_H */

     1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
    29  *  linux/spinlock_types_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
   121  * Despite its name, it doesn't necessarily have to be a full barrier.
   122  * It only needs to guarantee that a STORE issued before the critical
   123  * section can not be reordered with LOADs and STOREs inside this section.
   124  * spin_lock() already acts as a one-way barrier, so a LOAD inside the
   125  * region can not escape out of it. The default implementation therefore
   126  * only has to ensure that a STORE can not move into the critical section;
   127  * smp_wmb() serializes it with the STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /**
  134  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  135  * @lock: the spinlock in question.
  136  */
  137 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  138 
  139 #ifdef CONFIG_DEBUG_SPINLOCK
  140  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  142  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  143  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  144 #else
  145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  146 {
  147 	__acquire(lock);
  148 	arch_spin_lock(&lock->raw_lock);
  149 }
  150 
  151 static inline void
  152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  153 {
  154 	__acquire(lock);
  155 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  156 }
  157 
  158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  159 {
  160 	return arch_spin_trylock(&(lock)->raw_lock);
  161 }
  162 
  163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  164 {
  165 	arch_spin_unlock(&lock->raw_lock);
  166 	__release(lock);
  167 }
  168 #endif
  169 
  170 /*
  171  * Define the various spin_lock methods.  Note we define these
  172  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  173  * various methods are defined as nops in the case they are not
  174  * required.
  175  */
  176 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  177 
  178 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  179 
  180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  181 # define raw_spin_lock_nested(lock, subclass) \
  182 	_raw_spin_lock_nested(lock, subclass)
  183 # define raw_spin_lock_bh_nested(lock, subclass) \
  184 	_raw_spin_lock_bh_nested(lock, subclass)
  185 
  186 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  187 	 do {								\
  188 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  189 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  190 	 } while (0)
  191 #else
  192 /*
  193  * Always evaluate the 'subclass' argument to avoid that the compiler
  194  * warns about set-but-not-used variables when building with
  195  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  196  */
  197 # define raw_spin_lock_nested(lock, subclass)		\
  198 	_raw_spin_lock(((void)(subclass), (lock)))
  199 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  200 # define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
  201 #endif
  202 
  203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  204 
  205 #define raw_spin_lock_irqsave(lock, flags)			\
  206 	do {						\
  207 		typecheck(unsigned long, flags);	\
  208 		flags = _raw_spin_lock_irqsave(lock);	\
  209 	} while (0)
  210 
  211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  213 	do {								\
  214 		typecheck(unsigned long, flags);			\
  215 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  216 	} while (0)
  217 #else
  218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  219 	do {								\
  220 		typecheck(unsigned long, flags);			\
  221 		flags = _raw_spin_lock_irqsave(lock);			\
  222 	} while (0)
  223 #endif
  224 
  225 #else
  226 
  227 #define raw_spin_lock_irqsave(lock, flags)		\
  228 	do {						\
  229 		typecheck(unsigned long, flags);	\
  230 		_raw_spin_lock_irqsave(lock, flags);	\
  231 	} while (0)
  232 
  233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  234 	raw_spin_lock_irqsave(lock, flags)
  235 
  236 #endif
  237 
  238 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  239 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  240 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  241 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  242 
  243 #define raw_spin_unlock_irqrestore(lock, flags)		\
  244 	do {							\
  245 		typecheck(unsigned long, flags);		\
  246 		_raw_spin_unlock_irqrestore(lock, flags);	\
  247 	} while (0)
  248 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  249 
  250 #define raw_spin_trylock_bh(lock) \
  251 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  252 
  253 #define raw_spin_trylock_irq(lock) \
  254 ({ \
  255 	local_irq_disable(); \
  256 	raw_spin_trylock(lock) ? \
  257 	1 : ({ local_irq_enable(); 0;  }); \
  258 })
  259 
  260 #define raw_spin_trylock_irqsave(lock, flags) \
  261 ({ \
  262 	local_irq_save(flags); \
  263 	raw_spin_trylock(lock) ? \
  264 	1 : ({ local_irq_restore(flags); 0; }); \
  265 })
  266 
  267 /**
  268  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  269  * @lock: the spinlock in question.
  270  */
  271 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  272 
  273 /* Include rwlock functions */
  274 #include <linux/rwlock.h>
  275 
  276 /*
  277  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  278  */
  279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  280 # include <linux/spinlock_api_smp.h>
  281 #else
  282 # include <linux/spinlock_api_up.h>
  283 #endif
  284 
  285 /*
  286  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  287  */
  288 
  289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  290 {
  291 	return &lock->rlock;
  292 }
  293 
  294 #define spin_lock_init(_lock)				\
  295 do {							\
  296 	spinlock_check(_lock);				\
  297 	raw_spin_lock_init(&(_lock)->rlock);		\
  298 } while (0)
  299 
  300 static __always_inline void spin_lock(spinlock_t *lock)
  301 {
  302 	raw_spin_lock(&lock->rlock);
  303 }
  304 
  305 static __always_inline void spin_lock_bh(spinlock_t *lock)
  306 {
  307 	raw_spin_lock_bh(&lock->rlock);
  308 }
  309 
  310 static __always_inline int spin_trylock(spinlock_t *lock)
  311 {
  312 	return raw_spin_trylock(&lock->rlock);
  313 }
  314 
  315 #define spin_lock_nested(lock, subclass)			\
  316 do {								\
  317 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  318 } while (0)
  319 
  320 #define spin_lock_bh_nested(lock, subclass)			\
  321 do {								\
  322 	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
  323 } while (0)
  324 
  325 #define spin_lock_nest_lock(lock, nest_lock)				\
  326 do {									\
  327 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  328 } while (0)
  329 
  330 static __always_inline void spin_lock_irq(spinlock_t *lock)
  331 {
  332 	raw_spin_lock_irq(&lock->rlock);
  333 }
  334 
  335 #define spin_lock_irqsave(lock, flags)				\
  336 do {								\
  337 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  338 } while (0)
  339 
  340 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  341 do {									\
  342 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  343 } while (0)
  344 
  345 static __always_inline void spin_unlock(spinlock_t *lock)
  346 {
  347 	raw_spin_unlock(&lock->rlock);
  348 }
  349 
  350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
  351 {
  352 	raw_spin_unlock_bh(&lock->rlock);
  353 }
  354 
  355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
  356 {
  357 	raw_spin_unlock_irq(&lock->rlock);
  358 }
  359 
  360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  361 {
  362 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  363 }
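/*
 * A minimal sketch, assuming a hypothetical counter that is also updated
 * from interrupt context: the irqsave/irqrestore pair disables local
 * interrupts around the critical section, so an IRQ handler on this CPU
 * cannot deadlock against us by taking the same lock.
 */
static DEFINE_SPINLOCK(counter_lock);
static unsigned long counter;

static void counter_inc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&counter_lock, flags);
	counter++;
	spin_unlock_irqrestore(&counter_lock, flags);
}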
  364 
  365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
  366 {
  367 	return raw_spin_trylock_bh(&lock->rlock);
  368 }
  369 
  370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
  371 {
  372 	return raw_spin_trylock_irq(&lock->rlock);
  373 }
  374 
  375 #define spin_trylock_irqsave(lock, flags)			\
  376 ({								\
  377 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  378 })
  379 
  380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
  381 {
  382 	raw_spin_unlock_wait(&lock->rlock);
  383 }
  384 
  385 static __always_inline int spin_is_locked(spinlock_t *lock)
  386 {
  387 	return raw_spin_is_locked(&lock->rlock);
  388 }
  389 
  390 static __always_inline int spin_is_contended(spinlock_t *lock)
  391 {
  392 	return raw_spin_is_contended(&lock->rlock);
  393 }
  394 
  395 static __always_inline int spin_can_lock(spinlock_t *lock)
  396 {
  397 	return raw_spin_can_lock(&lock->rlock);
  398 }
  399 
  400 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  401 
  402 /*
  403  * Pull the atomic_t declaration:
  404  * (asm-mips/atomic.h needs above definitions)
  405  */
  406 #include <linux/atomic.h>
  407 /**
  408  * atomic_dec_and_lock - lock on reaching reference count zero
  409  * @atomic: the atomic counter
  410  * @lock: the spinlock in question
  411  *
  412  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  413  * @lock.  Returns false for all other cases.
  414  */
  415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  416 #define atomic_dec_and_lock(atomic, lock) \
  417 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
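/*
 * A sketch of the classic release pattern (struct obj, obj_list, and
 * obj_list_lock are hypothetical): the lock is taken only when the
 * reference count actually drops to zero, making the unlink and the
 * final kfree() atomic with respect to lookups under obj_list_lock.
 */
struct obj {
	atomic_t		refcnt;
	struct list_head	node;
};

static LIST_HEAD(obj_list);
static DEFINE_SPINLOCK(obj_list_lock);

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_lock(&o->refcnt, &obj_list_lock)) {
		list_del(&o->node);
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
}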
  418 
   419 #endif /* __LINUX_SPINLOCK_H */

     1 #ifndef __NET_ACT_API_H
    2 #define __NET_ACT_API_H
    3 
    4 /*
    5  * Public police action API for classifiers/qdiscs
    6  */
    7 
    8 #include <net/sch_generic.h>
    9 #include <net/pkt_sched.h>
   10 #include <net/net_namespace.h>
   11 #include <net/netns/generic.h>
   12 
   13 struct tcf_common {
   14 	struct hlist_node		tcfc_head;
   15 	u32				tcfc_index;
   16 	int				tcfc_refcnt;
   17 	int				tcfc_bindcnt;
   18 	u32				tcfc_capab;
   19 	int				tcfc_action;
   20 	struct tcf_t			tcfc_tm;
   21 	struct gnet_stats_basic_packed	tcfc_bstats;
   22 	struct gnet_stats_queue		tcfc_qstats;
   23 	struct gnet_stats_rate_est64	tcfc_rate_est;
   24 	spinlock_t			tcfc_lock;
   25 	struct rcu_head			tcfc_rcu;
   26 	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
   27 	struct gnet_stats_queue __percpu *cpu_qstats;
   28 };
   29 #define tcf_head	common.tcfc_head
   30 #define tcf_index	common.tcfc_index
   31 #define tcf_refcnt	common.tcfc_refcnt
   32 #define tcf_bindcnt	common.tcfc_bindcnt
   33 #define tcf_capab	common.tcfc_capab
   34 #define tcf_action	common.tcfc_action
   35 #define tcf_tm		common.tcfc_tm
   36 #define tcf_bstats	common.tcfc_bstats
   37 #define tcf_qstats	common.tcfc_qstats
   38 #define tcf_rate_est	common.tcfc_rate_est
   39 #define tcf_lock	common.tcfc_lock
   40 #define tcf_rcu		common.tcfc_rcu
   41 
   42 struct tcf_hashinfo {
   43 	struct hlist_head	*htab;
   44 	unsigned int		hmask;
   45 	spinlock_t		lock;
   46 	u32			index;
   47 };
   48 
   49 static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
   50 {
   51 	return index & hmask;
   52 }
   53 
   54 static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask)
   55 {
   56 	int i;
   57 
   58 	spin_lock_init(&hf->lock);
   59 	hf->index = 0;
   60 	hf->hmask = mask;
   61 	hf->htab = kzalloc((mask + 1) * sizeof(struct hlist_head),
   62 			   GFP_KERNEL);
   63 	if (!hf->htab)
   64 		return -ENOMEM;
   65 	for (i = 0; i < mask + 1; i++)
   66 		INIT_HLIST_HEAD(&hf->htab[i]);
   67 	return 0;
   68 }
   69 
   70 /* Update lastuse only if needed, to avoid dirtying a cache line.
   71  * We use a temp variable to avoid fetching jiffies twice.
   72  */
   73 static inline void tcf_lastuse_update(struct tcf_t *tm)
   74 {
   75 	unsigned long now = jiffies;
   76 
   77 	if (tm->lastuse != now)
   78 		tm->lastuse = now;
   79 }
   80 
   81 struct tc_action {
   82 	void			*priv;
   83 	const struct tc_action_ops	*ops;
   84 	__u32			type; /* for backward compat(TCA_OLD_COMPAT) */
   85 	__u32			order;
   86 	struct list_head	list;
   87 	struct tcf_hashinfo	*hinfo;
   88 };
   89 
   90 #ifdef CONFIG_NET_CLS_ACT
   91 
   92 #define ACT_P_CREATED 1
   93 #define ACT_P_DELETED 1
   94 
   95 struct tc_action_ops {
   96 	struct list_head head;
   97 	char    kind[IFNAMSIZ];
   98 	__u32   type; /* TBD to match kind */
   99 	struct module		*owner;
  100 	int     (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
  101 	int     (*dump)(struct sk_buff *, struct tc_action *, int, int);
  102 	void	(*cleanup)(struct tc_action *, int bind);
  103 	int     (*lookup)(struct net *, struct tc_action *, u32);
  104 	int     (*init)(struct net *net, struct nlattr *nla,
  105 			struct nlattr *est, struct tc_action *act, int ovr,
  106 			int bind);
  107 	int     (*walk)(struct net *, struct sk_buff *,
  108 			struct netlink_callback *, int, struct tc_action *);
  109 	void	(*stats_update)(struct tc_action *, u64, u32, u64);
  110 };
  111 
  112 struct tc_action_net {
  113 	struct tcf_hashinfo *hinfo;
  114 	const struct tc_action_ops *ops;
  115 };
  116 
  117 static inline
  118 int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops,
  119 		       unsigned int mask)
  120 {
  121 	int err = 0;
  122 
  123 	tn->hinfo = kmalloc(sizeof(*tn->hinfo), GFP_KERNEL);
  124 	if (!tn->hinfo)
  125 		return -ENOMEM;
  126 	tn->ops = ops;
  127 	err = tcf_hashinfo_init(tn->hinfo, mask);
  128 	if (err)
  129 		kfree(tn->hinfo);
  130 	return err;
  131 }
  132 
  133 void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
  134 			  struct tcf_hashinfo *hinfo);
  135 
  136 static inline void tc_action_net_exit(struct tc_action_net *tn)
  137 {
  138 	tcf_hashinfo_destroy(tn->ops, tn->hinfo);
  139 	kfree(tn->hinfo);
  140 }
  141 
  142 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
  143 		       struct netlink_callback *cb, int type,
  144 		       struct tc_action *a);
  145 int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index);
  146 u32 tcf_hash_new_index(struct tc_action_net *tn);
  147 int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
  148 		   int bind);
  149 int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
  150 		    struct tc_action *a, int size, int bind, bool cpustats);
  151 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
  152 void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a);
  153 
  154 int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
  155 
  156 static inline int tcf_hash_release(struct tc_action *a, bool bind)
  157 {
  158 	return __tcf_hash_release(a, bind, false);
  159 }
  160 
  161 int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
  162 int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops);
  163 int tcf_action_destroy(struct list_head *actions, int bind);
  164 int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
  165 		    struct tcf_result *res);
  166 int tcf_action_init(struct net *net, struct nlattr *nla,
  167 				  struct nlattr *est, char *n, int ovr,
  168 				  int bind, struct list_head *);
  169 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
  170 				    struct nlattr *est, char *n, int ovr,
  171 				    int bind);
  172 int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
  173 int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
  174 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
  175 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
  176 
  177 #define tc_no_actions(_exts) \
  178 	(list_empty(&(_exts)->actions))
  179 
  180 #define tc_for_each_action(_a, _exts) \
   181 	list_for_each_entry(_a, &(_exts)->actions, list)
  182 
  183 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
  184 					   u64 packets, u64 lastuse)
  185 {
  186 	if (!a->ops->stats_update)
  187 		return;
  188 
  189 	a->ops->stats_update(a, bytes, packets, lastuse);
  190 }
  191 
  192 #else /* CONFIG_NET_CLS_ACT */
  193 
  194 #define tc_no_actions(_exts) true
  195 #define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
  196 #define tcf_action_stats_update(a, bytes, packets, lastuse)
  197 
  198 #endif /* CONFIG_NET_CLS_ACT */
   199 #endif /* __NET_ACT_API_H */

     1 #ifndef __NET_NETLINK_H
    2 #define __NET_NETLINK_H
    3 
    4 #include <linux/types.h>
    5 #include <linux/netlink.h>
    6 #include <linux/jiffies.h>
    7 #include <linux/in6.h>
    8 
    9 /* ========================================================================
   10  *         Netlink Messages and Attributes Interface (As Seen On TV)
   11  * ------------------------------------------------------------------------
   12  *                          Messages Interface
   13  * ------------------------------------------------------------------------
   14  *
   15  * Message Format:
   16  *    <--- nlmsg_total_size(payload)  --->
   17  *    <-- nlmsg_msg_size(payload) ->
   18  *   +----------+- - -+-------------+- - -+-------- - -
   19  *   | nlmsghdr | Pad |   Payload   | Pad | nlmsghdr
   20  *   +----------+- - -+-------------+- - -+-------- - -
   21  *   nlmsg_data(nlh)---^                   ^
   22  *   nlmsg_next(nlh)-----------------------+
   23  *
   24  * Payload Format:
   25  *    <---------------------- nlmsg_len(nlh) --------------------->
   26  *    <------ hdrlen ------>       <- nlmsg_attrlen(nlh, hdrlen) ->
   27  *   +----------------------+- - -+--------------------------------+
   28  *   |     Family Header    | Pad |           Attributes           |
   29  *   +----------------------+- - -+--------------------------------+
   30  *   nlmsg_attrdata(nlh, hdrlen)---^
   31  *
   32  * Data Structures:
   33  *   struct nlmsghdr			netlink message header
   34  *
   35  * Message Construction:
   36  *   nlmsg_new()			create a new netlink message
   37  *   nlmsg_put()			add a netlink message to an skb
   38  *   nlmsg_put_answer()			callback based nlmsg_put()
   39  *   nlmsg_end()			finalize netlink message
   40  *   nlmsg_get_pos()			return current position in message
   41  *   nlmsg_trim()			trim part of message
   42  *   nlmsg_cancel()			cancel message construction
   43  *   nlmsg_free()			free a netlink message
   44  *
   45  * Message Sending:
   46  *   nlmsg_multicast()			multicast message to several groups
   47  *   nlmsg_unicast()			unicast a message to a single socket
   48  *   nlmsg_notify()			send notification message
   49  *
   50  * Message Length Calculations:
   51  *   nlmsg_msg_size(payload)		length of message w/o padding
   52  *   nlmsg_total_size(payload)		length of message w/ padding
   53  *   nlmsg_padlen(payload)		length of padding at tail
   54  *
   55  * Message Payload Access:
   56  *   nlmsg_data(nlh)			head of message payload
   57  *   nlmsg_len(nlh)			length of message payload
   58  *   nlmsg_attrdata(nlh, hdrlen)	head of attributes data
   59  *   nlmsg_attrlen(nlh, hdrlen)		length of attributes data
   60  *
   61  * Message Parsing:
   62  *   nlmsg_ok(nlh, remaining)		does nlh fit into remaining bytes?
   63  *   nlmsg_next(nlh, remaining)		get next netlink message
   64  *   nlmsg_parse()			parse attributes of a message
   65  *   nlmsg_find_attr()			find an attribute in a message
   66  *   nlmsg_for_each_msg()		loop over all messages
   67  *   nlmsg_validate()			validate netlink message incl. attrs
   68  *   nlmsg_for_each_attr()		loop over all attributes
   69  *
   70  * Misc:
   71  *   nlmsg_report()			report back to application?
   72  *
   73  * ------------------------------------------------------------------------
   74  *                          Attributes Interface
   75  * ------------------------------------------------------------------------
   76  *
   77  * Attribute Format:
   78  *    <------- nla_total_size(payload) ------->
   79  *    <---- nla_attr_size(payload) ----->
   80  *   +----------+- - -+- - - - - - - - - +- - -+-------- - -
   81  *   |  Header  | Pad |     Payload      | Pad |  Header
   82  *   +----------+- - -+- - - - - - - - - +- - -+-------- - -
   83  *                     <- nla_len(nla) ->      ^
   84  *   nla_data(nla)----^                        |
   85  *   nla_next(nla)-----------------------------'
   86  *
   87  * Data Structures:
   88  *   struct nlattr			netlink attribute header
   89  *
   90  * Attribute Construction:
   91  *   nla_reserve(skb, type, len)	reserve room for an attribute
   92  *   nla_reserve_nohdr(skb, len)	reserve room for an attribute w/o hdr
   93  *   nla_put(skb, type, len, data)	add attribute to skb
   94  *   nla_put_nohdr(skb, len, data)	add attribute w/o hdr
   95  *   nla_append(skb, len, data)		append data to skb
   96  *
   97  * Attribute Construction for Basic Types:
   98  *   nla_put_u8(skb, type, value)	add u8 attribute to skb
   99  *   nla_put_u16(skb, type, value)	add u16 attribute to skb
  100  *   nla_put_u32(skb, type, value)	add u32 attribute to skb
  101  *   nla_put_u64_64bits(skb, type,
  102  *			value, padattr)	add u64 attribute to skb
  103  *   nla_put_s8(skb, type, value)	add s8 attribute to skb
  104  *   nla_put_s16(skb, type, value)	add s16 attribute to skb
  105  *   nla_put_s32(skb, type, value)	add s32 attribute to skb
  106  *   nla_put_s64(skb, type, value,
  107  *               padattr)		add s64 attribute to skb
  108  *   nla_put_string(skb, type, str)	add string attribute to skb
  109  *   nla_put_flag(skb, type)		add flag attribute to skb
  110  *   nla_put_msecs(skb, type, jiffies,
  111  *                 padattr)		add msecs attribute to skb
  112  *   nla_put_in_addr(skb, type, addr)	add IPv4 address attribute to skb
  113  *   nla_put_in6_addr(skb, type, addr)	add IPv6 address attribute to skb
  114  *
  115  * Nested Attributes Construction:
  116  *   nla_nest_start(skb, type)		start a nested attribute
  117  *   nla_nest_end(skb, nla)		finalize a nested attribute
  118  *   nla_nest_cancel(skb, nla)		cancel nested attribute construction
  119  *
  120  * Attribute Length Calculations:
  121  *   nla_attr_size(payload)		length of attribute w/o padding
  122  *   nla_total_size(payload)		length of attribute w/ padding
  123  *   nla_padlen(payload)		length of padding
  124  *
  125  * Attribute Payload Access:
  126  *   nla_data(nla)			head of attribute payload
  127  *   nla_len(nla)			length of attribute payload
  128  *
  129  * Attribute Payload Access for Basic Types:
  130  *   nla_get_u8(nla)			get payload for a u8 attribute
  131  *   nla_get_u16(nla)			get payload for a u16 attribute
  132  *   nla_get_u32(nla)			get payload for a u32 attribute
  133  *   nla_get_u64(nla)			get payload for a u64 attribute
  134  *   nla_get_s8(nla)			get payload for a s8 attribute
  135  *   nla_get_s16(nla)			get payload for a s16 attribute
  136  *   nla_get_s32(nla)			get payload for a s32 attribute
  137  *   nla_get_s64(nla)			get payload for a s64 attribute
  138  *   nla_get_flag(nla)			return 1 if flag is true
  139  *   nla_get_msecs(nla)			get payload for a msecs attribute
  140  *
  141  * Attribute Misc:
  142  *   nla_memcpy(dest, nla, count)	copy attribute into memory
  143  *   nla_memcmp(nla, data, size)	compare attribute with memory area
  144  *   nla_strlcpy(dst, nla, size)	copy attribute to a sized string
  145  *   nla_strcmp(nla, str)		compare attribute with string
  146  *
  147  * Attribute Parsing:
  148  *   nla_ok(nla, remaining)		does nla fit into remaining bytes?
  149  *   nla_next(nla, remaining)		get next netlink attribute
  150  *   nla_validate()			validate a stream of attributes
  151  *   nla_validate_nested()		validate a stream of nested attributes
  152  *   nla_find()				find attribute in stream of attributes
  153  *   nla_find_nested()			find attribute in nested attributes
  154  *   nla_parse()			parse and validate stream of attrs
   155  *   nla_parse_nested()			parse nested attributes
  156  *   nla_for_each_attr()		loop over all attributes
  157  *   nla_for_each_nested()		loop over the nested attributes
  158  *=========================================================================
  159  */
  160 
  161  /**
  162   * Standard attribute types to specify validation policy
  163   */
  164 enum {
  165 	NLA_UNSPEC,
  166 	NLA_U8,
  167 	NLA_U16,
  168 	NLA_U32,
  169 	NLA_U64,
  170 	NLA_STRING,
  171 	NLA_FLAG,
  172 	NLA_MSECS,
  173 	NLA_NESTED,
  174 	NLA_NESTED_COMPAT,
  175 	NLA_NUL_STRING,
  176 	NLA_BINARY,
  177 	NLA_S8,
  178 	NLA_S16,
  179 	NLA_S32,
  180 	NLA_S64,
  181 	__NLA_TYPE_MAX,
  182 };
  183 
  184 #define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
  185 
  186 /**
  187  * struct nla_policy - attribute validation policy
  188  * @type: Type of attribute or NLA_UNSPEC
  189  * @len: Type specific length of payload
  190  *
  191  * Policies are defined as arrays of this struct, the array must be
  192  * accessible by attribute type up to the highest identifier to be expected.
  193  *
  194  * Meaning of `len' field:
  195  *    NLA_STRING           Maximum length of string
  196  *    NLA_NUL_STRING       Maximum length of string (excluding NUL)
  197  *    NLA_FLAG             Unused
  198  *    NLA_BINARY           Maximum length of attribute payload
  199  *    NLA_NESTED           Don't use `len' field -- length verification is
  200  *                         done by checking len of nested header (or empty)
  201  *    NLA_NESTED_COMPAT    Minimum length of structure payload
  202  *    NLA_U8, NLA_U16,
  203  *    NLA_U32, NLA_U64,
  204  *    NLA_S8, NLA_S16,
  205  *    NLA_S32, NLA_S64,
  206  *    NLA_MSECS            Leaving the length field zero will verify the
  207  *                         given type fits, using it verifies minimum length
  208  *                         just like "All other"
  209  *    All other            Minimum length of attribute payload
  210  *
  211  * Example:
  212  * static const struct nla_policy my_policy[ATTR_MAX+1] = {
  213  * 	[ATTR_FOO] = { .type = NLA_U16 },
  214  *	[ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
  215  *	[ATTR_BAZ] = { .len = sizeof(struct mystruct) },
  216  * };
  217  */
  218 struct nla_policy {
  219 	u16		type;
  220 	u16		len;
  221 };
  222 
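A minimal usage sketch of how such a policy drives nla_parse() (declared
below); the DEMO_* attribute names and the handler are hypothetical:

	enum {
		DEMO_ATTR_UNSPEC,
		DEMO_ATTR_PORT,		/* u16 */
		DEMO_ATTR_NAME,		/* NUL terminated string */
		__DEMO_ATTR_MAX,
	};
	#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

	static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
		[DEMO_ATTR_PORT] = { .type = NLA_U16 },
		[DEMO_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 31 },
	};

	/* parse and validate an attribute stream of 'len' bytes at 'head' */
	static int demo_parse(const struct nlattr *head, int len)
	{
		struct nlattr *tb[DEMO_ATTR_MAX + 1];
		int err;

		err = nla_parse(tb, DEMO_ATTR_MAX, head, len, demo_policy);
		if (err < 0)
			return err;

		if (tb[DEMO_ATTR_PORT])
			pr_info("port: %u\n", nla_get_u16(tb[DEMO_ATTR_PORT]));
		return 0;
	}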
  223 /**
  224  * struct nl_info - netlink source information
  225  * @nlh: Netlink message header of original request
  226  * @portid: Netlink PORTID of requesting application
  227  */
  228 struct nl_info {
  229 	struct nlmsghdr		*nlh;
  230 	struct net		*nl_net;
  231 	u32			portid;
  232 };
  233 
  234 int netlink_rcv_skb(struct sk_buff *skb,
  235 		    int (*cb)(struct sk_buff *, struct nlmsghdr *));
  236 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
  237 		 unsigned int group, int report, gfp_t flags);
  238 
  239 int nla_validate(const struct nlattr *head, int len, int maxtype,
  240 		 const struct nla_policy *policy);
  241 int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
  242 	      int len, const struct nla_policy *policy);
  243 int nla_policy_len(const struct nla_policy *, int);
  244 struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
  245 size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
  246 int nla_memcpy(void *dest, const struct nlattr *src, int count);
  247 int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
  248 int nla_strcmp(const struct nlattr *nla, const char *str);
  249 struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
  250 struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
  251 				   int attrlen, int padattr);
  252 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
  253 struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
  254 struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
  255 				 int attrlen, int padattr);
  256 void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
  257 void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
  258 	       const void *data);
  259 void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
  260 		     const void *data, int padattr);
  261 void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
  262 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
  263 int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
  264 		  const void *data, int padattr);
  265 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
  266 int nla_append(struct sk_buff *skb, int attrlen, const void *data);
  267 
  268 /**************************************************************************
  269  * Netlink Messages
  270  **************************************************************************/
  271 
  272 /**
  273  * nlmsg_msg_size - length of netlink message not including padding
  274  * @payload: length of message payload
  275  */
  276 static inline int nlmsg_msg_size(int payload)
  277 {
  278 	return NLMSG_HDRLEN + payload;
  279 }
  280 
  281 /**
  282  * nlmsg_total_size - length of netlink message including padding
  283  * @payload: length of message payload
  284  */
  285 static inline int nlmsg_total_size(int payload)
  286 {
  287 	return NLMSG_ALIGN(nlmsg_msg_size(payload));
  288 }
  289 
  290 /**
  291  * nlmsg_padlen - length of padding at the message's tail
  292  * @payload: length of message payload
  293  */
  294 static inline int nlmsg_padlen(int payload)
  295 {
  296 	return nlmsg_total_size(payload) - nlmsg_msg_size(payload);
  297 }
  298 
  299 /**
  300  * nlmsg_data - head of message payload
  301  * @nlh: netlink message header
  302  */
  303 static inline void *nlmsg_data(const struct nlmsghdr *nlh)
  304 {
  305 	return (unsigned char *) nlh + NLMSG_HDRLEN;
  306 }
  307 
  308 /**
  309  * nlmsg_len - length of message payload
  310  * @nlh: netlink message header
  311  */
  312 static inline int nlmsg_len(const struct nlmsghdr *nlh)
  313 {
  314 	return nlh->nlmsg_len - NLMSG_HDRLEN;
  315 }
  316 
  317 /**
  318  * nlmsg_attrdata - head of attributes data
  319  * @nlh: netlink message header
  320  * @hdrlen: length of family specific header
  321  */
  322 static inline struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
  323 					    int hdrlen)
  324 {
  325 	unsigned char *data = nlmsg_data(nlh);
  326 	return (struct nlattr *) (data + NLMSG_ALIGN(hdrlen));
  327 }
  328 
  329 /**
  330  * nlmsg_attrlen - length of attributes data
  331  * @nlh: netlink message header
  332  * @hdrlen: length of family specific header
  333  */
  334 static inline int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
  335 {
  336 	return nlmsg_len(nlh) - NLMSG_ALIGN(hdrlen);
  337 }
  338 
  339 /**
  340  * nlmsg_ok - check if the netlink message fits into the remaining bytes
  341  * @nlh: netlink message header
  342  * @remaining: number of bytes remaining in message stream
  343  */
  344 static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
  345 {
  346 	return (remaining >= (int) sizeof(struct nlmsghdr) &&
  347 		nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
  348 		nlh->nlmsg_len <= remaining);
  349 }
  350 
  351 /**
  352  * nlmsg_next - next netlink message in message stream
  353  * @nlh: netlink message header
  354  * @remaining: number of bytes remaining in message stream
  355  *
  356  * Returns the next netlink message in the message stream and
  357  * decrements remaining by the size of the current message.
  358  */
  359 static inline struct nlmsghdr *
  360 nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
  361 {
  362 	int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
  363 
  364 	*remaining -= totlen;
  365 
  366 	return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
  367 }
  368 
  369 /**
  370  * nlmsg_parse - parse attributes of a netlink message
  371  * @nlh: netlink message header
  372  * @hdrlen: length of family specific header
  373  * @tb: destination array with maxtype+1 elements
  374  * @maxtype: maximum attribute type to be expected
  375  * @policy: validation policy
  376  *
  377  * See nla_parse()
  378  */
  379 static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
  380 			      struct nlattr *tb[], int maxtype,
  381 			      const struct nla_policy *policy)
  382 {
  383 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
  384 		return -EINVAL;
  385 
  386 	return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
  387 			 nlmsg_attrlen(nlh, hdrlen), policy);
  388 }
  389 
  390 /**
  391  * nlmsg_find_attr - find a specific attribute in a netlink message
  392  * @nlh: netlink message header
   393  * @hdrlen: length of family specific header
  394  * @attrtype: type of attribute to look for
  395  *
  396  * Returns the first attribute which matches the specified type.
  397  */
  398 static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
  399 					     int hdrlen, int attrtype)
  400 {
  401 	return nla_find(nlmsg_attrdata(nlh, hdrlen),
  402 			nlmsg_attrlen(nlh, hdrlen), attrtype);
  403 }
  404 
  405 /**
  406  * nlmsg_validate - validate a netlink message including attributes
   407  * @nlh: netlink message header
   408  * @hdrlen: length of family specific header
  409  * @maxtype: maximum attribute type to be expected
  410  * @policy: validation policy
  411  */
  412 static inline int nlmsg_validate(const struct nlmsghdr *nlh,
  413 				 int hdrlen, int maxtype,
  414 				 const struct nla_policy *policy)
  415 {
  416 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
  417 		return -EINVAL;
  418 
  419 	return nla_validate(nlmsg_attrdata(nlh, hdrlen),
  420 			    nlmsg_attrlen(nlh, hdrlen), maxtype, policy);
  421 }
  422 
  423 /**
  424  * nlmsg_report - need to report back to application?
  425  * @nlh: netlink message header
  426  *
  427  * Returns 1 if a report back to the application is requested.
  428  */
  429 static inline int nlmsg_report(const struct nlmsghdr *nlh)
  430 {
  431 	return !!(nlh->nlmsg_flags & NLM_F_ECHO);
  432 }
  433 
  434 /**
  435  * nlmsg_for_each_attr - iterate over a stream of attributes
  436  * @pos: loop counter, set to current attribute
  437  * @nlh: netlink message header
   438  * @hdrlen: length of family specific header
  439  * @rem: initialized to len, holds bytes currently remaining in stream
  440  */
  441 #define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
  442 	nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
  443 			  nlmsg_attrlen(nlh, hdrlen), rem)
  444 
  445 /**
  446  * nlmsg_put - Add a new netlink message to an skb
  447  * @skb: socket buffer to store message in
  448  * @portid: netlink PORTID of requesting application
  449  * @seq: sequence number of message
  450  * @type: message type
  451  * @payload: length of message payload
  452  * @flags: message flags
  453  *
  454  * Returns NULL if the tailroom of the skb is insufficient to store
  455  * the message header and payload.
  456  */
  457 static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
  458 					 int type, int payload, int flags)
  459 {
  460 	if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
  461 		return NULL;
  462 
  463 	return __nlmsg_put(skb, portid, seq, type, payload, flags);
  464 }
  465 
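A sketch of the usual construction sequence around nlmsg_put(): allocate,
add the message header, append attributes, then finalize; on failure the
half-built message is cancelled. DEMO_MSG and DEMO_ATTR_PORT are
hypothetical:

	static struct sk_buff *demo_build_msg(u32 portid, u32 seq)
	{
		struct sk_buff *skb;
		struct nlmsghdr *nlh;

		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
		if (!skb)
			return NULL;

		nlh = nlmsg_put(skb, portid, seq, DEMO_MSG, 0, 0);
		if (!nlh)
			goto err_free;

		if (nla_put_u16(skb, DEMO_ATTR_PORT, 8080))
			goto err_cancel;

		nlmsg_end(skb, nlh);	/* fix up nlmsg_len to cover attributes */
		return skb;

	err_cancel:
		nlmsg_cancel(skb, nlh);	/* remove the half-built message */
	err_free:
		nlmsg_free(skb);
		return NULL;
	}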
  466 /**
  467  * nlmsg_put_answer - Add a new callback based netlink message to an skb
  468  * @skb: socket buffer to store message in
  469  * @cb: netlink callback
  470  * @type: message type
  471  * @payload: length of message payload
  472  * @flags: message flags
  473  *
  474  * Returns NULL if the tailroom of the skb is insufficient to store
  475  * the message header and payload.
  476  */
  477 static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
  478 						struct netlink_callback *cb,
  479 						int type, int payload,
  480 						int flags)
  481 {
  482 	return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  483 			 type, payload, flags);
  484 }
  485 
  486 /**
  487  * nlmsg_new - Allocate a new netlink message
  488  * @payload: size of the message payload
  489  * @flags: the type of memory to allocate.
  490  *
  491  * Use NLMSG_DEFAULT_SIZE if the size of the payload isn't known
  492  * and a good default is needed.
  493  */
  494 static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
  495 {
  496 	return alloc_skb(nlmsg_total_size(payload), flags);
  497 }
  498 
  499 /**
  500  * nlmsg_end - Finalize a netlink message
  501  * @skb: socket buffer the message is stored in
  502  * @nlh: netlink message header
  503  *
   504  * Corrects the netlink message header to include the appended
  505  * attributes. Only necessary if attributes have been added to
  506  * the message.
  507  */
  508 static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
  509 {
  510 	nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
  511 }
  512 
  513 /**
  514  * nlmsg_get_pos - return current position in netlink message
  515  * @skb: socket buffer the message is stored in
  516  *
  517  * Returns a pointer to the current tail of the message.
  518  */
  519 static inline void *nlmsg_get_pos(struct sk_buff *skb)
  520 {
  521 	return skb_tail_pointer(skb);
  522 }
  523 
  524 /**
  525  * nlmsg_trim - Trim message to a mark
  526  * @skb: socket buffer the message is stored in
  527  * @mark: mark to trim to
  528  *
  529  * Trims the message to the provided mark.
  530  */
  531 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
  532 {
  533 	if (mark) {
  534 		WARN_ON((unsigned char *) mark < skb->data);
  535 		skb_trim(skb, (unsigned char *) mark - skb->data);
  536 	}
  537 }
  538 
  539 /**
  540  * nlmsg_cancel - Cancel construction of a netlink message
  541  * @skb: socket buffer the message is stored in
  542  * @nlh: netlink message header
  543  *
  544  * Removes the complete netlink message including all
  545  * attributes from the socket buffer again.
  546  */
  547 static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
  548 {
  549 	nlmsg_trim(skb, nlh);
  550 }
  551 
  552 /**
  553  * nlmsg_free - free a netlink message
  554  * @skb: socket buffer of netlink message
  555  */
  556 static inline void nlmsg_free(struct sk_buff *skb)
  557 {
  558 	kfree_skb(skb);
  559 }
  560 
  561 /**
  562  * nlmsg_multicast - multicast a netlink message
  563  * @sk: netlink socket to spread messages to
  564  * @skb: netlink message as socket buffer
  565  * @portid: own netlink portid to avoid sending to yourself
  566  * @group: multicast group id
  567  * @flags: allocation flags
  568  */
  569 static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
  570 				  u32 portid, unsigned int group, gfp_t flags)
  571 {
  572 	int err;
  573 
  574 	NETLINK_CB(skb).dst_group = group;
  575 
  576 	err = netlink_broadcast(sk, skb, portid, group, flags);
  577 	if (err > 0)
  578 		err = 0;
  579 
  580 	return err;
  581 }
  582 
  583 /**
  584  * nlmsg_unicast - unicast a netlink message
  585  * @sk: netlink socket to spread message to
  586  * @skb: netlink message as socket buffer
  587  * @portid: netlink portid of the destination socket
  588  */
  589 static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
  590 {
  591 	int err;
  592 
  593 	err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
  594 	if (err > 0)
  595 		err = 0;
  596 
  597 	return err;
  598 }
  599 
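A sketch of replying to a request with the unicast helper above; the
message builder is the hypothetical demo_build_msg() from the sketch
further up, and nlmsg_unicast() consumes the skb in either case:

	static int demo_reply(struct sock *sk, struct sk_buff *req_skb,
			      const struct nlmsghdr *req)
	{
		struct sk_buff *skb;

		skb = demo_build_msg(NETLINK_CB(req_skb).portid,
				     req->nlmsg_seq);
		if (!skb)
			return -ENOMEM;

		return nlmsg_unicast(sk, skb, NETLINK_CB(req_skb).portid);
	}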
  600 /**
  601  * nlmsg_for_each_msg - iterate over a stream of messages
  602  * @pos: loop counter, set to current message
  603  * @head: head of message stream
  604  * @len: length of message stream
  605  * @rem: initialized to len, holds bytes currently remaining in stream
  606  */
  607 #define nlmsg_for_each_msg(pos, head, len, rem) \
  608 	for (pos = head, rem = len; \
  609 	     nlmsg_ok(pos, rem); \
  610 	     pos = nlmsg_next(pos, &(rem)))
  611 
  612 /**
  613  * nl_dump_check_consistent - check if sequence is consistent and advertise if not
  614  * @cb: netlink callback structure that stores the sequence number
  615  * @nlh: netlink message header to write the flag to
  616  *
  617  * This function checks if the sequence (generation) number changed during dump
  618  * and if it did, advertises it in the netlink message header.
  619  *
  620  * The correct way to use it is to set cb->seq to the generation counter when
  621  * all locks for dumping have been acquired, and then call this function for
  622  * each message that is generated.
  623  *
  624  * Note that due to initialisation concerns, 0 is an invalid sequence number
  625  * and must not be used by code that uses this functionality.
  626  */
  627 static inline void
  628 nl_dump_check_consistent(struct netlink_callback *cb,
  629 			 struct nlmsghdr *nlh)
  630 {
  631 	if (cb->prev_seq && cb->seq != cb->prev_seq)
  632 		nlh->nlmsg_flags |= NLM_F_DUMP_INTR;
  633 	cb->prev_seq = cb->seq;
  634 }
  635 
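A sketch of the pattern described above inside a hypothetical dump
callback: cb->seq is set from a generation counter while the dump lock
is held, and every emitted message is checked. demo_lock, demo_gen_seq
and DEMO_MSG are hypothetical:

	static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		struct nlmsghdr *nlh;

		spin_lock(&demo_lock);
		cb->seq = demo_gen_seq;	/* must never be 0, see note above */

		nlh = nlmsg_put_answer(skb, cb, DEMO_MSG, 0, NLM_F_MULTI);
		if (nlh) {
			nl_dump_check_consistent(cb, nlh);
			nlmsg_end(skb, nlh);
		}

		spin_unlock(&demo_lock);
		return skb->len;
	}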
  636 /**************************************************************************
  637  * Netlink Attributes
  638  **************************************************************************/
  639 
  640 /**
  641  * nla_attr_size - length of attribute not including padding
  642  * @payload: length of payload
  643  */
  644 static inline int nla_attr_size(int payload)
  645 {
  646 	return NLA_HDRLEN + payload;
  647 }
  648 
  649 /**
  650  * nla_total_size - total length of attribute including padding
  651  * @payload: length of payload
  652  */
  653 static inline int nla_total_size(int payload)
  654 {
  655 	return NLA_ALIGN(nla_attr_size(payload));
  656 }
  657 
  658 /**
  659  * nla_padlen - length of padding at the tail of attribute
  660  * @payload: length of payload
  661  */
  662 static inline int nla_padlen(int payload)
  663 {
  664 	return nla_total_size(payload) - nla_attr_size(payload);
  665 }
  666 
  667 /**
  668  * nla_type - attribute type
  669  * @nla: netlink attribute
  670  */
  671 static inline int nla_type(const struct nlattr *nla)
  672 {
  673 	return nla->nla_type & NLA_TYPE_MASK;
  674 }
  675 
  676 /**
  677  * nla_data - head of payload
  678  * @nla: netlink attribute
  679  */
  680 static inline void *nla_data(const struct nlattr *nla)
  681 {
  682 	return (char *) nla + NLA_HDRLEN;
  683 }
  684 
  685 /**
  686  * nla_len - length of payload
  687  * @nla: netlink attribute
  688  */
  689 static inline int nla_len(const struct nlattr *nla)
  690 {
  691 	return nla->nla_len - NLA_HDRLEN;
  692 }
  693 
  694 /**
  695  * nla_ok - check if the netlink attribute fits into the remaining bytes
  696  * @nla: netlink attribute
  697  * @remaining: number of bytes remaining in attribute stream
  698  */
  699 static inline int nla_ok(const struct nlattr *nla, int remaining)
  700 {
  701 	return remaining >= (int) sizeof(*nla) &&
  702 	       nla->nla_len >= sizeof(*nla) &&
  703 	       nla->nla_len <= remaining;
  704 }
  705 
  706 /**
  707  * nla_next - next netlink attribute in attribute stream
  708  * @nla: netlink attribute
  709  * @remaining: number of bytes remaining in attribute stream
  710  *
  711  * Returns the next netlink attribute in the attribute stream and
  712  * decrements remaining by the size of the current attribute.
  713  */
  714 static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
  715 {
  716 	int totlen = NLA_ALIGN(nla->nla_len);
  717 
  718 	*remaining -= totlen;
  719 	return (struct nlattr *) ((char *) nla + totlen);
  720 }
  721 
  722 /**
  723  * nla_find_nested - find attribute in a set of nested attributes
  724  * @nla: attribute containing the nested attributes
  725  * @attrtype: type of attribute to look for
  726  *
  727  * Returns the first attribute which matches the specified type.
  728  */
  729 static inline struct nlattr *
  730 nla_find_nested(const struct nlattr *nla, int attrtype)
  731 {
  732 	return nla_find(nla_data(nla), nla_len(nla), attrtype);
  733 }
  734 
  735 /**
  736  * nla_parse_nested - parse nested attributes
  737  * @tb: destination array with maxtype+1 elements
  738  * @maxtype: maximum attribute type to be expected
  739  * @nla: attribute containing the nested attributes
  740  * @policy: validation policy
  741  *
  742  * See nla_parse()
  743  */
  744 static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
  745 				   const struct nlattr *nla,
  746 				   const struct nla_policy *policy)
  747 {
  748 	return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy);
  749 }
  750 
  751 /**
  752  * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
  753  * @skb: socket buffer to add attribute to
  754  * @attrtype: attribute type
  755  * @value: numeric value
  756  */
  757 static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
  758 {
  759 	return nla_put(skb, attrtype, sizeof(u8), &value);
  760 }
  761 
  762 /**
  763  * nla_put_u16 - Add a u16 netlink attribute to a socket buffer
  764  * @skb: socket buffer to add attribute to
  765  * @attrtype: attribute type
  766  * @value: numeric value
  767  */
  768 static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
  769 {
  770 	return nla_put(skb, attrtype, sizeof(u16), &value);
  771 }
  772 
  773 /**
  774  * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
  775  * @skb: socket buffer to add attribute to
  776  * @attrtype: attribute type
  777  * @value: numeric value
  778  */
  779 static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
  780 {
  781 	return nla_put(skb, attrtype, sizeof(__be16), &value);
  782 }
  783 
  784 /**
  785  * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
  786  * @skb: socket buffer to add attribute to
  787  * @attrtype: attribute type
  788  * @value: numeric value
  789  */
  790 static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
  791 {
  792 	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
  793 }
  794 
  795 /**
  796  * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
  797  * @skb: socket buffer to add attribute to
  798  * @attrtype: attribute type
  799  * @value: numeric value
  800  */
  801 static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
  802 {
  803 	return nla_put(skb, attrtype, sizeof(__le16), &value);
  804 }
  805 
  806 /**
  807  * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
  808  * @skb: socket buffer to add attribute to
  809  * @attrtype: attribute type
  810  * @value: numeric value
  811  */
  812 static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
  813 {
  814 	return nla_put(skb, attrtype, sizeof(u32), &value);
  815 }
  816 
  817 /**
  818  * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
  819  * @skb: socket buffer to add attribute to
  820  * @attrtype: attribute type
  821  * @value: numeric value
  822  */
  823 static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
  824 {
  825 	return nla_put(skb, attrtype, sizeof(__be32), &value);
  826 }
  827 
  828 /**
  829  * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
  830  * @skb: socket buffer to add attribute to
  831  * @attrtype: attribute type
  832  * @value: numeric value
  833  */
  834 static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
  835 {
  836 	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
  837 }
  838 
  839 /**
  840  * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
  841  * @skb: socket buffer to add attribute to
  842  * @attrtype: attribute type
  843  * @value: numeric value
  844  */
  845 static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
  846 {
  847 	return nla_put(skb, attrtype, sizeof(__le32), &value);
  848 }
  849 
  850 /**
  851  * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
  852  * @skb: socket buffer to add attribute to
  853  * @attrtype: attribute type
  854  * @value: numeric value
  855  * @padattr: attribute type for the padding
  856  */
  857 static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
  858 				    u64 value, int padattr)
  859 {
  860 	return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
  861 }
  862 
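A sketch of the padattr convention: a family reserves a dedicated PAD
attribute type and passes it to the 64-bit put helpers, which emit a
zero-length pad attribute first when alignment requires it.
DEMO_ATTR_STATS and DEMO_ATTR_PAD are hypothetical:

	static int demo_put_stats(struct sk_buff *skb, u64 packets)
	{
		return nla_put_u64_64bit(skb, DEMO_ATTR_STATS, packets,
					 DEMO_ATTR_PAD);
	}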
  863 /**
  864  * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
  865  * @skb: socket buffer to add attribute to
  866  * @attrtype: attribute type
  867  * @value: numeric value
  868  * @padattr: attribute type for the padding
  869  */
  870 static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
  871 			       int padattr)
  872 {
  873 	return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
  874 }
  875 
  876 /**
  877  * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
  878  * @skb: socket buffer to add attribute to
  879  * @attrtype: attribute type
  880  * @value: numeric value
  881  * @padattr: attribute type for the padding
  882  */
  883 static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
  884 				int padattr)
  885 {
  886 	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
  887 			    padattr);
  888 }
  889 
  890 /**
  891  * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
  892  * @skb: socket buffer to add attribute to
  893  * @attrtype: attribute type
  894  * @value: numeric value
  895  * @padattr: attribute type for the padding
  896  */
  897 static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
  898 			       int padattr)
  899 {
  900 	return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
  901 }
  902 
  903 /**
  904  * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
  905  * @skb: socket buffer to add attribute to
  906  * @attrtype: attribute type
  907  * @value: numeric value
  908  */
  909 static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
  910 {
  911 	return nla_put(skb, attrtype, sizeof(s8), &value);
  912 }
  913 
  914 /**
  915  * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
  916  * @skb: socket buffer to add attribute to
  917  * @attrtype: attribute type
  918  * @value: numeric value
  919  */
  920 static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
  921 {
  922 	return nla_put(skb, attrtype, sizeof(s16), &value);
  923 }
  924 
  925 /**
  926  * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
  927  * @skb: socket buffer to add attribute to
  928  * @attrtype: attribute type
  929  * @value: numeric value
  930  */
  931 static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
  932 {
  933 	return nla_put(skb, attrtype, sizeof(s32), &value);
  934 }
  935 
  936 /**
  937  * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
  938  * @skb: socket buffer to add attribute to
  939  * @attrtype: attribute type
  940  * @value: numeric value
  941  * @padattr: attribute type for the padding
  942  */
  943 static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
  944 			      int padattr)
  945 {
  946 	return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
  947 }
  948 
  949 /**
  950  * nla_put_string - Add a string netlink attribute to a socket buffer
  951  * @skb: socket buffer to add attribute to
  952  * @attrtype: attribute type
  953  * @str: NUL terminated string
  954  */
  955 static inline int nla_put_string(struct sk_buff *skb, int attrtype,
  956 				 const char *str)
  957 {
  958 	return nla_put(skb, attrtype, strlen(str) + 1, str);
  959 }
  960 
  961 /**
  962  * nla_put_flag - Add a flag netlink attribute to a socket buffer
  963  * @skb: socket buffer to add attribute to
  964  * @attrtype: attribute type
  965  */
  966 static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
  967 {
  968 	return nla_put(skb, attrtype, 0, NULL);
  969 }
  970 
  971 /**
  972  * nla_put_msecs - Add a msecs netlink attribute to a skb and align it
  973  * @skb: socket buffer to add attribute to
  974  * @attrtype: attribute type
  975  * @njiffies: number of jiffies to convert to msecs
  976  * @padattr: attribute type for the padding
  977  */
  978 static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
  979 				unsigned long njiffies, int padattr)
  980 {
  981 	u64 tmp = jiffies_to_msecs(njiffies);
  982 
  983 	return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
  984 }
  985 
  986 /**
  987  * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
  988  * buffer
  989  * @skb: socket buffer to add attribute to
  990  * @attrtype: attribute type
  991  * @addr: IPv4 address
  992  */
  993 static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
  994 				  __be32 addr)
  995 {
  996 	return nla_put_be32(skb, attrtype, addr);
  997 }
  998 
  999 /**
 1000  * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
 1001  * buffer
 1002  * @skb: socket buffer to add attribute to
 1003  * @attrtype: attribute type
 1004  * @addr: IPv6 address
 1005  */
 1006 static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
 1007 				   const struct in6_addr *addr)
 1008 {
 1009 	return nla_put(skb, attrtype, sizeof(*addr), addr);
 1010 }
 1011 
 1012 /**
 1013  * nla_get_u32 - return payload of u32 attribute
 1014  * @nla: u32 netlink attribute
 1015  */
 1016 static inline u32 nla_get_u32(const struct nlattr *nla)
 1017 {
 1018 	return *(u32 *) nla_data(nla);
 1019 }
 1020 
 1021 /**
 1022  * nla_get_be32 - return payload of __be32 attribute
 1023  * @nla: __be32 netlink attribute
 1024  */
 1025 static inline __be32 nla_get_be32(const struct nlattr *nla)
 1026 {
 1027 	return *(__be32 *) nla_data(nla);
 1028 }
 1029 
 1030 /**
 1031  * nla_get_le32 - return payload of __le32 attribute
 1032  * @nla: __le32 netlink attribute
 1033  */
 1034 static inline __le32 nla_get_le32(const struct nlattr *nla)
 1035 {
 1036 	return *(__le32 *) nla_data(nla);
 1037 }
 1038 
 1039 /**
 1040  * nla_get_u16 - return payload of u16 attribute
 1041  * @nla: u16 netlink attribute
 1042  */
 1043 static inline u16 nla_get_u16(const struct nlattr *nla)
 1044 {
 1045 	return *(u16 *) nla_data(nla);
 1046 }
 1047 
 1048 /**
 1049  * nla_get_be16 - return payload of __be16 attribute
 1050  * @nla: __be16 netlink attribute
 1051  */
 1052 static inline __be16 nla_get_be16(const struct nlattr *nla)
 1053 {
 1054 	return *(__be16 *) nla_data(nla);
 1055 }
 1056 
 1057 /**
 1058  * nla_get_le16 - return payload of __le16 attribute
 1059  * @nla: __le16 netlink attribute
 1060  */
 1061 static inline __le16 nla_get_le16(const struct nlattr *nla)
 1062 {
 1063 	return *(__le16 *) nla_data(nla);
 1064 }
 1065 
 1066 /**
 1067  * nla_get_u8 - return payload of u8 attribute
 1068  * @nla: u8 netlink attribute
 1069  */
 1070 static inline u8 nla_get_u8(const struct nlattr *nla)
 1071 {
 1072 	return *(u8 *) nla_data(nla);
 1073 }
 1074 
 1075 /**
 1076  * nla_get_u64 - return payload of u64 attribute
 1077  * @nla: u64 netlink attribute
 1078  */
 1079 static inline u64 nla_get_u64(const struct nlattr *nla)
 1080 {
 1081 	u64 tmp;
 1082 
 1083 	nla_memcpy(&tmp, nla, sizeof(tmp));
 1084 
 1085 	return tmp;
 1086 }
 1087 
 1088 /**
 1089  * nla_get_be64 - return payload of __be64 attribute
 1090  * @nla: __be64 netlink attribute
 1091  */
 1092 static inline __be64 nla_get_be64(const struct nlattr *nla)
 1093 {
 1094 	__be64 tmp;
 1095 
 1096 	nla_memcpy(&tmp, nla, sizeof(tmp));
 1097 
 1098 	return tmp;
 1099 }
 1100 
 1101 /**
 1102  * nla_get_le64 - return payload of __le64 attribute
 1103  * @nla: __le64 netlink attribute
 1104  */
 1105 static inline __le64 nla_get_le64(const struct nlattr *nla)
 1106 {
 1107 	return *(__le64 *) nla_data(nla);
 1108 }
 1109 
 1110 /**
 1111  * nla_get_s32 - return payload of s32 attribute
 1112  * @nla: s32 netlink attribute
 1113  */
 1114 static inline s32 nla_get_s32(const struct nlattr *nla)
 1115 {
 1116 	return *(s32 *) nla_data(nla);
 1117 }
 1118 
 1119 /**
 1120  * nla_get_s16 - return payload of s16 attribute
 1121  * @nla: s16 netlink attribute
 1122  */
 1123 static inline s16 nla_get_s16(const struct nlattr *nla)
 1124 {
 1125 	return *(s16 *) nla_data(nla);
 1126 }
 1127 
 1128 /**
 1129  * nla_get_s8 - return payload of s8 attribute
 1130  * @nla: s8 netlink attribute
 1131  */
 1132 static inline s8 nla_get_s8(const struct nlattr *nla)
 1133 {
 1134 	return *(s8 *) nla_data(nla);
 1135 }
 1136 
 1137 /**
 1138  * nla_get_s64 - return payload of s64 attribute
 1139  * @nla: s64 netlink attribute
 1140  */
 1141 static inline s64 nla_get_s64(const struct nlattr *nla)
 1142 {
 1143 	s64 tmp;
 1144 
 1145 	nla_memcpy(&tmp, nla, sizeof(tmp));
 1146 
 1147 	return tmp;
 1148 }
 1149 
 1150 /**
 1151  * nla_get_flag - return payload of flag attribute
 1152  * @nla: flag netlink attribute
 1153  */
 1154 static inline int nla_get_flag(const struct nlattr *nla)
 1155 {
 1156 	return !!nla;
 1157 }
 1158 
 1159 /**
 1160  * nla_get_msecs - return payload of msecs attribute
 1161  * @nla: msecs netlink attribute
 1162  *
  1163  * Returns the payload in milliseconds converted to jiffies.
 1164  */
 1165 static inline unsigned long nla_get_msecs(const struct nlattr *nla)
 1166 {
 1167 	u64 msecs = nla_get_u64(nla);
 1168 
 1169 	return msecs_to_jiffies((unsigned long) msecs);
 1170 }
 1171 
 1172 /**
 1173  * nla_get_in_addr - return payload of IPv4 address attribute
 1174  * @nla: IPv4 address netlink attribute
 1175  */
 1176 static inline __be32 nla_get_in_addr(const struct nlattr *nla)
 1177 {
 1178 	return *(__be32 *) nla_data(nla);
 1179 }
 1180 
 1181 /**
 1182  * nla_get_in6_addr - return payload of IPv6 address attribute
 1183  * @nla: IPv6 address netlink attribute
 1184  */
 1185 static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
 1186 {
 1187 	struct in6_addr tmp;
 1188 
 1189 	nla_memcpy(&tmp, nla, sizeof(tmp));
 1190 	return tmp;
 1191 }
 1192 
 1193 /**
 1194  * nla_nest_start - Start a new level of nested attributes
 1195  * @skb: socket buffer to add attributes to
 1196  * @attrtype: attribute type of container
 1197  *
 1198  * Returns the container attribute
 1199  */
 1200 static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
 1201 {
 1202 	struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
 1203 
 1204 	if (nla_put(skb, attrtype, 0, NULL) < 0)
 1205 		return NULL;
 1206 
 1207 	return start;
 1208 }
 1209 
 1210 /**
 1211  * nla_nest_end - Finalize nesting of attributes
 1212  * @skb: socket buffer the attributes are stored in
 1213  * @start: container attribute
 1214  *
  1215  * Corrects the container attribute header to include all
  1216  * appended attributes.
 1217  *
 1218  * Returns the total data length of the skb.
 1219  */
 1220 static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
 1221 {
 1222 	start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
 1223 	return skb->len;
 1224 }
 1225 
 1226 /**
 1227  * nla_nest_cancel - Cancel nesting of attributes
 1228  * @skb: socket buffer the message is stored in
 1229  * @start: container attribute
 1230  *
  1231  * Removes the container attribute including all nested
  1232  * attributes from the socket buffer again.
 1233  */
 1234 static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
 1235 {
 1236 	nlmsg_trim(skb, start);
 1237 }
 1238 
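A sketch of the nest/end/cancel trio: open a container, add members, and
either finalize the container length or roll everything back. The
DEMO_ATTR_* names are hypothetical:

	static int demo_put_endpoint(struct sk_buff *skb, __be32 addr, u16 port)
	{
		struct nlattr *nest = nla_nest_start(skb, DEMO_ATTR_ENDPOINT);

		if (!nest)
			return -EMSGSIZE;

		if (nla_put_in_addr(skb, DEMO_ATTR_ADDR, addr) ||
		    nla_put_u16(skb, DEMO_ATTR_PORT, port)) {
			nla_nest_cancel(skb, nest);	/* drops the container too */
			return -EMSGSIZE;
		}

		nla_nest_end(skb, nest);	/* patch container length */
		return 0;
	}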
 1239 /**
 1240  * nla_validate_nested - Validate a stream of nested attributes
 1241  * @start: container attribute
 1242  * @maxtype: maximum attribute type to be expected
 1243  * @policy: validation policy
 1244  *
 1245  * Validates all attributes in the nested attribute stream against the
 1246  * specified policy. Attributes with a type exceeding maxtype will be
  1247  * ignored. See documentation of struct nla_policy for more details.
 1248  *
 1249  * Returns 0 on success or a negative error code.
 1250  */
 1251 static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
 1252 				      const struct nla_policy *policy)
 1253 {
 1254 	return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
 1255 }
 1256 
 1257 /**
 1258  * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
 1259  * @skb: socket buffer the message is stored in
 1260  *
 1261  * Return true if padding is needed to align the next attribute (nla_data()) to
 1262  * a 64-bit aligned area.
 1263  */
 1264 static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
 1265 {
 1266 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 1267 	/* The nlattr header is 4 bytes in size, that's why we test
 1268 	 * if the skb->data _is_ aligned.  A NOP attribute, plus
 1269 	 * nlattr header for next attribute, will make nla_data()
 1270 	 * 8-byte aligned.
 1271 	 */
 1272 	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
 1273 		return true;
 1274 #endif
 1275 	return false;
 1276 }
 1277 
 1278 /**
 1279  * nla_align_64bit - 64-bit align the nla_data() of next attribute
 1280  * @skb: socket buffer the message is stored in
 1281  * @padattr: attribute type for the padding
 1282  *
 1283  * Conditionally emit a padding netlink attribute in order to make
 1284  * the next attribute we emit have a 64-bit aligned nla_data() area.
 1285  * This will only be done in architectures which do not have
 1286  * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
 1287  *
 1288  * Returns zero on success or a negative error code.
 1289  */
 1290 static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
 1291 {
 1292 	if (nla_need_padding_for_64bit(skb) &&
 1293 	    !nla_reserve(skb, padattr, 0))
 1294 		return -EMSGSIZE;
 1295 
 1296 	return 0;
 1297 }
 1298 
 1299 /**
 1300  * nla_total_size_64bit - total length of attribute including padding
 1301  * @payload: length of payload
 1302  */
 1303 static inline int nla_total_size_64bit(int payload)
 1304 {
 1305 	return NLA_ALIGN(nla_attr_size(payload))
 1306 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 1307 		+ NLA_ALIGN(nla_attr_size(0))
 1308 #endif
 1309 		;
 1310 }
 1311 
 1312 /**
 1313  * nla_for_each_attr - iterate over a stream of attributes
 1314  * @pos: loop counter, set to current attribute
 1315  * @head: head of attribute stream
 1316  * @len: length of attribute stream
 1317  * @rem: initialized to len, holds bytes currently remaining in stream
 1318  */
 1319 #define nla_for_each_attr(pos, head, len, rem) \
 1320 	for (pos = head, rem = len; \
 1321 	     nla_ok(pos, rem); \
 1322 	     pos = nla_next(pos, &(rem)))
 1323 
 1324 /**
 1325  * nla_for_each_nested - iterate over nested attributes
 1326  * @pos: loop counter, set to current attribute
 1327  * @nla: attribute containing the nested attributes
 1328  * @rem: initialized to len, holds bytes currently remaining in stream
 1329  */
 1330 #define nla_for_each_nested(pos, nla, rem) \
 1331 	nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
 1332 
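An iteration sketch walking the members of a nested container such as
the hypothetical endpoint built in the nesting sketch above:

	static void demo_walk_endpoint(const struct nlattr *endpoint)
	{
		const struct nlattr *pos;
		int rem;

		nla_for_each_nested(pos, endpoint, rem) {
			if (nla_type(pos) == DEMO_ATTR_PORT)
				pr_info("port: %u\n", nla_get_u16(pos));
		}
	}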
 1333 /**
 1334  * nla_is_last - Test if attribute is last in stream
 1335  * @nla: attribute to test
 1336  * @rem: bytes remaining in stream
 1337  */
 1338 static inline bool nla_is_last(const struct nlattr *nla, int rem)
 1339 {
 1340 	return nla->nla_len == rem;
 1341 }
 1342 
  1343 #endif

     1 /*
    2  * generic net pointers
    3  */
    4 
    5 #ifndef __NET_GENERIC_H__
    6 #define __NET_GENERIC_H__
    7 
    8 #include <linux/bug.h>
    9 #include <linux/rcupdate.h>
   10 
   11 /*
   12  * Generic net pointers are to be used by modules to put some private
   13  * stuff on the struct net without explicit struct net modification
   14  *
   15  * The rules are simple:
   16  * 1. set pernet_operations->id.  After register_pernet_device you
   17  *    will have the id of your private pointer.
   18  * 2. set pernet_operations->size to have the code allocate and free
   19  *    a private structure pointed to from struct net.
   20  * 3. do not change this pointer while the net is alive;
   21  * 4. do not try to have any private reference on the net_generic object.
   22  *
   23  * After accomplishing all of the above, the private pointer can be
   24  * accessed with the net_generic() call.
   25  */
   26 
   27 struct net_generic {
   28 	unsigned int len;
   29 	struct rcu_head rcu;
   30 
   31 	void *ptr[0];
   32 };
   33 
   34 static inline void *net_generic(const struct net *net, int id)
   35 {
   36 	struct net_generic *ng;
   37 	void *ptr;
   38 
   39 	rcu_read_lock();
   40 	ng = rcu_dereference(net->gen);
   41 	ptr = ng->ptr[id - 1];
   42 	rcu_read_unlock();
   43 
   44 	return ptr;
   45 }
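A sketch of the rules above, with hypothetical demo_* names: the module
registers pernet_operations with both id and size set (e.g. via
register_pernet_device()), then retrieves its private structure through
net_generic():

	struct demo_net {
		int counter;
	};

	static int demo_net_id;

	static struct pernet_operations demo_net_ops = {
		.id   = &demo_net_id,			/* filled in on register */
		.size = sizeof(struct demo_net),	/* allocated per struct net */
	};

	static void demo_use(struct net *net)
	{
		struct demo_net *dn = net_generic(net, demo_net_id);

		dn->counter++;
	}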
    46 #endif
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether there is an error in your driver.
The Error trace column contains the path along which the given rule is violated. You can expand/collapse some entity classes by clicking on the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking on +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code. Line numbers may be shown as links on the left; you can click on them to open the corresponding lines in the source code.
The Source code column contains the contents of the files related to the error trace. These include the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files. Hovering over them shows the full file names. Clicking on a tab shows the corresponding file's content.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
| linux-4.7-rc1.tar.xz | net/sched/act_ife.ko | 43_1a | CPAchecker | Bug | Fixed | 2016-06-16 23:56:32 | L0228 | 
Comment
reported: 16 Jun 2016