Bug
Error # 159
Error trace
         {    19     typedef signed char __s8;    20     typedef unsigned char __u8;    23     typedef unsigned short __u16;    25     typedef int __s32;    26     typedef unsigned int __u32;    29     typedef long long __s64;    30     typedef unsigned long long __u64;    15     typedef signed char s8;    16     typedef unsigned char u8;    18     typedef short s16;    19     typedef unsigned short u16;    21     typedef int s32;    22     typedef unsigned int u32;    24     typedef long long s64;    25     typedef unsigned long long u64;    14     typedef long __kernel_long_t;    15     typedef unsigned long __kernel_ulong_t;    27     typedef int __kernel_pid_t;    48     typedef unsigned int __kernel_uid32_t;    49     typedef unsigned int __kernel_gid32_t;    71     typedef __kernel_ulong_t __kernel_size_t;    72     typedef __kernel_long_t __kernel_ssize_t;    87     typedef long long __kernel_loff_t;    88     typedef __kernel_long_t __kernel_time_t;    89     typedef __kernel_long_t __kernel_clock_t;    90     typedef int __kernel_timer_t;    91     typedef int __kernel_clockid_t;    33     typedef __u16 __be16;    35     typedef __u32 __be32;    39     typedef __u16 __sum16;    40     typedef __u32 __wsum;   280     struct kernel_symbol {   unsigned long value;   const char *name; } ;    34     struct module ;    12     typedef __u32 __kernel_dev_t;    15     typedef __kernel_dev_t dev_t;    18     typedef unsigned short umode_t;    21     typedef __kernel_pid_t pid_t;    26     typedef __kernel_clockid_t clockid_t;    29     typedef _Bool bool;    31     typedef __kernel_uid32_t uid_t;    32     typedef __kernel_gid32_t gid_t;    45     typedef __kernel_loff_t loff_t;    54     typedef __kernel_size_t size_t;    59     typedef __kernel_ssize_t ssize_t;    69     typedef __kernel_time_t time_t;   102     typedef __s32 int32_t;   106     typedef __u8 uint8_t;   108     typedef __u32 uint32_t;   111     typedef __u64 uint64_t;   133     typedef unsigned long sector_t;   134     typedef unsigned long blkcnt_t;   152     typedef u64 dma_addr_t;   157     typedef unsigned int gfp_t;   158     typedef unsigned int fmode_t;   161     typedef u64 phys_addr_t;   166     typedef phys_addr_t resource_size_t;   172     typedef unsigned long irq_hw_number_t;   176     struct __anonstruct_atomic_t_6 {   int counter; } ;   176     typedef struct __anonstruct_atomic_t_6 atomic_t;   181     struct __anonstruct_atomic64_t_7 {   long counter; } ;   181     typedef struct __anonstruct_atomic64_t_7 atomic64_t;   182     struct list_head {   struct list_head *next;   struct list_head *prev; } ;   187     struct hlist_node ;   187     struct hlist_head {   struct hlist_node *first; } ;   191     struct hlist_node {   struct hlist_node *next;   struct hlist_node **pprev; } ;   202     struct callback_head {   struct callback_head *next;   void (*func)(struct callback_head *); } ;   115     typedef void (*ctor_fn_t)();    68     struct ctl_table ;   259     struct _ddebug {   const char *modname;   const char *function;   const char *filename;   const char *format;   unsigned int lineno;   unsigned char flags; } ;    58     struct device ;    64     struct net_device ;   450     struct file_operations ;   462     struct completion ;   463     struct pt_regs ;   557     struct task_struct ;    27     union __anonunion___u_9 {   struct list_head *__val;   char __c[1U]; } ;    20     struct qspinlock {   atomic_t val; } ;    33     typedef struct qspinlock arch_spinlock_t;    34     struct qrwlock {   
atomic_t cnts;   arch_spinlock_t wait_lock; } ;    14     typedef struct qrwlock arch_rwlock_t;    15     struct lockdep_map ;    26     union __anonunion___u_25 {   int __val;   char __c[1U]; } ;    23     typedef atomic64_t atomic_long_t;   242     struct bug_entry {   int bug_addr_disp;   int file_disp;   unsigned short line;   unsigned short flags; } ;    55     struct stack_trace {   unsigned int nr_entries;   unsigned int max_entries;   unsigned long *entries;   int skip; } ;    28     struct lockdep_subclass_key {   char __one_byte; } ;    53     struct lock_class_key {   struct lockdep_subclass_key subkeys[8U]; } ;    59     struct lock_class {   struct hlist_node hash_entry;   struct list_head lock_entry;   struct lockdep_subclass_key *key;   unsigned int subclass;   unsigned int dep_gen_id;   unsigned long usage_mask;   struct stack_trace usage_traces[13U];   struct list_head locks_after;   struct list_head locks_before;   unsigned int version;   unsigned long ops;   const char *name;   int name_version;   unsigned long contention_point[4U];   unsigned long contending_point[4U]; } ;   144     struct lockdep_map {   struct lock_class_key *key;   struct lock_class *class_cache[2U];   const char *name;   int cpu;   unsigned long ip; } ;   207     struct held_lock {   u64 prev_chain_key;   unsigned long acquire_ip;   struct lockdep_map *instance;   struct lockdep_map *nest_lock;   u64 waittime_stamp;   u64 holdtime_stamp;   unsigned short class_idx;   unsigned char irq_context;   unsigned char trylock;   unsigned char read;   unsigned char check;   unsigned char hardirqs_off;   unsigned short references;   unsigned int pin_count; } ;   572     struct raw_spinlock {   arch_spinlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    32     typedef struct raw_spinlock raw_spinlock_t;    33     struct __anonstruct____missing_field_name_37 {   u8 __padding[24U];   struct lockdep_map dep_map; } ;    33     union __anonunion____missing_field_name_36 {   struct raw_spinlock rlock;   struct __anonstruct____missing_field_name_37 __annonCompField4; } ;    33     struct spinlock {   union __anonunion____missing_field_name_36 __annonCompField5; } ;    76     typedef struct spinlock spinlock_t;    23     struct __anonstruct_rwlock_t_38 {   arch_rwlock_t raw_lock;   unsigned int magic;   unsigned int owner_cpu;   void *owner;   struct lockdep_map dep_map; } ;    23     typedef struct __anonstruct_rwlock_t_38 rwlock_t;    23     struct mm_struct ;    72     struct pt_regs {   unsigned long r15;   unsigned long r14;   unsigned long r13;   unsigned long r12;   unsigned long bp;   unsigned long bx;   unsigned long r11;   unsigned long r10;   unsigned long r9;   unsigned long r8;   unsigned long ax;   unsigned long cx;   unsigned long dx;   unsigned long si;   unsigned long di;   unsigned long orig_ax;   unsigned long ip;   unsigned long cs;   unsigned long flags;   unsigned long sp;   unsigned long ss; } ;    66     struct __anonstruct____missing_field_name_40 {   unsigned int a;   unsigned int b; } ;    66     struct __anonstruct____missing_field_name_41 {   u16 limit0;   u16 base0;   unsigned char base1;   unsigned char type;   unsigned char s;   unsigned char dpl;   unsigned char p;   unsigned char limit;   unsigned char avl;   unsigned char l;   unsigned char d;   unsigned char g;   unsigned char base2; } ;    66     union __anonunion____missing_field_name_39 {   struct __anonstruct____missing_field_name_40 __annonCompField6;   struct 
__anonstruct____missing_field_name_41 __annonCompField7; } ;    66     struct desc_struct {   union __anonunion____missing_field_name_39 __annonCompField8; } ;    13     typedef unsigned long pteval_t;    14     typedef unsigned long pmdval_t;    16     typedef unsigned long pgdval_t;    17     typedef unsigned long pgprotval_t;    19     struct __anonstruct_pte_t_42 {   pteval_t pte; } ;    19     typedef struct __anonstruct_pte_t_42 pte_t;    21     struct pgprot {   pgprotval_t pgprot; } ;   256     typedef struct pgprot pgprot_t;   258     struct __anonstruct_pgd_t_43 {   pgdval_t pgd; } ;   258     typedef struct __anonstruct_pgd_t_43 pgd_t;   297     struct __anonstruct_pmd_t_45 {   pmdval_t pmd; } ;   297     typedef struct __anonstruct_pmd_t_45 pmd_t;   423     struct page ;   423     typedef struct page *pgtable_t;   434     struct file ;   445     struct seq_file ;   481     struct thread_struct ;   483     struct cpumask ;   484     struct paravirt_callee_save {   void *func; } ;   181     struct pv_irq_ops {   struct paravirt_callee_save save_fl;   struct paravirt_callee_save restore_fl;   struct paravirt_callee_save irq_disable;   struct paravirt_callee_save irq_enable;   void (*safe_halt)();   void (*halt)();   void (*adjust_exception_frame)(); } ;   247     struct math_emu_info {   long ___orig_eip;   struct pt_regs *regs; } ;    83     struct static_key {   atomic_t enabled; } ;   359     struct cpumask {   unsigned long bits[128U]; } ;    15     typedef struct cpumask cpumask_t;   654     typedef struct cpumask *cpumask_var_t;    22     struct tracepoint_func {   void *func;   void *data;   int prio; } ;    28     struct tracepoint {   const char *name;   struct static_key key;   void (*regfunc)();   void (*unregfunc)();   struct tracepoint_func *funcs; } ;   246     struct fregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u32 status; } ;    26     struct __anonstruct____missing_field_name_58 {   u64 rip;   u64 rdp; } ;    26     struct __anonstruct____missing_field_name_59 {   u32 fip;   u32 fcs;   u32 foo;   u32 fos; } ;    26     union __anonunion____missing_field_name_57 {   struct __anonstruct____missing_field_name_58 __annonCompField14;   struct __anonstruct____missing_field_name_59 __annonCompField15; } ;    26     union __anonunion____missing_field_name_60 {   u32 padding1[12U];   u32 sw_reserved[12U]; } ;    26     struct fxregs_state {   u16 cwd;   u16 swd;   u16 twd;   u16 fop;   union __anonunion____missing_field_name_57 __annonCompField16;   u32 mxcsr;   u32 mxcsr_mask;   u32 st_space[32U];   u32 xmm_space[64U];   u32 padding[12U];   union __anonunion____missing_field_name_60 __annonCompField17; } ;    66     struct swregs_state {   u32 cwd;   u32 swd;   u32 twd;   u32 fip;   u32 fcs;   u32 foo;   u32 fos;   u32 st_space[20U];   u8 ftop;   u8 changed;   u8 lookahead;   u8 no_update;   u8 rm;   u8 alimit;   struct math_emu_info *info;   u32 entry_eip; } ;   227     struct xstate_header {   u64 xfeatures;   u64 xcomp_bv;   u64 reserved[6U]; } ;   233     struct xregs_state {   struct fxregs_state i387;   struct xstate_header header;   u8 extended_state_area[0U]; } ;   254     union fpregs_state {   struct fregs_state fsave;   struct fxregs_state fxsave;   struct swregs_state soft;   struct xregs_state xsave;   u8 __padding[4096U]; } ;   271     struct fpu {   unsigned int last_cpu;   unsigned char fpstate_active;   unsigned char fpregs_active;   unsigned char counter;   union fpregs_state state; } ; 
  169     struct seq_operations ;   372     struct perf_event ;   377     struct __anonstruct_mm_segment_t_72 {   unsigned long seg; } ;   377     typedef struct __anonstruct_mm_segment_t_72 mm_segment_t;   378     struct thread_struct {   struct desc_struct tls_array[3U];   unsigned long sp0;   unsigned long sp;   unsigned short es;   unsigned short ds;   unsigned short fsindex;   unsigned short gsindex;   u32 status;   unsigned long fsbase;   unsigned long gsbase;   struct perf_event *ptrace_bps[4U];   unsigned long debugreg6;   unsigned long ptrace_dr7;   unsigned long cr2;   unsigned long trap_nr;   unsigned long error_code;   unsigned long *io_bitmap_ptr;   unsigned long iopl;   unsigned int io_bitmap_max;   mm_segment_t addr_limit;   unsigned char sig_on_uaccess_err;   unsigned char uaccess_err;   struct fpu fpu; } ;    13     struct optimistic_spin_queue {   atomic_t tail; } ;    39     struct mutex {   atomic_t count;   spinlock_t wait_lock;   struct list_head wait_list;   struct task_struct *owner;   void *magic;   struct lockdep_map dep_map; } ;    67     struct mutex_waiter {   struct list_head list;   struct task_struct *task;   void *magic; } ;   177     struct timespec ;   178     struct compat_timespec ;   179     struct thread_info {   unsigned long flags; } ;    20     struct __anonstruct_futex_74 {   u32 *uaddr;   u32 val;   u32 flags;   u32 bitset;   u64 time;   u32 *uaddr2; } ;    20     struct __anonstruct_nanosleep_75 {   clockid_t clockid;   struct timespec *rmtp;   struct compat_timespec *compat_rmtp;   u64 expires; } ;    20     struct pollfd ;    20     struct __anonstruct_poll_76 {   struct pollfd *ufds;   int nfds;   int has_timeout;   unsigned long tv_sec;   unsigned long tv_nsec; } ;    20     union __anonunion____missing_field_name_73 {   struct __anonstruct_futex_74 futex;   struct __anonstruct_nanosleep_75 nanosleep;   struct __anonstruct_poll_76 poll; } ;    20     struct restart_block {   long int (*fn)(struct restart_block *);   union __anonunion____missing_field_name_73 __annonCompField20; } ;   416     struct rw_semaphore ;   417     struct rw_semaphore {   atomic_long_t count;   struct list_head wait_list;   raw_spinlock_t wait_lock;   struct optimistic_spin_queue osq;   struct task_struct *owner;   struct lockdep_map dep_map; } ;   178     struct seqcount {   unsigned int sequence;   struct lockdep_map dep_map; } ;    52     typedef struct seqcount seqcount_t;   407     struct __anonstruct_seqlock_t_91 {   struct seqcount seqcount;   spinlock_t lock; } ;   407     typedef struct __anonstruct_seqlock_t_91 seqlock_t;    38     struct __wait_queue_head {   spinlock_t lock;   struct list_head task_list; } ;    43     typedef struct __wait_queue_head wait_queue_head_t;  1225     struct completion {   unsigned int done;   wait_queue_head_t wait; } ;   108     struct timespec {   __kernel_time_t tv_sec;   long tv_nsec; } ;     7     typedef __s64 time64_t;   450     union ktime {   s64 tv64; } ;    41     typedef union ktime ktime_t;  1145     struct timer_list {   struct hlist_node entry;   unsigned long expires;   void (*function)(unsigned long);   unsigned long data;   u32 flags;   int start_pid;   void *start_site;   char start_comm[16U];   struct lockdep_map lockdep_map; } ;   254     struct hrtimer ;   255     enum hrtimer_restart ;   256     struct rb_node {   unsigned long __rb_parent_color;   struct rb_node *rb_right;   struct rb_node *rb_left; } ;    41     struct rb_root {   struct rb_node *rb_node; } ;    83     struct user_namespace ;    22     
struct __anonstruct_kuid_t_96 {   uid_t val; } ;    22     typedef struct __anonstruct_kuid_t_96 kuid_t;    27     struct __anonstruct_kgid_t_97 {   gid_t val; } ;    27     typedef struct __anonstruct_kgid_t_97 kgid_t;   835     struct nsproxy ;   836     struct ctl_table_root ;   837     struct ctl_table_header ;   838     struct ctl_dir ;    39     typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);    61     struct ctl_table_poll {   atomic_t event;   wait_queue_head_t wait; } ;   100     struct ctl_table {   const char *procname;   void *data;   int maxlen;   umode_t mode;   struct ctl_table *child;   proc_handler *proc_handler;   struct ctl_table_poll *poll;   void *extra1;   void *extra2; } ;   121     struct ctl_node {   struct rb_node node;   struct ctl_table_header *header; } ;   126     struct __anonstruct____missing_field_name_99 {   struct ctl_table *ctl_table;   int used;   int count;   int nreg; } ;   126     union __anonunion____missing_field_name_98 {   struct __anonstruct____missing_field_name_99 __annonCompField21;   struct callback_head rcu; } ;   126     struct ctl_table_set ;   126     struct ctl_table_header {   union __anonunion____missing_field_name_98 __annonCompField22;   struct completion *unregistering;   struct ctl_table *ctl_table_arg;   struct ctl_table_root *root;   struct ctl_table_set *set;   struct ctl_dir *parent;   struct ctl_node *node; } ;   147     struct ctl_dir {   struct ctl_table_header header;   struct rb_root root; } ;   153     struct ctl_table_set {   int (*is_seen)(struct ctl_table_set *);   struct ctl_dir dir; } ;   158     struct ctl_table_root {   struct ctl_table_set default_set;   struct ctl_table_set * (*lookup)(struct ctl_table_root *);   void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *);   int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ;   278     struct workqueue_struct ;   279     struct work_struct ;    54     struct work_struct {   atomic_long_t data;   struct list_head entry;   void (*func)(struct work_struct *);   struct lockdep_map lockdep_map; } ;   107     struct delayed_work {   struct work_struct work;   struct timer_list timer;   struct workqueue_struct *wq;   int cpu; } ;   268     struct notifier_block ;    53     struct notifier_block {   int (*notifier_call)(struct notifier_block *, unsigned long, void *);   struct notifier_block *next;   int priority; } ;   215     struct clk ;   503     struct device_node ;   135     struct kstat {   u64 ino;   dev_t dev;   umode_t mode;   unsigned int nlink;   kuid_t uid;   kgid_t gid;   dev_t rdev;   loff_t size;   struct timespec atime;   struct timespec mtime;   struct timespec ctime;   unsigned long blksize;   unsigned long long blocks; } ;    36     struct vm_area_struct ;    97     struct __anonstruct_nodemask_t_100 {   unsigned long bits[16U]; } ;    97     typedef struct __anonstruct_nodemask_t_100 nodemask_t;   249     typedef unsigned int isolate_mode_t;   777     struct resource ;    66     struct resource {   resource_size_t start;   resource_size_t end;   const char *name;   unsigned long flags;   unsigned long desc;   struct resource *parent;   struct resource *sibling;   struct resource *child; } ;    58     struct pm_message {   int event; } ;    64     typedef struct pm_message pm_message_t;    65     struct dev_pm_ops {   int (*prepare)(struct device *);   void (*complete)(struct device *);   int (*suspend)(struct device *);   int (*resume)(struct device *);   int 
(*freeze)(struct device *);   int (*thaw)(struct device *);   int (*poweroff)(struct device *);   int (*restore)(struct device *);   int (*suspend_late)(struct device *);   int (*resume_early)(struct device *);   int (*freeze_late)(struct device *);   int (*thaw_early)(struct device *);   int (*poweroff_late)(struct device *);   int (*restore_early)(struct device *);   int (*suspend_noirq)(struct device *);   int (*resume_noirq)(struct device *);   int (*freeze_noirq)(struct device *);   int (*thaw_noirq)(struct device *);   int (*poweroff_noirq)(struct device *);   int (*restore_noirq)(struct device *);   int (*runtime_suspend)(struct device *);   int (*runtime_resume)(struct device *);   int (*runtime_idle)(struct device *); } ;   320     enum rpm_status {   RPM_ACTIVE = 0,   RPM_RESUMING = 1,   RPM_SUSPENDED = 2,   RPM_SUSPENDING = 3 } ;   327     enum rpm_request {   RPM_REQ_NONE = 0,   RPM_REQ_IDLE = 1,   RPM_REQ_SUSPEND = 2,   RPM_REQ_AUTOSUSPEND = 3,   RPM_REQ_RESUME = 4 } ;   335     struct wakeup_source ;   336     struct wake_irq ;   337     struct pm_domain_data ;   338     struct pm_subsys_data {   spinlock_t lock;   unsigned int refcount;   struct list_head clock_list;   struct pm_domain_data *domain_data; } ;   556     struct dev_pm_qos ;   556     struct dev_pm_info {   pm_message_t power_state;   unsigned char can_wakeup;   unsigned char async_suspend;   bool is_prepared;   bool is_suspended;   bool is_noirq_suspended;   bool is_late_suspended;   bool early_init;   bool direct_complete;   spinlock_t lock;   struct list_head entry;   struct completion completion;   struct wakeup_source *wakeup;   bool wakeup_path;   bool syscore;   bool no_pm_callbacks;   struct timer_list suspend_timer;   unsigned long timer_expires;   struct work_struct work;   wait_queue_head_t wait_queue;   struct wake_irq *wakeirq;   atomic_t usage_count;   atomic_t child_count;   unsigned char disable_depth;   unsigned char idle_notification;   unsigned char request_pending;   unsigned char deferred_resume;   unsigned char run_wake;   unsigned char runtime_auto;   bool ignore_children;   unsigned char no_callbacks;   unsigned char irq_safe;   unsigned char use_autosuspend;   unsigned char timer_autosuspends;   unsigned char memalloc_noio;   enum rpm_request request;   enum rpm_status runtime_status;   int runtime_error;   int autosuspend_delay;   unsigned long last_busy;   unsigned long active_jiffies;   unsigned long suspended_jiffies;   unsigned long accounting_timestamp;   struct pm_subsys_data *subsys_data;   void (*set_latency_tolerance)(struct device *, s32 );   struct dev_pm_qos *qos; } ;   616     struct dev_pm_domain {   struct dev_pm_ops ops;   void (*detach)(struct device *, bool );   int (*activate)(struct device *);   void (*sync)(struct device *);   void (*dismiss)(struct device *); } ;    34     struct ldt_struct ;    34     struct vdso_image ;    34     struct __anonstruct_mm_context_t_165 {   struct ldt_struct *ldt;   unsigned short ia32_compat;   struct mutex lock;   void *vdso;   const struct vdso_image *vdso_image;   atomic_t perf_rdpmc_allowed;   u16 pkey_allocation_map;   s16 execute_only_pkey; } ;    34     typedef struct __anonstruct_mm_context_t_165 mm_context_t;    22     struct bio_vec ;  1290     struct llist_node ;    64     struct llist_node {   struct llist_node *next; } ;    37     struct cred ;    19     struct inode ;    58     struct arch_uprobe_task {   unsigned long saved_scratch_register;   unsigned int saved_trap_nr;   unsigned int saved_tf; } ;    66     enum 
uprobe_task_state {   UTASK_RUNNING = 0,   UTASK_SSTEP = 1,   UTASK_SSTEP_ACK = 2,   UTASK_SSTEP_TRAPPED = 3 } ;    73     struct __anonstruct____missing_field_name_211 {   struct arch_uprobe_task autask;   unsigned long vaddr; } ;    73     struct __anonstruct____missing_field_name_212 {   struct callback_head dup_xol_work;   unsigned long dup_xol_addr; } ;    73     union __anonunion____missing_field_name_210 {   struct __anonstruct____missing_field_name_211 __annonCompField35;   struct __anonstruct____missing_field_name_212 __annonCompField36; } ;    73     struct uprobe ;    73     struct return_instance ;    73     struct uprobe_task {   enum uprobe_task_state state;   union __anonunion____missing_field_name_210 __annonCompField37;   struct uprobe *active_uprobe;   unsigned long xol_vaddr;   struct return_instance *return_instances;   unsigned int depth; } ;    94     struct return_instance {   struct uprobe *uprobe;   unsigned long func;   unsigned long stack;   unsigned long orig_ret_vaddr;   bool chained;   struct return_instance *next; } ;   110     struct xol_area ;   111     struct uprobes_state {   struct xol_area *xol_area; } ;   150     struct address_space ;   151     struct mem_cgroup ;   152     union __anonunion____missing_field_name_213 {   struct address_space *mapping;   void *s_mem;   atomic_t compound_mapcount; } ;   152     union __anonunion____missing_field_name_214 {   unsigned long index;   void *freelist; } ;   152     struct __anonstruct____missing_field_name_218 {   unsigned short inuse;   unsigned short objects;   unsigned char frozen; } ;   152     union __anonunion____missing_field_name_217 {   atomic_t _mapcount;   unsigned int active;   struct __anonstruct____missing_field_name_218 __annonCompField40;   int units; } ;   152     struct __anonstruct____missing_field_name_216 {   union __anonunion____missing_field_name_217 __annonCompField41;   atomic_t _refcount; } ;   152     union __anonunion____missing_field_name_215 {   unsigned long counters;   struct __anonstruct____missing_field_name_216 __annonCompField42; } ;   152     struct dev_pagemap ;   152     struct __anonstruct____missing_field_name_220 {   struct page *next;   int pages;   int pobjects; } ;   152     struct __anonstruct____missing_field_name_221 {   unsigned long compound_head;   unsigned int compound_dtor;   unsigned int compound_order; } ;   152     struct __anonstruct____missing_field_name_222 {   unsigned long __pad;   pgtable_t pmd_huge_pte; } ;   152     union __anonunion____missing_field_name_219 {   struct list_head lru;   struct dev_pagemap *pgmap;   struct __anonstruct____missing_field_name_220 __annonCompField44;   struct callback_head callback_head;   struct __anonstruct____missing_field_name_221 __annonCompField45;   struct __anonstruct____missing_field_name_222 __annonCompField46; } ;   152     struct kmem_cache ;   152     union __anonunion____missing_field_name_223 {   unsigned long private;   spinlock_t *ptl;   struct kmem_cache *slab_cache; } ;   152     struct page {   unsigned long flags;   union __anonunion____missing_field_name_213 __annonCompField38;   union __anonunion____missing_field_name_214 __annonCompField39;   union __anonunion____missing_field_name_215 __annonCompField43;   union __anonunion____missing_field_name_219 __annonCompField47;   union __anonunion____missing_field_name_223 __annonCompField48;   struct mem_cgroup *mem_cgroup; } ;   197     struct page_frag {   struct page *page;   __u32 offset;   __u32 size; } ;   282     struct userfaultfd_ctx ;   
282     struct vm_userfaultfd_ctx {   struct userfaultfd_ctx *ctx; } ;   289     struct __anonstruct_shared_224 {   struct rb_node rb;   unsigned long rb_subtree_last; } ;   289     struct anon_vma ;   289     struct vm_operations_struct ;   289     struct mempolicy ;   289     struct vm_area_struct {   unsigned long vm_start;   unsigned long vm_end;   struct vm_area_struct *vm_next;   struct vm_area_struct *vm_prev;   struct rb_node vm_rb;   unsigned long rb_subtree_gap;   struct mm_struct *vm_mm;   pgprot_t vm_page_prot;   unsigned long vm_flags;   struct __anonstruct_shared_224 shared;   struct list_head anon_vma_chain;   struct anon_vma *anon_vma;   const struct vm_operations_struct *vm_ops;   unsigned long vm_pgoff;   struct file *vm_file;   void *vm_private_data;   struct mempolicy *vm_policy;   struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;   362     struct core_thread {   struct task_struct *task;   struct core_thread *next; } ;   367     struct core_state {   atomic_t nr_threads;   struct core_thread dumper;   struct completion startup; } ;   381     struct task_rss_stat {   int events;   int count[4U]; } ;   389     struct mm_rss_stat {   atomic_long_t count[4U]; } ;   394     struct kioctx_table ;   395     struct linux_binfmt ;   395     struct mmu_notifier_mm ;   395     struct mm_struct {   struct vm_area_struct *mmap;   struct rb_root mm_rb;   u32 vmacache_seqnum;   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   unsigned long mmap_base;   unsigned long mmap_legacy_base;   unsigned long task_size;   unsigned long highest_vm_end;   pgd_t *pgd;   atomic_t mm_users;   atomic_t mm_count;   atomic_long_t nr_ptes;   atomic_long_t nr_pmds;   int map_count;   spinlock_t page_table_lock;   struct rw_semaphore mmap_sem;   struct list_head mmlist;   unsigned long hiwater_rss;   unsigned long hiwater_vm;   unsigned long total_vm;   unsigned long locked_vm;   unsigned long pinned_vm;   unsigned long data_vm;   unsigned long exec_vm;   unsigned long stack_vm;   unsigned long def_flags;   unsigned long start_code;   unsigned long end_code;   unsigned long start_data;   unsigned long end_data;   unsigned long start_brk;   unsigned long brk;   unsigned long start_stack;   unsigned long arg_start;   unsigned long arg_end;   unsigned long env_start;   unsigned long env_end;   unsigned long saved_auxv[46U];   struct mm_rss_stat rss_stat;   struct linux_binfmt *binfmt;   cpumask_var_t cpu_vm_mask_var;   mm_context_t context;   unsigned long flags;   struct core_state *core_state;   spinlock_t ioctx_lock;   struct kioctx_table *ioctx_table;   struct task_struct *owner;   struct file *exe_file;   struct mmu_notifier_mm *mmu_notifier_mm;   struct cpumask cpumask_allocation;   unsigned long numa_next_scan;   unsigned long numa_scan_offset;   int numa_scan_seq;   bool tlb_flush_pending;   struct uprobes_state uprobes_state;   void *bd_addr;   atomic_long_t hugetlb_usage;   struct work_struct async_put_work; } ;   563     struct vm_fault ;   617     struct vdso_image {   void *data;   unsigned long size;   unsigned long alt;   unsigned long alt_len;   long sym_vvar_start;   long sym_vvar_page;   long sym_hpet_page;   long sym_pvclock_page;   long sym_VDSO32_NOTE_MASK;   long sym___kernel_sigreturn;   long sym___kernel_rt_sigreturn;   long sym___kernel_vsyscall;   long sym_int80_landing_pad; } ;    15     typedef __u64 Elf64_Addr;    16     typedef __u16 Elf64_Half;    18     typedef __u64 Elf64_Off;    20     typedef __u32 
Elf64_Word;    21     typedef __u64 Elf64_Xword;   190     struct elf64_sym {   Elf64_Word st_name;   unsigned char st_info;   unsigned char st_other;   Elf64_Half st_shndx;   Elf64_Addr st_value;   Elf64_Xword st_size; } ;   198     typedef struct elf64_sym Elf64_Sym;   219     struct elf64_hdr {   unsigned char e_ident[16U];   Elf64_Half e_type;   Elf64_Half e_machine;   Elf64_Word e_version;   Elf64_Addr e_entry;   Elf64_Off e_phoff;   Elf64_Off e_shoff;   Elf64_Word e_flags;   Elf64_Half e_ehsize;   Elf64_Half e_phentsize;   Elf64_Half e_phnum;   Elf64_Half e_shentsize;   Elf64_Half e_shnum;   Elf64_Half e_shstrndx; } ;   235     typedef struct elf64_hdr Elf64_Ehdr;   314     struct elf64_shdr {   Elf64_Word sh_name;   Elf64_Word sh_type;   Elf64_Xword sh_flags;   Elf64_Addr sh_addr;   Elf64_Off sh_offset;   Elf64_Xword sh_size;   Elf64_Word sh_link;   Elf64_Word sh_info;   Elf64_Xword sh_addralign;   Elf64_Xword sh_entsize; } ;   326     typedef struct elf64_shdr Elf64_Shdr;    53     union __anonunion____missing_field_name_229 {   unsigned long bitmap[4U];   struct callback_head callback_head; } ;    53     struct idr_layer {   int prefix;   int layer;   struct idr_layer *ary[256U];   int count;   union __anonunion____missing_field_name_229 __annonCompField49; } ;    41     struct idr {   struct idr_layer *hint;   struct idr_layer *top;   int layers;   int cur;   spinlock_t lock;   int id_free_cnt;   struct idr_layer *id_free; } ;   124     struct ida_bitmap {   long nr_busy;   unsigned long bitmap[15U]; } ;   167     struct ida {   struct idr idr;   struct ida_bitmap *free_bitmap; } ;   199     struct dentry ;   200     struct iattr ;   201     struct super_block ;   202     struct file_system_type ;   203     struct kernfs_open_node ;   204     struct kernfs_iattrs ;   227     struct kernfs_root ;   227     struct kernfs_elem_dir {   unsigned long subdirs;   struct rb_root children;   struct kernfs_root *root; } ;    85     struct kernfs_node ;    85     struct kernfs_elem_symlink {   struct kernfs_node *target_kn; } ;    89     struct kernfs_ops ;    89     struct kernfs_elem_attr {   const struct kernfs_ops *ops;   struct kernfs_open_node *open;   loff_t size;   struct kernfs_node *notify_next; } ;    96     union __anonunion____missing_field_name_234 {   struct kernfs_elem_dir dir;   struct kernfs_elem_symlink symlink;   struct kernfs_elem_attr attr; } ;    96     struct kernfs_node {   atomic_t count;   atomic_t active;   struct lockdep_map dep_map;   struct kernfs_node *parent;   const char *name;   struct rb_node rb;   const void *ns;   unsigned int hash;   union __anonunion____missing_field_name_234 __annonCompField50;   void *priv;   unsigned short flags;   umode_t mode;   unsigned int ino;   struct kernfs_iattrs *iattr; } ;   138     struct kernfs_syscall_ops {   int (*remount_fs)(struct kernfs_root *, int *, char *);   int (*show_options)(struct seq_file *, struct kernfs_root *);   int (*mkdir)(struct kernfs_node *, const char *, umode_t );   int (*rmdir)(struct kernfs_node *);   int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *);   int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;   157     struct kernfs_root {   struct kernfs_node *kn;   unsigned int flags;   struct ida ino_ida;   struct kernfs_syscall_ops *syscall_ops;   struct list_head supers;   wait_queue_head_t deactivate_waitq; } ;   173     struct kernfs_open_file {   struct kernfs_node *kn;   struct file *file;   void *priv;   struct mutex mutex;   struct 
mutex prealloc_mutex;   int event;   struct list_head list;   char *prealloc_buf;   size_t atomic_write_len;   bool mmapped;   const struct vm_operations_struct *vm_ops; } ;   191     struct kernfs_ops {   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   ssize_t  (*read)(struct kernfs_open_file *, char *, size_t , loff_t );   size_t atomic_write_len;   bool prealloc;   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);   struct lock_class_key lockdep_key; } ;   511     struct sock ;   512     struct kobject ;   513     enum kobj_ns_type {   KOBJ_NS_TYPE_NONE = 0,   KOBJ_NS_TYPE_NET = 1,   KOBJ_NS_TYPES = 2 } ;   519     struct kobj_ns_type_operations {   enum kobj_ns_type type;   bool  (*current_may_mount)();   void * (*grab_current_ns)();   const void * (*netlink_ns)(struct sock *);   const void * (*initial_ns)();   void (*drop_ns)(void *); } ;    59     struct bin_attribute ;    60     struct attribute {   const char *name;   umode_t mode;   bool ignore_lockdep;   struct lock_class_key *key;   struct lock_class_key skey; } ;    37     struct attribute_group {   const char *name;   umode_t  (*is_visible)(struct kobject *, struct attribute *, int);   umode_t  (*is_bin_visible)(struct kobject *, struct bin_attribute *, int);   struct attribute **attrs;   struct bin_attribute **bin_attrs; } ;    92     struct bin_attribute {   struct attribute attr;   size_t size;   void *private;   ssize_t  (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   ssize_t  (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t );   int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;   165     struct sysfs_ops {   ssize_t  (*show)(struct kobject *, struct attribute *, char *);   ssize_t  (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;   530     struct kref {   atomic_t refcount; } ;    52     struct kset ;    52     struct kobj_type ;    52     struct kobject {   const char *name;   struct list_head entry;   struct kobject *parent;   struct kset *kset;   struct kobj_type *ktype;   struct kernfs_node *sd;   struct kref kref;   struct delayed_work release;   unsigned char state_initialized;   unsigned char state_in_sysfs;   unsigned char state_add_uevent_sent;   unsigned char state_remove_uevent_sent;   unsigned char uevent_suppress; } ;   115     struct kobj_type {   void (*release)(struct kobject *);   const struct sysfs_ops *sysfs_ops;   struct attribute **default_attrs;   const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);   const void * (*namespace)(struct kobject *); } ;   123     struct kobj_uevent_env {   char *argv[3U];   char *envp[32U];   int envp_idx;   char buf[2048U];   int buflen; } ;   131     struct kset_uevent_ops {   const int (*filter)(struct kset *, struct kobject *);   const const char * (*name)(struct kset *, struct kobject *);   const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;   148     struct kset {   struct list_head list;   spinlock_t list_lock;   struct kobject kobj;   const struct kset_uevent_ops *uevent_ops; } ;   223     struct kernel_param ;   228     struct kernel_param_ops {   unsigned int flags;   int (*set)(const char *, const struct 
kernel_param *);   int (*get)(char *, const struct kernel_param *);   void (*free)(void *); } ;    62     struct kparam_string ;    62     struct kparam_array ;    62     union __anonunion____missing_field_name_237 {   void *arg;   const struct kparam_string *str;   const struct kparam_array *arr; } ;    62     struct kernel_param {   const char *name;   struct module *mod;   const struct kernel_param_ops *ops;   const u16 perm;   s8 level;   u8 flags;   union __anonunion____missing_field_name_237 __annonCompField51; } ;    83     struct kparam_string {   unsigned int maxlen;   char *string; } ;    89     struct kparam_array {   unsigned int max;   unsigned int elemsize;   unsigned int *num;   const struct kernel_param_ops *ops;   void *elem; } ;   470     struct exception_table_entry ;    24     struct latch_tree_node {   struct rb_node node[2U]; } ;   211     struct mod_arch_specific { } ;    39     struct module_param_attrs ;    39     struct module_kobject {   struct kobject kobj;   struct module *mod;   struct kobject *drivers_dir;   struct module_param_attrs *mp;   struct completion *kobj_completion; } ;    50     struct module_attribute {   struct attribute attr;   ssize_t  (*show)(struct module_attribute *, struct module_kobject *, char *);   ssize_t  (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t );   void (*setup)(struct module *, const char *);   int (*test)(struct module *);   void (*free)(struct module *); } ;   277     enum module_state {   MODULE_STATE_LIVE = 0,   MODULE_STATE_COMING = 1,   MODULE_STATE_GOING = 2,   MODULE_STATE_UNFORMED = 3 } ;   284     struct mod_tree_node {   struct module *mod;   struct latch_tree_node node; } ;   291     struct module_layout {   void *base;   unsigned int size;   unsigned int text_size;   unsigned int ro_size;   unsigned int ro_after_init_size;   struct mod_tree_node mtn; } ;   307     struct mod_kallsyms {   Elf64_Sym *symtab;   unsigned int num_symtab;   char *strtab; } ;   321     struct klp_modinfo {   Elf64_Ehdr hdr;   Elf64_Shdr *sechdrs;   char *secstrings;   unsigned int symndx; } ;   329     struct module_sect_attrs ;   329     struct module_notes_attrs ;   329     struct trace_event_call ;   329     struct trace_enum_map ;   329     struct module {   enum module_state state;   struct list_head list;   char name[56U];   struct module_kobject mkobj;   struct module_attribute *modinfo_attrs;   const char *version;   const char *srcversion;   struct kobject *holders_dir;   const struct kernel_symbol *syms;   const unsigned long *crcs;   unsigned int num_syms;   struct mutex param_lock;   struct kernel_param *kp;   unsigned int num_kp;   unsigned int num_gpl_syms;   const struct kernel_symbol *gpl_syms;   const unsigned long *gpl_crcs;   const struct kernel_symbol *unused_syms;   const unsigned long *unused_crcs;   unsigned int num_unused_syms;   unsigned int num_unused_gpl_syms;   const struct kernel_symbol *unused_gpl_syms;   const unsigned long *unused_gpl_crcs;   bool sig_ok;   bool async_probe_requested;   const struct kernel_symbol *gpl_future_syms;   const unsigned long *gpl_future_crcs;   unsigned int num_gpl_future_syms;   unsigned int num_exentries;   struct exception_table_entry *extable;   int (*init)();   struct module_layout core_layout;   struct module_layout init_layout;   struct mod_arch_specific arch;   unsigned int taints;   unsigned int num_bugs;   struct list_head bug_list;   struct bug_entry *bug_table;   struct mod_kallsyms *kallsyms;   struct mod_kallsyms core_kallsyms;   
struct module_sect_attrs *sect_attrs;   struct module_notes_attrs *notes_attrs;   char *args;   void *percpu;   unsigned int percpu_size;   unsigned int num_tracepoints;   const struct tracepoint **tracepoints_ptrs;   unsigned int num_trace_bprintk_fmt;   const char **trace_bprintk_fmt_start;   struct trace_event_call **trace_events;   unsigned int num_trace_events;   struct trace_enum_map **trace_enums;   unsigned int num_trace_enums;   bool klp;   bool klp_alive;   struct klp_modinfo *klp_info;   struct list_head source_list;   struct list_head target_list;   void (*exit)();   atomic_t refcnt;   ctor_fn_t  (**ctors)();   unsigned int num_ctors; } ;    22     struct kernel_cap_struct {   __u32 cap[2U]; } ;    25     typedef struct kernel_cap_struct kernel_cap_t;    84     struct plist_node {   int prio;   struct list_head prio_list;   struct list_head node_list; } ;     4     typedef unsigned long cputime_t;    26     struct sem_undo_list ;    26     struct sysv_sem {   struct sem_undo_list *undo_list; } ;    78     struct user_struct ;    26     struct sysv_shm {   struct list_head shm_clist; } ;    24     struct __anonstruct_sigset_t_245 {   unsigned long sig[1U]; } ;    24     typedef struct __anonstruct_sigset_t_245 sigset_t;    25     struct siginfo ;    17     typedef void __signalfn_t(int);    18     typedef __signalfn_t *__sighandler_t;    20     typedef void __restorefn_t();    21     typedef __restorefn_t *__sigrestore_t;    38     union sigval {   int sival_int;   void *sival_ptr; } ;    10     typedef union sigval sigval_t;    11     struct __anonstruct__kill_247 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid; } ;    11     struct __anonstruct__timer_248 {   __kernel_timer_t _tid;   int _overrun;   char _pad[0U];   sigval_t _sigval;   int _sys_private; } ;    11     struct __anonstruct__rt_249 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   sigval_t _sigval; } ;    11     struct __anonstruct__sigchld_250 {   __kernel_pid_t _pid;   __kernel_uid32_t _uid;   int _status;   __kernel_clock_t _utime;   __kernel_clock_t _stime; } ;    11     struct __anonstruct__addr_bnd_253 {   void *_lower;   void *_upper; } ;    11     union __anonunion____missing_field_name_252 {   struct __anonstruct__addr_bnd_253 _addr_bnd;   __u32 _pkey; } ;    11     struct __anonstruct__sigfault_251 {   void *_addr;   short _addr_lsb;   union __anonunion____missing_field_name_252 __annonCompField52; } ;    11     struct __anonstruct__sigpoll_254 {   long _band;   int _fd; } ;    11     struct __anonstruct__sigsys_255 {   void *_call_addr;   int _syscall;   unsigned int _arch; } ;    11     union __anonunion__sifields_246 {   int _pad[28U];   struct __anonstruct__kill_247 _kill;   struct __anonstruct__timer_248 _timer;   struct __anonstruct__rt_249 _rt;   struct __anonstruct__sigchld_250 _sigchld;   struct __anonstruct__sigfault_251 _sigfault;   struct __anonstruct__sigpoll_254 _sigpoll;   struct __anonstruct__sigsys_255 _sigsys; } ;    11     struct siginfo {   int si_signo;   int si_errno;   int si_code;   union __anonunion__sifields_246 _sifields; } ;   118     typedef struct siginfo siginfo_t;    22     struct sigpending {   struct list_head list;   sigset_t signal; } ;   257     struct sigaction {   __sighandler_t sa_handler;   unsigned long sa_flags;   __sigrestore_t sa_restorer;   sigset_t sa_mask; } ;   271     struct k_sigaction {   struct sigaction sa; } ;   457     enum pid_type {   PIDTYPE_PID = 0,   PIDTYPE_PGID = 1,   PIDTYPE_SID = 2,   PIDTYPE_MAX = 3 } ;   464     struct pid_namespace ;  
 464     struct upid {   int nr;   struct pid_namespace *ns;   struct hlist_node pid_chain; } ;    56     struct pid {   atomic_t count;   unsigned int level;   struct hlist_head tasks[3U];   struct callback_head rcu;   struct upid numbers[1U]; } ;    68     struct pid_link {   struct hlist_node node;   struct pid *pid; } ;    43     struct seccomp_filter ;    44     struct seccomp {   int mode;   struct seccomp_filter *filter; } ;    40     struct rt_mutex_waiter ;    41     struct rlimit {   __kernel_ulong_t rlim_cur;   __kernel_ulong_t rlim_max; } ;    11     struct timerqueue_node {   struct rb_node node;   ktime_t expires; } ;    12     struct timerqueue_head {   struct rb_root head;   struct timerqueue_node *next; } ;    50     struct hrtimer_clock_base ;    51     struct hrtimer_cpu_base ;    60     enum hrtimer_restart {   HRTIMER_NORESTART = 0,   HRTIMER_RESTART = 1 } ;    65     struct hrtimer {   struct timerqueue_node node;   ktime_t _softexpires;   enum hrtimer_restart  (*function)(struct hrtimer *);   struct hrtimer_clock_base *base;   u8 state;   u8 is_rel;   int start_pid;   void *start_site;   char start_comm[16U]; } ;   125     struct hrtimer_clock_base {   struct hrtimer_cpu_base *cpu_base;   int index;   clockid_t clockid;   struct timerqueue_head active;   ktime_t  (*get_time)();   ktime_t offset; } ;   158     struct hrtimer_cpu_base {   raw_spinlock_t lock;   seqcount_t seq;   struct hrtimer *running;   unsigned int cpu;   unsigned int active_bases;   unsigned int clock_was_set_seq;   bool migration_enabled;   bool nohz_active;   unsigned char in_hrtirq;   unsigned char hres_active;   unsigned char hang_detected;   ktime_t expires_next;   struct hrtimer *next_timer;   unsigned int nr_events;   unsigned int nr_retries;   unsigned int nr_hangs;   unsigned int max_hang_time;   struct hrtimer_clock_base clock_base[4U]; } ;    12     enum kcov_mode {   KCOV_MODE_DISABLED = 0,   KCOV_MODE_TRACE = 1 } ;    17     struct task_io_accounting {   u64 rchar;   u64 wchar;   u64 syscr;   u64 syscw;   u64 read_bytes;   u64 write_bytes;   u64 cancelled_write_bytes; } ;    45     struct latency_record {   unsigned long backtrace[12U];   unsigned int count;   unsigned long time;   unsigned long max; } ;    41     struct assoc_array_ptr ;    41     struct assoc_array {   struct assoc_array_ptr *root;   unsigned long nr_leaves_on_tree; } ;    31     typedef int32_t key_serial_t;    34     typedef uint32_t key_perm_t;    35     struct key ;    36     struct signal_struct ;    37     struct key_type ;    41     struct keyring_index_key {   struct key_type *type;   const char *description;   size_t desc_len; } ;    91     union key_payload {   void *rcu_data0;   void *data[4U]; } ;   128     union __anonunion____missing_field_name_290 {   struct list_head graveyard_link;   struct rb_node serial_node; } ;   128     struct key_user ;   128     union __anonunion____missing_field_name_291 {   time_t expiry;   time_t revoked_at; } ;   128     struct __anonstruct____missing_field_name_293 {   struct key_type *type;   char *description; } ;   128     union __anonunion____missing_field_name_292 {   struct keyring_index_key index_key;   struct __anonstruct____missing_field_name_293 __annonCompField55; } ;   128     struct __anonstruct____missing_field_name_295 {   struct list_head name_link;   struct assoc_array keys; } ;   128     union __anonunion____missing_field_name_294 {   union key_payload payload;   struct __anonstruct____missing_field_name_295 __annonCompField57;   int reject_error; } ;   
128     struct key {   atomic_t usage;   key_serial_t serial;   union __anonunion____missing_field_name_290 __annonCompField53;   struct rw_semaphore sem;   struct key_user *user;   void *security;   union __anonunion____missing_field_name_291 __annonCompField54;   time_t last_used_at;   kuid_t uid;   kgid_t gid;   key_perm_t perm;   unsigned short quotalen;   unsigned short datalen;   unsigned long flags;   union __anonunion____missing_field_name_292 __annonCompField56;   union __anonunion____missing_field_name_294 __annonCompField58;   int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;   377     struct audit_context ;    27     struct group_info {   atomic_t usage;   int ngroups;   kgid_t gid[0U]; } ;    85     struct cred {   atomic_t usage;   atomic_t subscribers;   void *put_addr;   unsigned int magic;   kuid_t uid;   kgid_t gid;   kuid_t suid;   kgid_t sgid;   kuid_t euid;   kgid_t egid;   kuid_t fsuid;   kgid_t fsgid;   unsigned int securebits;   kernel_cap_t cap_inheritable;   kernel_cap_t cap_permitted;   kernel_cap_t cap_effective;   kernel_cap_t cap_bset;   kernel_cap_t cap_ambient;   unsigned char jit_keyring;   struct key *session_keyring;   struct key *process_keyring;   struct key *thread_keyring;   struct key *request_key_auth;   void *security;   struct user_struct *user;   struct user_namespace *user_ns;   struct group_info *group_info;   struct callback_head rcu; } ;   368     struct percpu_ref ;    55     typedef void percpu_ref_func_t(struct percpu_ref *);    68     struct percpu_ref {   atomic_long_t count;   unsigned long percpu_count_ptr;   percpu_ref_func_t *release;   percpu_ref_func_t *confirm_switch;   bool force_atomic;   struct callback_head rcu; } ;   325     enum rcu_sync_type {   RCU_SYNC = 0,   RCU_SCHED_SYNC = 1,   RCU_BH_SYNC = 2 } ;   331     struct rcu_sync {   int gp_state;   int gp_count;   wait_queue_head_t gp_wait;   int cb_state;   struct callback_head cb_head;   enum rcu_sync_type gp_type; } ;    66     struct percpu_rw_semaphore {   struct rcu_sync rss;   unsigned int *read_count;   struct rw_semaphore rw_sem;   wait_queue_head_t writer;   int readers_block; } ;   144     struct cgroup ;   145     struct cgroup_root ;   146     struct cgroup_subsys ;   147     struct cgroup_taskset ;   191     struct cgroup_file {   struct kernfs_node *kn; } ;    90     struct cgroup_subsys_state {   struct cgroup *cgroup;   struct cgroup_subsys *ss;   struct percpu_ref refcnt;   struct cgroup_subsys_state *parent;   struct list_head sibling;   struct list_head children;   int id;   unsigned int flags;   u64 serial_nr;   atomic_t online_cnt;   struct callback_head callback_head;   struct work_struct destroy_work; } ;   141     struct css_set {   atomic_t refcount;   struct hlist_node hlist;   struct list_head tasks;   struct list_head mg_tasks;   struct list_head cgrp_links;   struct cgroup *dfl_cgrp;   struct cgroup_subsys_state *subsys[13U];   struct list_head mg_preload_node;   struct list_head mg_node;   struct cgroup *mg_src_cgrp;   struct cgroup *mg_dst_cgrp;   struct css_set *mg_dst_cset;   struct list_head e_cset_node[13U];   struct list_head task_iters;   bool dead;   struct callback_head callback_head; } ;   221     struct cgroup {   struct cgroup_subsys_state self;   unsigned long flags;   int id;   int level;   int populated_cnt;   struct kernfs_node *kn;   struct cgroup_file procs_file;   struct cgroup_file events_file;   u16 subtree_control;   u16 subtree_ss_mask;   u16 old_subtree_control;   u16 
old_subtree_ss_mask;   struct cgroup_subsys_state *subsys[13U];   struct cgroup_root *root;   struct list_head cset_links;   struct list_head e_csets[13U];   struct list_head pidlists;   struct mutex pidlist_mutex;   wait_queue_head_t offline_waitq;   struct work_struct release_agent_work;   int ancestor_ids[]; } ;   306     struct cgroup_root {   struct kernfs_root *kf_root;   unsigned int subsys_mask;   int hierarchy_id;   struct cgroup cgrp;   int cgrp_ancestor_id_storage;   atomic_t nr_cgrps;   struct list_head root_list;   unsigned int flags;   struct idr cgroup_idr;   char release_agent_path[4096U];   char name[64U]; } ;   345     struct cftype {   char name[64U];   unsigned long private;   size_t max_write_len;   unsigned int flags;   unsigned int file_offset;   struct cgroup_subsys *ss;   struct list_head node;   struct kernfs_ops *kf_ops;   u64  (*read_u64)(struct cgroup_subsys_state *, struct cftype *);   s64  (*read_s64)(struct cgroup_subsys_state *, struct cftype *);   int (*seq_show)(struct seq_file *, void *);   void * (*seq_start)(struct seq_file *, loff_t *);   void * (*seq_next)(struct seq_file *, void *, loff_t *);   void (*seq_stop)(struct seq_file *, void *);   int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 );   int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 );   ssize_t  (*write)(struct kernfs_open_file *, char *, size_t , loff_t );   struct lock_class_key lockdep_key; } ;   430     struct cgroup_subsys {   struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);   int (*css_online)(struct cgroup_subsys_state *);   void (*css_offline)(struct cgroup_subsys_state *);   void (*css_released)(struct cgroup_subsys_state *);   void (*css_free)(struct cgroup_subsys_state *);   void (*css_reset)(struct cgroup_subsys_state *);   int (*can_attach)(struct cgroup_taskset *);   void (*cancel_attach)(struct cgroup_taskset *);   void (*attach)(struct cgroup_taskset *);   void (*post_attach)();   int (*can_fork)(struct task_struct *);   void (*cancel_fork)(struct task_struct *);   void (*fork)(struct task_struct *);   void (*exit)(struct task_struct *);   void (*free)(struct task_struct *);   void (*bind)(struct cgroup_subsys_state *);   bool early_init;   bool implicit_on_dfl;   bool broken_hierarchy;   bool warned_broken_hierarchy;   int id;   const char *name;   const char *legacy_name;   struct cgroup_root *root;   struct idr css_idr;   struct list_head cfts;   struct cftype *dfl_cftypes;   struct cftype *legacy_cftypes;   unsigned int depends_on; } ;   128     struct futex_pi_state ;   129     struct robust_list_head ;   130     struct bio_list ;   131     struct fs_struct ;   132     struct perf_event_context ;   133     struct blk_plug ;   135     struct nameidata ;   188     struct cfs_rq ;   189     struct task_group ;   495     struct sighand_struct {   atomic_t count;   struct k_sigaction action[64U];   spinlock_t siglock;   wait_queue_head_t signalfd_wqh; } ;   539     struct pacct_struct {   int ac_flag;   long ac_exitcode;   unsigned long ac_mem;   cputime_t ac_utime;   cputime_t ac_stime;   unsigned long ac_minflt;   unsigned long ac_majflt; } ;   547     struct cpu_itimer {   cputime_t expires;   cputime_t incr;   u32 error;   u32 incr_error; } ;   554     struct prev_cputime {   cputime_t utime;   cputime_t stime;   raw_spinlock_t lock; } ;   579     struct task_cputime {   cputime_t utime;   cputime_t stime;   unsigned long long sum_exec_runtime; } ;   595     struct task_cputime_atomic {   atomic64_t utime;   
atomic64_t stime;   atomic64_t sum_exec_runtime; } ;   617     struct thread_group_cputimer {   struct task_cputime_atomic cputime_atomic;   bool running;   bool checking_timer; } ;   662     struct autogroup ;   663     struct tty_struct ;   663     struct taskstats ;   663     struct tty_audit_buf ;   663     struct signal_struct {   atomic_t sigcnt;   atomic_t live;   int nr_threads;   struct list_head thread_head;   wait_queue_head_t wait_chldexit;   struct task_struct *curr_target;   struct sigpending shared_pending;   int group_exit_code;   int notify_count;   struct task_struct *group_exit_task;   int group_stop_count;   unsigned int flags;   unsigned char is_child_subreaper;   unsigned char has_child_subreaper;   int posix_timer_id;   struct list_head posix_timers;   struct hrtimer real_timer;   struct pid *leader_pid;   ktime_t it_real_incr;   struct cpu_itimer it[2U];   struct thread_group_cputimer cputimer;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   struct pid *tty_old_pgrp;   int leader;   struct tty_struct *tty;   struct autogroup *autogroup;   seqlock_t stats_lock;   cputime_t utime;   cputime_t stime;   cputime_t cutime;   cputime_t cstime;   cputime_t gtime;   cputime_t cgtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   unsigned long cnvcsw;   unsigned long cnivcsw;   unsigned long min_flt;   unsigned long maj_flt;   unsigned long cmin_flt;   unsigned long cmaj_flt;   unsigned long inblock;   unsigned long oublock;   unsigned long cinblock;   unsigned long coublock;   unsigned long maxrss;   unsigned long cmaxrss;   struct task_io_accounting ioac;   unsigned long long sum_sched_runtime;   struct rlimit rlim[16U];   struct pacct_struct pacct;   struct taskstats *stats;   unsigned int audit_tty;   struct tty_audit_buf *tty_audit_buf;   bool oom_flag_origin;   short oom_score_adj;   short oom_score_adj_min;   struct mm_struct *oom_mm;   struct mutex cred_guard_mutex; } ;   839     struct user_struct {   atomic_t __count;   atomic_t processes;   atomic_t sigpending;   atomic_t inotify_watches;   atomic_t inotify_devs;   atomic_t fanotify_listeners;   atomic_long_t epoll_watches;   unsigned long mq_bytes;   unsigned long locked_shm;   unsigned long unix_inflight;   atomic_long_t pipe_bufs;   struct key *uid_keyring;   struct key *session_keyring;   struct hlist_node uidhash_node;   kuid_t uid;   atomic_long_t locked_vm; } ;   884     struct backing_dev_info ;   885     struct reclaim_state ;   886     struct sched_info {   unsigned long pcount;   unsigned long long run_delay;   unsigned long long last_arrival;   unsigned long long last_queued; } ;   900     struct task_delay_info {   spinlock_t lock;   unsigned int flags;   u64 blkio_start;   u64 blkio_delay;   u64 swapin_delay;   u32 blkio_count;   u32 swapin_count;   u64 freepages_start;   u64 freepages_delay;   u32 freepages_count; } ;   957     struct wake_q_node {   struct wake_q_node *next; } ;  1200     struct io_context ;  1234     struct pipe_inode_info ;  1235     struct uts_namespace ;  1236     struct load_weight {   unsigned long weight;   u32 inv_weight; } ;  1243     struct sched_avg {   u64 last_update_time;   u64 load_sum;   u32 util_sum;   u32 period_contrib;   unsigned long load_avg;   unsigned long util_avg; } ;  1301     struct sched_statistics {   u64 wait_start;   u64 wait_max;   u64 wait_count;   u64 wait_sum;   u64 iowait_count;   u64 iowait_sum;   u64 sleep_start;   u64 sleep_max;   s64 sum_sleep_runtime;   u64 block_start;   u64 
block_max;   u64 exec_max;   u64 slice_max;   u64 nr_migrations_cold;   u64 nr_failed_migrations_affine;   u64 nr_failed_migrations_running;   u64 nr_failed_migrations_hot;   u64 nr_forced_migrations;   u64 nr_wakeups;   u64 nr_wakeups_sync;   u64 nr_wakeups_migrate;   u64 nr_wakeups_local;   u64 nr_wakeups_remote;   u64 nr_wakeups_affine;   u64 nr_wakeups_affine_attempts;   u64 nr_wakeups_passive;   u64 nr_wakeups_idle; } ;  1336     struct sched_entity {   struct load_weight load;   struct rb_node run_node;   struct list_head group_node;   unsigned int on_rq;   u64 exec_start;   u64 sum_exec_runtime;   u64 vruntime;   u64 prev_sum_exec_runtime;   u64 nr_migrations;   struct sched_statistics statistics;   int depth;   struct sched_entity *parent;   struct cfs_rq *cfs_rq;   struct cfs_rq *my_q;   struct sched_avg avg; } ;  1373     struct rt_rq ;  1373     struct sched_rt_entity {   struct list_head run_list;   unsigned long timeout;   unsigned long watchdog_stamp;   unsigned int time_slice;   unsigned short on_rq;   unsigned short on_list;   struct sched_rt_entity *back;   struct sched_rt_entity *parent;   struct rt_rq *rt_rq;   struct rt_rq *my_q; } ;  1391     struct sched_dl_entity {   struct rb_node rb_node;   u64 dl_runtime;   u64 dl_deadline;   u64 dl_period;   u64 dl_bw;   s64 runtime;   u64 deadline;   unsigned int flags;   int dl_throttled;   int dl_boosted;   int dl_yielded;   struct hrtimer dl_timer; } ;  1455     struct tlbflush_unmap_batch {   struct cpumask cpumask;   bool flush_required;   bool writable; } ;  1474     struct sched_class ;  1474     struct files_struct ;  1474     struct compat_robust_list_head ;  1474     struct numa_group ;  1474     struct kcov ;  1474     struct task_struct {   struct thread_info thread_info;   volatile long state;   void *stack;   atomic_t usage;   unsigned int flags;   unsigned int ptrace;   struct llist_node wake_entry;   int on_cpu;   unsigned int cpu;   unsigned int wakee_flips;   unsigned long wakee_flip_decay_ts;   struct task_struct *last_wakee;   int wake_cpu;   int on_rq;   int prio;   int static_prio;   int normal_prio;   unsigned int rt_priority;   const struct sched_class *sched_class;   struct sched_entity se;   struct sched_rt_entity rt;   struct task_group *sched_task_group;   struct sched_dl_entity dl;   struct hlist_head preempt_notifiers;   unsigned int policy;   int nr_cpus_allowed;   cpumask_t cpus_allowed;   unsigned long rcu_tasks_nvcsw;   bool rcu_tasks_holdout;   struct list_head rcu_tasks_holdout_list;   int rcu_tasks_idle_cpu;   struct sched_info sched_info;   struct list_head tasks;   struct plist_node pushable_tasks;   struct rb_node pushable_dl_tasks;   struct mm_struct *mm;   struct mm_struct *active_mm;   u32 vmacache_seqnum;   struct vm_area_struct *vmacache[4U];   struct task_rss_stat rss_stat;   int exit_state;   int exit_code;   int exit_signal;   int pdeath_signal;   unsigned long jobctl;   unsigned int personality;   unsigned char sched_reset_on_fork;   unsigned char sched_contributes_to_load;   unsigned char sched_migrated;   unsigned char sched_remote_wakeup;   unsigned char;   unsigned char in_execve;   unsigned char in_iowait;   unsigned char restore_sigmask;   unsigned char memcg_may_oom;   unsigned char memcg_kmem_skip_account;   unsigned char brk_randomized;   unsigned long atomic_flags;   struct restart_block restart_block;   pid_t pid;   pid_t tgid;   struct task_struct *real_parent;   struct task_struct *parent;   struct list_head children;   struct list_head sibling;   struct task_struct 
*group_leader;   struct list_head ptraced;   struct list_head ptrace_entry;   struct pid_link pids[3U];   struct list_head thread_group;   struct list_head thread_node;   struct completion *vfork_done;   int *set_child_tid;   int *clear_child_tid;   cputime_t utime;   cputime_t stime;   cputime_t utimescaled;   cputime_t stimescaled;   cputime_t gtime;   struct prev_cputime prev_cputime;   unsigned long nvcsw;   unsigned long nivcsw;   u64 start_time;   u64 real_start_time;   unsigned long min_flt;   unsigned long maj_flt;   struct task_cputime cputime_expires;   struct list_head cpu_timers[3U];   const struct cred *real_cred;   const struct cred *cred;   char comm[16U];   struct nameidata *nameidata;   struct sysv_sem sysvsem;   struct sysv_shm sysvshm;   unsigned long last_switch_count;   struct fs_struct *fs;   struct files_struct *files;   struct nsproxy *nsproxy;   struct signal_struct *signal;   struct sighand_struct *sighand;   sigset_t blocked;   sigset_t real_blocked;   sigset_t saved_sigmask;   struct sigpending pending;   unsigned long sas_ss_sp;   size_t sas_ss_size;   unsigned int sas_ss_flags;   struct callback_head *task_works;   struct audit_context *audit_context;   kuid_t loginuid;   unsigned int sessionid;   struct seccomp seccomp;   u32 parent_exec_id;   u32 self_exec_id;   spinlock_t alloc_lock;   raw_spinlock_t pi_lock;   struct wake_q_node wake_q;   struct rb_root pi_waiters;   struct rb_node *pi_waiters_leftmost;   struct rt_mutex_waiter *pi_blocked_on;   struct mutex_waiter *blocked_on;   unsigned int irq_events;   unsigned long hardirq_enable_ip;   unsigned long hardirq_disable_ip;   unsigned int hardirq_enable_event;   unsigned int hardirq_disable_event;   int hardirqs_enabled;   int hardirq_context;   unsigned long softirq_disable_ip;   unsigned long softirq_enable_ip;   unsigned int softirq_disable_event;   unsigned int softirq_enable_event;   int softirqs_enabled;   int softirq_context;   u64 curr_chain_key;   int lockdep_depth;   unsigned int lockdep_recursion;   struct held_lock held_locks[48U];   gfp_t lockdep_reclaim_gfp;   unsigned int in_ubsan;   void *journal_info;   struct bio_list *bio_list;   struct blk_plug *plug;   struct reclaim_state *reclaim_state;   struct backing_dev_info *backing_dev_info;   struct io_context *io_context;   unsigned long ptrace_message;   siginfo_t *last_siginfo;   struct task_io_accounting ioac;   u64 acct_rss_mem1;   u64 acct_vm_mem1;   cputime_t acct_timexpd;   nodemask_t mems_allowed;   seqcount_t mems_allowed_seq;   int cpuset_mem_spread_rotor;   int cpuset_slab_spread_rotor;   struct css_set *cgroups;   struct list_head cg_list;   struct robust_list_head *robust_list;   struct compat_robust_list_head *compat_robust_list;   struct list_head pi_state_list;   struct futex_pi_state *pi_state_cache;   struct perf_event_context *perf_event_ctxp[2U];   struct mutex perf_event_mutex;   struct list_head perf_event_list;   struct mempolicy *mempolicy;   short il_next;   short pref_node_fork;   int numa_scan_seq;   unsigned int numa_scan_period;   unsigned int numa_scan_period_max;   int numa_preferred_nid;   unsigned long numa_migrate_retry;   u64 node_stamp;   u64 last_task_numa_placement;   u64 last_sum_exec_runtime;   struct callback_head numa_work;   struct list_head numa_entry;   struct numa_group *numa_group;   unsigned long *numa_faults;   unsigned long total_numa_faults;   unsigned long numa_faults_locality[3U];   unsigned long numa_pages_migrated;   struct tlbflush_unmap_batch tlb_ubc;   struct callback_head rcu;   
struct pipe_inode_info *splice_pipe;   struct page_frag task_frag;   struct task_delay_info *delays;   int make_it_fail;   int nr_dirtied;   int nr_dirtied_pause;   unsigned long dirty_paused_when;   int latency_record_count;   struct latency_record latency_record[32U];   u64 timer_slack_ns;   u64 default_timer_slack_ns;   unsigned int kasan_depth;   unsigned long trace;   unsigned long trace_recursion;   enum kcov_mode kcov_mode;   unsigned int kcov_size;   void *kcov_area;   struct kcov *kcov;   struct mem_cgroup *memcg_in_oom;   gfp_t memcg_oom_gfp_mask;   int memcg_oom_order;   unsigned int memcg_nr_pages_over_high;   struct uprobe_task *utask;   unsigned int sequential_io;   unsigned int sequential_io_avg;   unsigned long task_state_change;   int pagefault_disabled;   struct task_struct *oom_reaper_list;   atomic_t stack_refcount;   struct thread_struct thread; } ;    13     typedef unsigned long kernel_ulong_t;   186     struct acpi_device_id {   __u8 id[9U];   kernel_ulong_t driver_data;   __u32 cls;   __u32 cls_msk; } ;   229     struct of_device_id {   char name[32U];   char type[32U];   char compatible[128U];   const void *data; } ;   484     struct platform_device_id {   char name[20U];   kernel_ulong_t driver_data; } ;   674     enum fwnode_type {   FWNODE_INVALID = 0,   FWNODE_OF = 1,   FWNODE_ACPI = 2,   FWNODE_ACPI_DATA = 3,   FWNODE_PDATA = 4,   FWNODE_IRQCHIP = 5 } ;   683     struct fwnode_handle {   enum fwnode_type type;   struct fwnode_handle *secondary; } ;    32     typedef u32 phandle;    34     struct property {   char *name;   int length;   void *value;   struct property *next;   unsigned long _flags;   unsigned int unique_id;   struct bin_attribute attr; } ;    44     struct device_node {   const char *name;   const char *type;   phandle phandle;   const char *full_name;   struct fwnode_handle fwnode;   struct property *properties;   struct property *deadprops;   struct device_node *parent;   struct device_node *child;   struct device_node *sibling;   struct kobject kobj;   unsigned long _flags;   void *data; } ;  1275     struct klist_node ;    37     struct klist_node {   void *n_klist;   struct list_head n_node;   struct kref n_ref; } ;    93     struct hlist_bl_node ;    93     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_343 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_342 {   struct __anonstruct____missing_field_name_343 __annonCompField65; } ;   114     struct lockref {   union __anonunion____missing_field_name_342 __annonCompField66; } ;    77     struct path ;    78     struct vfsmount ;    79     struct __anonstruct____missing_field_name_345 {   u32 hash;   u32 len; } ;    79     union __anonunion____missing_field_name_344 {   struct __anonstruct____missing_field_name_345 __annonCompField67;   u64 hash_len; } ;    79     struct qstr {   union __anonunion____missing_field_name_344 __annonCompField68;   const unsigned char *name; } ;    65     struct dentry_operations ;    65     union __anonunion____missing_field_name_346 {   struct list_head d_lru;   wait_queue_head_t *d_wait; } ;    65     union __anonunion_d_u_347 {   struct hlist_node d_alias;   struct hlist_bl_node d_in_lookup_hash;   struct callback_head d_rcu; } ;    65     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   
struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   union __anonunion____missing_field_name_346 __annonCompField69;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_347 d_u; } ;   121     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   int (*d_init)(struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool );   struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;   592     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    19     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   int nid;   struct mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    80     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;    63     struct __anonstruct____missing_field_name_349 {   struct radix_tree_node *parent;   void *private_data; } ;    63     union __anonunion____missing_field_name_348 {   struct __anonstruct____missing_field_name_349 __annonCompField70;   struct callback_head callback_head; } ;    63     struct radix_tree_node {   unsigned char shift;   unsigned char offset;   unsigned int count;   union __anonunion____missing_field_name_348 __annonCompField71;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   106     struct radix_tree_root {   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    44     struct bio_vec {   struct page *bv_page;   unsigned int bv_len;   unsigned int bv_offset; } ;    87     struct block_device ;   273     struct delayed_call {   void (*fn)(void *);   void *arg; } ;   264     struct bdi_writeback ;   265     struct export_operations ;   267     struct iovec ;   268     struct kiocb ;   269     struct poll_table_struct ;   270     struct kstatfs ;   271     struct swap_info_struct ;   272     struct iov_iter ;   273     struct fscrypt_info ;   274     struct fscrypt_operations ;    76     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec 
ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   262     struct percpu_counter {   raw_spinlock_t lock;   s64 count;   struct list_head list;   s32 *counters; } ;   213     struct dquot ;   214     struct kqid ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_357 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_357 kprojid_t;   181     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_358 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_358 __annonCompField73;   enum quota_type type; } ;   194     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time64_t dqb_btime;   time64_t dqb_itime; } ;   216     struct quota_format_type ;   217     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   282     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   309     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   321     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *);   int (*get_next_id)(struct super_block *, struct kqid *); } ;   338     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   int d_ino_warns;   int d_spc_warns;   u64 d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   361     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   407     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   418     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int 
i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   431     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   447     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   511     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   541     struct writeback_control ;   542     struct kiocb {   struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int ki_flags; } ;   368     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *);   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   bool  (*isolate_page)(struct page *, isolate_mode_t );   void (*putback_page)(struct page *);   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   427     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   atomic_t i_mmap_writable;   struct rb_root i_mmap;   struct rw_semaphore i_mmap_rwsem;   unsigned long nrpages;   unsigned long nrexceptional;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   spinlock_t private_lock;   gfp_t gfp_mask;   struct list_head private_list;   void *private_data; } ;   449     struct request_queue ;   450     struct hd_struct ;   450     struct gendisk ;   450     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   void *bd_claiming;   void *bd_holder; 
  int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   565     struct posix_acl ;   592     struct inode_operations ;   592     union __anonunion____missing_field_name_363 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   592     union __anonunion____missing_field_name_364 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   592     struct file_lock_context ;   592     struct cdev ;   592     union __anonunion____missing_field_name_365 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev;   char *i_link;   unsigned int i_dir_seq; } ;   592     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_363 __annonCompField74;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct rw_semaphore i_rwsem;   unsigned long dirtied_when;   unsigned long dirtied_time_when;   struct hlist_node i_hash;   struct list_head i_io_list;   struct bdi_writeback *i_wb;   int i_wb_frn_winner;   u16 i_wb_frn_avg_time;   u16 i_wb_frn_history;   struct list_head i_lru;   struct list_head i_sb_list;   struct list_head i_wb_list;   union __anonunion____missing_field_name_364 __annonCompField75;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock_context *i_flctx;   struct address_space i_data;   struct list_head i_devices;   union __anonunion____missing_field_name_365 __annonCompField76;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   struct fscrypt_info *i_crypt_info;   void *i_private; } ;   847     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   855     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   878     union __anonunion_f_u_366 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   878     struct file {   union __anonunion_f_u_366 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   963     typedef void *fl_owner_t;   964     struct file_lock ;   965     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, 
struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   971     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   fl_owner_t  (*lm_get_owner)(fl_owner_t );   void (*lm_put_owner)(fl_owner_t );   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, int);   bool  (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock *, int, struct list_head *);   void (*lm_setup)(struct file_lock *, void **); } ;   992     struct net ;   998     struct nlm_lockowner ;   999     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_368 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_367 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_368 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct list_head fl_list;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_367 fl_u; } ;  1051     struct file_lock_context {   spinlock_t flc_lock;   struct list_head flc_flock;   struct list_head flc_posix;   struct list_head flc_lease; } ;  1271     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   struct callback_head fa_rcu; } ;  1306     struct sb_writers {   int frozen;   wait_queue_head_t wait_unfrozen;   struct percpu_rw_semaphore rw_sem[3U]; } ;  1336     struct super_operations ;  1336     struct xattr_handler ;  1336     struct mtd_info ;  1336     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_iflags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   const struct fscrypt_operations *s_cop;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   unsigned int s_quota_types;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct hlist_head 
s_pins;   struct user_namespace *s_user_ns;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu;   struct work_struct destroy_work;   struct mutex s_sync_lock;   int s_stack_depth;   spinlock_t s_inode_list_lock;   struct list_head s_inodes;   spinlock_t s_inode_wblist_lock;   struct list_head s_inodes_wb; } ;  1620     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1633     struct dir_context ;  1658     struct dir_context {   int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1665     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   int (*iterate_shared)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **, void **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   void (*show_fdinfo)(struct seq_file *, struct file *);   ssize_t  (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int);   int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 );   ssize_t  (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;  1734     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry 
*, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int); } ;  1784     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_super)(struct super_block *);   int (*freeze_fs)(struct super_block *);   int (*thaw_super)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   struct dquot ** (*get_dquots)(struct inode *);   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);   long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;  2027     struct file_system_type {   const char *name;   int fs_flags;   struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;  3211     struct seq_file {   char *buf;   size_t size;   size_t from;   size_t count;   size_t pad_until;   loff_t index;   loff_t read_pos;   u64 version;   struct mutex lock;   const struct seq_operations *op;   int poll_event;   const struct file *file;   void *private; } ;    30     struct seq_operations {   void * (*start)(struct seq_file *, loff_t *);   void (*stop)(struct seq_file *, void *);   void * (*next)(struct seq_file *, void *, loff_t *);   int (*show)(struct seq_file *, void *); } ;   222     struct pinctrl ;   223     struct pinctrl_state ;   194     struct dev_pin_info {   struct pinctrl *p;   struct pinctrl_state *default_state;   struct pinctrl_state *init_state;   struct pinctrl_state *sleep_state;   struct pinctrl_state *idle_state; } ;    76     struct dma_map_ops ;    76     struct dev_archdata {   struct dma_map_ops *dma_ops;   void *iommu; } ;    21     struct pdev_archdata { } ;    24     struct device_private ;    25     struct device_driver ;    26     struct 
driver_private ;    27     struct class ;    28     struct subsys_private ;    29     struct bus_type ;    30     struct iommu_ops ;    31     struct iommu_group ;    32     struct iommu_fwspec ;    62     struct device_attribute ;    62     struct bus_type {   const char *name;   const char *dev_name;   struct device *dev_root;   struct device_attribute *dev_attrs;   const struct attribute_group **bus_groups;   const struct attribute_group **dev_groups;   const struct attribute_group **drv_groups;   int (*match)(struct device *, struct device_driver *);   int (*uevent)(struct device *, struct kobj_uevent_env *);   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*online)(struct device *);   int (*offline)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct dev_pm_ops *pm;   const struct iommu_ops *iommu_ops;   struct subsys_private *p;   struct lock_class_key lock_key; } ;   143     struct device_type ;   202     enum probe_type {   PROBE_DEFAULT_STRATEGY = 0,   PROBE_PREFER_ASYNCHRONOUS = 1,   PROBE_FORCE_SYNCHRONOUS = 2 } ;   208     struct device_driver {   const char *name;   struct bus_type *bus;   struct module *owner;   const char *mod_name;   bool suppress_bind_attrs;   enum probe_type probe_type;   const struct of_device_id *of_match_table;   const struct acpi_device_id *acpi_match_table;   int (*probe)(struct device *);   int (*remove)(struct device *);   void (*shutdown)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct attribute_group **groups;   const struct dev_pm_ops *pm;   struct driver_private *p; } ;   358     struct class_attribute ;   358     struct class {   const char *name;   struct module *owner;   struct class_attribute *class_attrs;   const struct attribute_group **dev_groups;   struct kobject *dev_kobj;   int (*dev_uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *);   void (*class_release)(struct class *);   void (*dev_release)(struct device *);   int (*suspend)(struct device *, pm_message_t );   int (*resume)(struct device *);   const struct kobj_ns_type_operations *ns_type;   const void * (*namespace)(struct device *);   const struct dev_pm_ops *pm;   struct subsys_private *p; } ;   451     struct class_attribute {   struct attribute attr;   ssize_t  (*show)(struct class *, struct class_attribute *, char *);   ssize_t  (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;   519     struct device_type {   const char *name;   const struct attribute_group **groups;   int (*uevent)(struct device *, struct kobj_uevent_env *);   char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);   void (*release)(struct device *);   const struct dev_pm_ops *pm; } ;   547     struct device_attribute {   struct attribute attr;   ssize_t  (*show)(struct device *, struct device_attribute *, char *);   ssize_t  (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;   700     struct device_dma_parameters {   unsigned int max_segment_size;   unsigned long segment_boundary_mask; } ;   709     struct irq_domain ;   709     struct dma_coherent_mem ;   709     struct cma ;   709     struct device {   struct device *parent;   struct device_private *p;   struct kobject kobj;   const char *init_name;   const struct device_type *type;   struct mutex mutex;   struct bus_type *bus;   struct 
device_driver *driver;   void *platform_data;   void *driver_data;   struct dev_pm_info power;   struct dev_pm_domain *pm_domain;   struct irq_domain *msi_domain;   struct dev_pin_info *pins;   struct list_head msi_list;   int numa_node;   u64 *dma_mask;   u64 coherent_dma_mask;   unsigned long dma_pfn_offset;   struct device_dma_parameters *dma_parms;   struct list_head dma_pools;   struct dma_coherent_mem *dma_mem;   struct cma *cma_area;   struct dev_archdata archdata;   struct device_node *of_node;   struct fwnode_handle *fwnode;   dev_t devt;   u32 id;   spinlock_t devres_lock;   struct list_head devres_head;   struct klist_node knode_class;   struct class *class;   const struct attribute_group **groups;   void (*release)(struct device *);   struct iommu_group *iommu_group;   struct iommu_fwspec *iommu_fwspec;   bool offline_disabled;   bool offline; } ;   865     struct wakeup_source {   const char *name;   struct list_head entry;   spinlock_t lock;   struct wake_irq *wakeirq;   struct timer_list timer;   unsigned long timer_expires;   ktime_t total_time;   ktime_t max_time;   ktime_t last_time;   ktime_t start_prevent_time;   ktime_t prevent_sleep_time;   unsigned long event_count;   unsigned long active_count;   unsigned long relax_count;   unsigned long expire_count;   unsigned long wakeup_count;   bool active;   bool autosleep_enabled; } ;  1330     struct irq_desc ;  1331     struct irq_data ;    13     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;    30     struct msi_msg ;    31     enum irqchip_irq_state ;    63     struct msi_desc ;    64     struct irq_common_data {   unsigned int state_use_accessors;   unsigned int node;   void *handler_data;   struct msi_desc *msi_desc;   cpumask_var_t affinity; } ;   151     struct irq_chip ;   151     struct irq_data {   u32 mask;   unsigned int irq;   unsigned long hwirq;   struct irq_common_data *common;   struct irq_chip *chip;   struct irq_domain *domain;   struct irq_data *parent_data;   void *chip_data; } ;   321     struct irq_chip {   struct device *parent_device;   const char *name;   unsigned int (*irq_startup)(struct irq_data *);   void (*irq_shutdown)(struct irq_data *);   void (*irq_enable)(struct irq_data *);   void (*irq_disable)(struct irq_data *);   void (*irq_ack)(struct irq_data *);   void (*irq_mask)(struct irq_data *);   void (*irq_mask_ack)(struct irq_data *);   void (*irq_unmask)(struct irq_data *);   void (*irq_eoi)(struct irq_data *);   int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool );   int (*irq_retrigger)(struct irq_data *);   int (*irq_set_type)(struct irq_data *, unsigned int);   int (*irq_set_wake)(struct irq_data *, unsigned int);   void (*irq_bus_lock)(struct irq_data *);   void (*irq_bus_sync_unlock)(struct irq_data *);   void (*irq_cpu_online)(struct irq_data *);   void (*irq_cpu_offline)(struct irq_data *);   void (*irq_suspend)(struct irq_data *);   void (*irq_resume)(struct irq_data *);   void (*irq_pm_shutdown)(struct irq_data *);   void (*irq_calc_mask)(struct irq_data *);   void (*irq_print_chip)(struct irq_data *, struct seq_file *);   int (*irq_request_resources)(struct irq_data *);   void (*irq_release_resources)(struct irq_data *);   void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *);   void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *);   int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state , bool *);   int (*irq_set_irqchip_state)(struct irq_data 
*, enum irqchip_irq_state , bool );   int (*irq_set_vcpu_affinity)(struct irq_data *, void *);   void (*ipi_send_single)(struct irq_data *, unsigned int);   void (*ipi_send_mask)(struct irq_data *, const struct cpumask *);   unsigned long flags; } ;   422     struct irq_affinity_notify ;   423     struct proc_dir_entry ;   424     struct irqaction ;   424     struct irq_desc {   struct irq_common_data irq_common_data;   struct irq_data irq_data;   unsigned int *kstat_irqs;   void (*handle_irq)(struct irq_desc *);   struct irqaction *action;   unsigned int status_use_accessors;   unsigned int core_internal_state__do_not_mess_with_it;   unsigned int depth;   unsigned int wake_depth;   unsigned int irq_count;   unsigned long last_unhandled;   unsigned int irqs_unhandled;   atomic_t threads_handled;   int threads_handled_last;   raw_spinlock_t lock;   struct cpumask *percpu_enabled;   const struct cpumask *percpu_affinity;   const struct cpumask *affinity_hint;   struct irq_affinity_notify *affinity_notify;   cpumask_var_t pending_mask;   unsigned long threads_oneshot;   atomic_t threads_active;   wait_queue_head_t wait_for_threads;   unsigned int nr_actions;   unsigned int no_suspend_depth;   unsigned int cond_suspend_depth;   unsigned int force_resume_depth;   struct proc_dir_entry *dir;   struct callback_head rcu;   struct kobject kobj;   int parent_irq;   struct module *owner;   const char *name; } ;   130     struct exception_table_entry {   int insn;   int fixup;   int handler; } ;   739     struct irq_chip_regs {   unsigned long enable;   unsigned long disable;   unsigned long mask;   unsigned long ack;   unsigned long eoi;   unsigned long type;   unsigned long polarity; } ;   778     struct irq_chip_type {   struct irq_chip chip;   struct irq_chip_regs regs;   void (*handler)(struct irq_desc *);   u32 type;   u32 mask_cache_priv;   u32 *mask_cache; } ;   800     struct irq_chip_generic {   raw_spinlock_t lock;   void *reg_base;   u32  (*reg_readl)(void *);   void (*reg_writel)(u32 , void *);   void (*suspend)(struct irq_chip_generic *);   void (*resume)(struct irq_chip_generic *);   unsigned int irq_base;   unsigned int irq_cnt;   u32 mask_cache;   u32 type_cache;   u32 polarity_cache;   u32 wake_enabled;   u32 wake_active;   unsigned int num_ct;   void *private;   unsigned long installed;   unsigned long unused;   struct irq_domain *domain;   struct list_head list;   struct irq_chip_type chip_types[0U]; } ;   856     enum irq_gc_flags {   IRQ_GC_INIT_MASK_CACHE = 1,   IRQ_GC_INIT_NESTED_LOCK = 2,   IRQ_GC_MASK_CACHE_PER_TYPE = 4,   IRQ_GC_NO_MASK = 8,   IRQ_GC_BE_IO = 16 } ;   864     struct irq_domain_chip_generic {   unsigned int irqs_per_chip;   unsigned int num_chips;   unsigned int irq_flags_to_clear;   unsigned int irq_flags_to_set;   enum irq_gc_flags gc_flags;   struct irq_chip_generic *gc[0U]; } ;    51     struct irq_fwspec {   struct fwnode_handle *fwnode;   int param_count;   u32 param[16U]; } ;    64     enum irq_domain_bus_token {   DOMAIN_BUS_ANY = 0,   DOMAIN_BUS_WIRED = 1,   DOMAIN_BUS_PCI_MSI = 2,   DOMAIN_BUS_PLATFORM_MSI = 3,   DOMAIN_BUS_NEXUS = 4,   DOMAIN_BUS_IPI = 5,   DOMAIN_BUS_FSL_MC_MSI = 6 } ;    74     struct irq_domain_ops {   int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token );   int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token );   int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t );   void (*unmap)(struct irq_domain *, unsigned int);   int (*xlate)(struct irq_domain *, struct 
device_node *, const u32 *, unsigned int, unsigned long *, unsigned int *);   int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *);   void (*free)(struct irq_domain *, unsigned int, unsigned int);   void (*activate)(struct irq_domain *, struct irq_data *);   void (*deactivate)(struct irq_domain *, struct irq_data *);   int (*translate)(struct irq_domain *, struct irq_fwspec *, unsigned long *, unsigned int *); } ;   122     struct irq_domain {   struct list_head link;   const char *name;   const struct irq_domain_ops *ops;   void *host_data;   unsigned int flags;   struct fwnode_handle *fwnode;   enum irq_domain_bus_token bus_token;   struct irq_domain_chip_generic *gc;   struct irq_domain *parent;   irq_hw_number_t hwirq_max;   unsigned int revmap_direct_max_irq;   unsigned int revmap_size;   struct radix_tree_root revmap_tree;   unsigned int linear_revmap[]; } ;   184     struct gpio_desc ;    93     struct irqaction {   irqreturn_t  (*handler)(int, void *);   void *dev_id;   void *percpu_dev_id;   struct irqaction *next;   irqreturn_t  (*thread_fn)(int, void *);   struct task_struct *thread;   struct irqaction *secondary;   unsigned int irq;   unsigned int flags;   unsigned long thread_flags;   unsigned long thread_mask;   const char *name;   struct proc_dir_entry *dir; } ;   214     struct irq_affinity_notify {   unsigned int irq;   struct kref kref;   struct work_struct work;   void (*notify)(struct irq_affinity_notify *, const cpumask_t *);   void (*release)(struct kref *); } ;   392     enum irqchip_irq_state {   IRQCHIP_STATE_PENDING = 0,   IRQCHIP_STATE_ACTIVE = 1,   IRQCHIP_STATE_MASKED = 2,   IRQCHIP_STATE_LINE_LEVEL = 3 } ;   494     struct tasklet_struct {   struct tasklet_struct *next;   unsigned long state;   atomic_t count;   void (*func)(unsigned long);   unsigned long data; } ;    56     struct iovec {   void *iov_base;   __kernel_size_t iov_len; } ;    21     struct kvec {   void *iov_base;   size_t iov_len; } ;    29     union __anonunion____missing_field_name_378 {   const struct iovec *iov;   const struct kvec *kvec;   const struct bio_vec *bvec;   struct pipe_inode_info *pipe; } ;    29     union __anonunion____missing_field_name_379 {   unsigned long nr_segs;   int idx; } ;    29     struct iov_iter {   int type;   size_t iov_offset;   size_t count;   union __anonunion____missing_field_name_378 __annonCompField85;   union __anonunion____missing_field_name_379 __annonCompField86; } ;   273     struct vm_fault {   unsigned int flags;   gfp_t gfp_mask;   unsigned long pgoff;   void *virtual_address;   struct page *cow_page;   struct page *page;   void *entry; } ;   308     struct fault_env {   struct vm_area_struct *vma;   unsigned long address;   unsigned int flags;   pmd_t *pmd;   pte_t *pte;   spinlock_t *ptl;   pgtable_t prealloc_pte; } ;   335     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*mremap)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int);   void (*map_pages)(struct fault_env *, unsigned long, unsigned long);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * 
(*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;  2450     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    21     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;  1418     struct dql {   unsigned int num_queued;   unsigned int adj_limit;   unsigned int last_obj_cnt;   unsigned int limit;   unsigned int num_completed;   unsigned int prev_ovlimit;   unsigned int prev_num_queued;   unsigned int prev_last_obj_cnt;   unsigned int lowest_slack;   unsigned long slack_start_time;   unsigned int max_limit;   unsigned int min_limit;   unsigned int slack_hold_time; } ;    11     typedef unsigned short __kernel_sa_family_t;    23     typedef __kernel_sa_family_t sa_family_t;    24     struct sockaddr {   sa_family_t sa_family;   char sa_data[14U]; } ;    43     struct __anonstruct_sync_serial_settings_391 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback; } ;    43     typedef struct __anonstruct_sync_serial_settings_391 sync_serial_settings;    50     struct __anonstruct_te1_settings_392 {   unsigned int clock_rate;   unsigned int clock_type;   unsigned short loopback;   unsigned int slot_map; } ;    50     typedef struct __anonstruct_te1_settings_392 te1_settings;    55     struct __anonstruct_raw_hdlc_proto_393 {   unsigned short encoding;   unsigned short parity; } ;    55     typedef struct __anonstruct_raw_hdlc_proto_393 raw_hdlc_proto;    65     struct __anonstruct_fr_proto_394 {   unsigned int t391;   unsigned int t392;   unsigned int n391;   unsigned int n392;   unsigned int n393;   unsigned short lmi;   unsigned short dce; } ;    65     typedef struct __anonstruct_fr_proto_394 fr_proto;    69     struct __anonstruct_fr_proto_pvc_395 {   unsigned int dlci; } ;    69     typedef struct __anonstruct_fr_proto_pvc_395 fr_proto_pvc;    74     struct __anonstruct_fr_proto_pvc_info_396 {   unsigned int dlci;   char master[16U]; } ;    74     typedef struct __anonstruct_fr_proto_pvc_info_396 fr_proto_pvc_info;    79     struct __anonstruct_cisco_proto_397 {   unsigned int interval;   unsigned int timeout; } ;    79     typedef struct __anonstruct_cisco_proto_397 cisco_proto;   117     struct ifmap {   unsigned long mem_start;   unsigned long mem_end;   unsigned short base_addr;   unsigned char irq;   unsigned char dma;   unsigned char port; } ;   197     union __anonunion_ifs_ifsu_398 {   raw_hdlc_proto *raw_hdlc;   cisco_proto *cisco;   fr_proto *fr;   fr_proto_pvc *fr_pvc;   fr_proto_pvc_info *fr_pvc_info;   sync_serial_settings *sync;   te1_settings *te1; } ;   197     struct if_settings {   unsigned int type;   unsigned int size;   union __anonunion_ifs_ifsu_398 ifs_ifsu; } ;   216     union __anonunion_ifr_ifrn_399 {   char ifrn_name[16U]; } ;   216     union __anonunion_ifr_ifru_400 {   struct sockaddr ifru_addr;   struct sockaddr ifru_dstaddr;   struct sockaddr ifru_broadaddr;   struct sockaddr ifru_netmask;   struct sockaddr ifru_hwaddr;   short ifru_flags;   int ifru_ivalue;   int ifru_mtu;   struct ifmap ifru_map;   char ifru_slave[16U];   char ifru_newname[16U];   void *ifru_data;   struct if_settings ifru_settings; } ;   216     struct ifreq {   union __anonunion_ifr_ifrn_399 ifr_ifrn;   union __anonunion_ifr_ifru_400 ifr_ifru; } ;    18     typedef s32 compat_time_t;    39     typedef s32 
compat_long_t;    45     typedef u32 compat_uptr_t;    46     struct compat_timespec {   compat_time_t tv_sec;   s32 tv_nsec; } ;   278     struct compat_robust_list {   compat_uptr_t next; } ;   282     struct compat_robust_list_head {   struct compat_robust_list list;   compat_long_t futex_offset;   compat_uptr_t list_op_pending; } ;   126     struct sk_buff ;   161     struct in6_addr ;    96     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 2,   DMA_NONE = 3 } ;   103     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long);   void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long);   dma_addr_t  (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long);   void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;    15     typedef u64 netdev_features_t;    70     union __anonunion_in6_u_426 {   __u8 u6_addr8[16U];   __be16 u6_addr16[8U];   __be32 u6_addr32[4U]; } ;    70     struct in6_addr {   union __anonunion_in6_u_426 in6_u; } ;    46     struct ethhdr {   unsigned char h_dest[6U];   unsigned char h_source[6U];   __be16 h_proto; } ;   205     struct pipe_buf_operations ;   205     struct pipe_buffer {   struct page *page;   unsigned int offset;   unsigned int len;   const struct pipe_buf_operations *ops;   unsigned int flags;   unsigned long private; } ;    27     struct pipe_inode_info {   struct mutex mutex;   wait_queue_head_t wait;   unsigned int nrbufs;   unsigned int curbuf;   unsigned int buffers;   unsigned int readers;   unsigned int writers;   unsigned int files;   unsigned int waiting_writers;   unsigned int r_counter;   unsigned int w_counter;   struct page *tmp_page;   struct fasync_struct *fasync_readers;   struct fasync_struct *fasync_writers;   struct pipe_buffer *bufs;   struct user_struct *user; } ;    63     struct pipe_buf_operations {   int can_merge;   int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);   void (*release)(struct pipe_inode_info *, struct pipe_buffer *);   int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);   void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ;   264     struct napi_struct ;   265     struct nf_conntrack {   atomic_t use; } ;   
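(Illustrative aside, not part of the trace: the struct dma_map_ops table listed above is the per-bus backend behind the kernel's generic DMA mapping API. The minimal sketch below shows how a driver would normally go through that API rather than call the ops table directly; the function and buffer names are hypothetical and this is not code taken from the verified driver.)

#include <linux/dma-mapping.h>

/* Map a kernel buffer for device reads (CPU -> device); the call is
 * dispatched through the bus's struct dma_map_ops. */
static int example_map_tx_buffer(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* the ops' mapping_error callback reported failure */
	return 0;
}

/* Release the mapping with the same size and direction it was created with. */
static void example_unmap_tx_buffer(struct device *dev, dma_addr_t handle,
                                    size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}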
254     union __anonunion____missing_field_name_438 {   __be32 ipv4_daddr;   struct in6_addr ipv6_daddr;   char neigh_header[8U]; } ;   254     struct nf_bridge_info {   atomic_t use;   unsigned char orig_proto;   unsigned char pkt_otherhost;   unsigned char in_prerouting;   unsigned char bridged_dnat;   __u16 frag_max_size;   struct net_device *physindev;   struct net_device *physoutdev;   union __anonunion____missing_field_name_438 __annonCompField93; } ;   278     struct sk_buff_head {   struct sk_buff *next;   struct sk_buff *prev;   __u32 qlen;   spinlock_t lock; } ;   310     struct skb_frag_struct ;   310     typedef struct skb_frag_struct skb_frag_t;   311     struct __anonstruct_page_439 {   struct page *p; } ;   311     struct skb_frag_struct {   struct __anonstruct_page_439 page;   __u32 page_offset;   __u32 size; } ;   344     struct skb_shared_hwtstamps {   ktime_t hwtstamp; } ;   410     struct skb_shared_info {   unsigned char nr_frags;   __u8 tx_flags;   unsigned short gso_size;   unsigned short gso_segs;   unsigned short gso_type;   struct sk_buff *frag_list;   struct skb_shared_hwtstamps hwtstamps;   u32 tskey;   __be32 ip6_frag_id;   atomic_t dataref;   void *destructor_arg;   skb_frag_t frags[17U]; } ;   500     typedef unsigned int sk_buff_data_t;   501     struct __anonstruct____missing_field_name_441 {   u32 stamp_us;   u32 stamp_jiffies; } ;   501     union __anonunion____missing_field_name_440 {   u64 v64;   struct __anonstruct____missing_field_name_441 __annonCompField94; } ;   501     struct skb_mstamp {   union __anonunion____missing_field_name_440 __annonCompField95; } ;   564     union __anonunion____missing_field_name_444 {   ktime_t tstamp;   struct skb_mstamp skb_mstamp; } ;   564     struct __anonstruct____missing_field_name_443 {   struct sk_buff *next;   struct sk_buff *prev;   union __anonunion____missing_field_name_444 __annonCompField96; } ;   564     union __anonunion____missing_field_name_442 {   struct __anonstruct____missing_field_name_443 __annonCompField97;   struct rb_node rbnode; } ;   564     struct sec_path ;   564     struct __anonstruct____missing_field_name_446 {   __u16 csum_start;   __u16 csum_offset; } ;   564     union __anonunion____missing_field_name_445 {   __wsum csum;   struct __anonstruct____missing_field_name_446 __annonCompField99; } ;   564     union __anonunion____missing_field_name_447 {   unsigned int napi_id;   unsigned int sender_cpu; } ;   564     union __anonunion____missing_field_name_448 {   __u32 mark;   __u32 reserved_tailroom; } ;   564     union __anonunion____missing_field_name_449 {   __be16 inner_protocol;   __u8 inner_ipproto; } ;   564     struct sk_buff {   union __anonunion____missing_field_name_442 __annonCompField98;   struct sock *sk;   struct net_device *dev;   char cb[48U];   unsigned long _skb_refdst;   void (*destructor)(struct sk_buff *);   struct sec_path *sp;   struct nf_conntrack *nfct;   struct nf_bridge_info *nf_bridge;   unsigned int len;   unsigned int data_len;   __u16 mac_len;   __u16 hdr_len;   __u16 queue_mapping;   __u8 __cloned_offset[0U];   unsigned char cloned;   unsigned char nohdr;   unsigned char fclone;   unsigned char peeked;   unsigned char head_frag;   unsigned char xmit_more;   unsigned char __unused;   __u32 headers_start[0U];   __u8 __pkt_type_offset[0U];   unsigned char pkt_type;   unsigned char pfmemalloc;   unsigned char ignore_df;   unsigned char nfctinfo;   unsigned char nf_trace;   unsigned char ip_summed;   unsigned char ooo_okay;   unsigned char l4_hash;   unsigned 
char sw_hash;   unsigned char wifi_acked_valid;   unsigned char wifi_acked;   unsigned char no_fcs;   unsigned char encapsulation;   unsigned char encap_hdr_csum;   unsigned char csum_valid;   unsigned char csum_complete_sw;   unsigned char csum_level;   unsigned char csum_bad;   unsigned char ndisc_nodetype;   unsigned char ipvs_property;   unsigned char inner_protocol_type;   unsigned char remcsum_offload;   unsigned char offload_fwd_mark;   __u16 tc_index;   __u16 tc_verd;   union __anonunion____missing_field_name_445 __annonCompField100;   __u32 priority;   int skb_iif;   __u32 hash;   __be16 vlan_proto;   __u16 vlan_tci;   union __anonunion____missing_field_name_447 __annonCompField101;   __u32 secmark;   union __anonunion____missing_field_name_448 __annonCompField102;   union __anonunion____missing_field_name_449 __annonCompField103;   __u16 inner_transport_header;   __u16 inner_network_header;   __u16 inner_mac_header;   __be16 protocol;   __u16 transport_header;   __u16 network_header;   __u16 mac_header;   __u32 headers_end[0U];   sk_buff_data_t tail;   sk_buff_data_t end;   unsigned char *head;   unsigned char *data;   unsigned int truesize;   atomic_t users; } ;   838     struct dst_entry ;    39     struct ethtool_cmd {   __u32 cmd;   __u32 supported;   __u32 advertising;   __u16 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 transceiver;   __u8 autoneg;   __u8 mdio_support;   __u32 maxtxpkt;   __u32 maxrxpkt;   __u16 speed_hi;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __u32 lp_advertising;   __u32 reserved[2U]; } ;   131     struct ethtool_drvinfo {   __u32 cmd;   char driver[32U];   char version[32U];   char fw_version[32U];   char bus_info[32U];   char erom_version[32U];   char reserved2[12U];   __u32 n_priv_flags;   __u32 n_stats;   __u32 testinfo_len;   __u32 eedump_len;   __u32 regdump_len; } ;   195     struct ethtool_wolinfo {   __u32 cmd;   __u32 supported;   __u32 wolopts;   __u8 sopass[6U]; } ;   239     struct ethtool_tunable {   __u32 cmd;   __u32 id;   __u32 type_id;   __u32 len;   void *data[0U]; } ;   251     struct ethtool_regs {   __u32 cmd;   __u32 version;   __u32 len;   __u8 data[0U]; } ;   273     struct ethtool_eeprom {   __u32 cmd;   __u32 magic;   __u32 offset;   __u32 len;   __u8 data[0U]; } ;   299     struct ethtool_eee {   __u32 cmd;   __u32 supported;   __u32 advertised;   __u32 lp_advertised;   __u32 eee_active;   __u32 eee_enabled;   __u32 tx_lpi_enabled;   __u32 tx_lpi_timer;   __u32 reserved[2U]; } ;   328     struct ethtool_modinfo {   __u32 cmd;   __u32 type;   __u32 eeprom_len;   __u32 reserved[8U]; } ;   345     struct ethtool_coalesce {   __u32 cmd;   __u32 rx_coalesce_usecs;   __u32 rx_max_coalesced_frames;   __u32 rx_coalesce_usecs_irq;   __u32 rx_max_coalesced_frames_irq;   __u32 tx_coalesce_usecs;   __u32 tx_max_coalesced_frames;   __u32 tx_coalesce_usecs_irq;   __u32 tx_max_coalesced_frames_irq;   __u32 stats_block_coalesce_usecs;   __u32 use_adaptive_rx_coalesce;   __u32 use_adaptive_tx_coalesce;   __u32 pkt_rate_low;   __u32 rx_coalesce_usecs_low;   __u32 rx_max_coalesced_frames_low;   __u32 tx_coalesce_usecs_low;   __u32 tx_max_coalesced_frames_low;   __u32 pkt_rate_high;   __u32 rx_coalesce_usecs_high;   __u32 rx_max_coalesced_frames_high;   __u32 tx_coalesce_usecs_high;   __u32 tx_max_coalesced_frames_high;   __u32 rate_sample_interval; } ;   444     struct ethtool_ringparam {   __u32 cmd;   __u32 rx_max_pending;   __u32 rx_mini_max_pending;   __u32 rx_jumbo_max_pending;   __u32 tx_max_pending;   __u32 
rx_pending;   __u32 rx_mini_pending;   __u32 rx_jumbo_pending;   __u32 tx_pending; } ;   481     struct ethtool_channels {   __u32 cmd;   __u32 max_rx;   __u32 max_tx;   __u32 max_other;   __u32 max_combined;   __u32 rx_count;   __u32 tx_count;   __u32 other_count;   __u32 combined_count; } ;   509     struct ethtool_pauseparam {   __u32 cmd;   __u32 autoneg;   __u32 rx_pause;   __u32 tx_pause; } ;   613     struct ethtool_test {   __u32 cmd;   __u32 flags;   __u32 reserved;   __u32 len;   __u64 data[0U]; } ;   645     struct ethtool_stats {   __u32 cmd;   __u32 n_stats;   __u64 data[0U]; } ;   687     struct ethtool_tcpip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be16 psrc;   __be16 pdst;   __u8 tos; } ;   720     struct ethtool_ah_espip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 spi;   __u8 tos; } ;   736     struct ethtool_usrip4_spec {   __be32 ip4src;   __be32 ip4dst;   __be32 l4_4_bytes;   __u8 tos;   __u8 ip_ver;   __u8 proto; } ;   756     struct ethtool_tcpip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be16 psrc;   __be16 pdst;   __u8 tclass; } ;   774     struct ethtool_ah_espip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be32 spi;   __u8 tclass; } ;   790     struct ethtool_usrip6_spec {   __be32 ip6src[4U];   __be32 ip6dst[4U];   __be32 l4_4_bytes;   __u8 tclass;   __u8 l4_proto; } ;   806     union ethtool_flow_union {   struct ethtool_tcpip4_spec tcp_ip4_spec;   struct ethtool_tcpip4_spec udp_ip4_spec;   struct ethtool_tcpip4_spec sctp_ip4_spec;   struct ethtool_ah_espip4_spec ah_ip4_spec;   struct ethtool_ah_espip4_spec esp_ip4_spec;   struct ethtool_usrip4_spec usr_ip4_spec;   struct ethtool_tcpip6_spec tcp_ip6_spec;   struct ethtool_tcpip6_spec udp_ip6_spec;   struct ethtool_tcpip6_spec sctp_ip6_spec;   struct ethtool_ah_espip6_spec ah_ip6_spec;   struct ethtool_ah_espip6_spec esp_ip6_spec;   struct ethtool_usrip6_spec usr_ip6_spec;   struct ethhdr ether_spec;   __u8 hdata[52U]; } ;   823     struct ethtool_flow_ext {   __u8 padding[2U];   unsigned char h_dest[6U];   __be16 vlan_etype;   __be16 vlan_tci;   __be32 data[2U]; } ;   842     struct ethtool_rx_flow_spec {   __u32 flow_type;   union ethtool_flow_union h_u;   struct ethtool_flow_ext h_ext;   union ethtool_flow_union m_u;   struct ethtool_flow_ext m_ext;   __u64 ring_cookie;   __u32 location; } ;   892     struct ethtool_rxnfc {   __u32 cmd;   __u32 flow_type;   __u64 data;   struct ethtool_rx_flow_spec fs;   __u32 rule_cnt;   __u32 rule_locs[0U]; } ;  1063     struct ethtool_flash {   __u32 cmd;   __u32 region;   char data[128U]; } ;  1071     struct ethtool_dump {   __u32 cmd;   __u32 version;   __u32 flag;   __u32 len;   __u8 data[0U]; } ;  1147     struct ethtool_ts_info {   __u32 cmd;   __u32 so_timestamping;   __s32 phc_index;   __u32 tx_types;   __u32 tx_reserved[3U];   __u32 rx_filters;   __u32 rx_reserved[3U]; } ;  1522     struct ethtool_link_settings {   __u32 cmd;   __u32 speed;   __u8 duplex;   __u8 port;   __u8 phy_address;   __u8 autoneg;   __u8 mdio_support;   __u8 eth_tp_mdix;   __u8 eth_tp_mdix_ctrl;   __s8 link_mode_masks_nwords;   __u32 reserved[8U];   __u32 link_mode_masks[0U]; } ;    39     enum ethtool_phys_id_state {   ETHTOOL_ID_INACTIVE = 0,   ETHTOOL_ID_ACTIVE = 1,   ETHTOOL_ID_ON = 2,   ETHTOOL_ID_OFF = 3 } ;    97     struct __anonstruct_link_modes_453 {   unsigned long supported[1U];   unsigned long advertising[1U];   unsigned long lp_advertising[1U]; } ;    97     struct ethtool_link_ksettings {   struct ethtool_link_settings base;   struct 
__anonstruct_link_modes_453 link_modes; } ;   158     struct ethtool_ops {   int (*get_settings)(struct net_device *, struct ethtool_cmd *);   int (*set_settings)(struct net_device *, struct ethtool_cmd *);   void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);   int (*get_regs_len)(struct net_device *);   void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);   void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);   int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);   u32  (*get_msglevel)(struct net_device *);   void (*set_msglevel)(struct net_device *, u32 );   int (*nway_reset)(struct net_device *);   u32  (*get_link)(struct net_device *);   int (*get_eeprom_len)(struct net_device *);   int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);   int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);   void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);   int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);   void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *);   void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);   void (*get_strings)(struct net_device *, u32 , u8 *);   int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state );   void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);   int (*begin)(struct net_device *);   void (*complete)(struct net_device *);   u32  (*get_priv_flags)(struct net_device *);   int (*set_priv_flags)(struct net_device *, u32 );   int (*get_sset_count)(struct net_device *, int);   int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *);   int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);   int (*flash_device)(struct net_device *, struct ethtool_flash *);   int (*reset)(struct net_device *, u32 *);   u32  (*get_rxfh_key_size)(struct net_device *);   u32  (*get_rxfh_indir_size)(struct net_device *);   int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *);   int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 );   void (*get_channels)(struct net_device *, struct ethtool_channels *);   int (*set_channels)(struct net_device *, struct ethtool_channels *);   int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);   int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *);   int (*set_dump)(struct net_device *, struct ethtool_dump *);   int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);   int (*get_module_info)(struct net_device *, struct ethtool_modinfo *);   int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);   int (*get_eee)(struct net_device *, struct ethtool_eee *);   int (*set_eee)(struct net_device *, struct ethtool_eee *);   int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *);   int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *);   int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *);   int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *);   int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ;   375    
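The struct ethtool_ops table above enumerates every callback the ethtool core may invoke on a network driver; a driver normally wires up only the handlers it actually supports through a designated initializer and leaves the rest NULL. A minimal sketch of that pattern follows (the example_* names are illustrative and are not taken from this trace; ethtool_op_get_link is the generic helper exported by the networking core):

    static void example_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
    {
            /* identification strings are fixed-size char arrays in ethtool_drvinfo */
            strlcpy(info->driver, "example", sizeof(info->driver));
            strlcpy(info->version, "1.0", sizeof(info->version));
    }

    static const struct ethtool_ops example_ethtool_ops = {
            .get_drvinfo = example_get_drvinfo,
            .get_link    = ethtool_op_get_link,
    };

    /* installed once at probe time: netdev->ethtool_ops = &example_ethtool_ops; */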
 struct prot_inuse ;   376     struct netns_core {   struct ctl_table_header *sysctl_hdr;   int sysctl_somaxconn;   struct prot_inuse *inuse; } ;    38     struct u64_stats_sync { } ;   164     struct ipstats_mib {   u64 mibs[36U];   struct u64_stats_sync syncp; } ;    61     struct icmp_mib {   unsigned long mibs[28U]; } ;    67     struct icmpmsg_mib {   atomic_long_t mibs[512U]; } ;    72     struct icmpv6_mib {   unsigned long mibs[6U]; } ;    83     struct icmpv6msg_mib {   atomic_long_t mibs[512U]; } ;    93     struct tcp_mib {   unsigned long mibs[16U]; } ;   100     struct udp_mib {   unsigned long mibs[9U]; } ;   106     struct linux_mib {   unsigned long mibs[118U]; } ;   112     struct linux_xfrm_mib {   unsigned long mibs[29U]; } ;   118     struct netns_mib {   struct tcp_mib *tcp_statistics;   struct ipstats_mib *ip_statistics;   struct linux_mib *net_statistics;   struct udp_mib *udp_statistics;   struct udp_mib *udplite_statistics;   struct icmp_mib *icmp_statistics;   struct icmpmsg_mib *icmpmsg_statistics;   struct proc_dir_entry *proc_net_devsnmp6;   struct udp_mib *udp_stats_in6;   struct udp_mib *udplite_stats_in6;   struct ipstats_mib *ipv6_statistics;   struct icmpv6_mib *icmpv6_statistics;   struct icmpv6msg_mib *icmpv6msg_statistics;   struct linux_xfrm_mib *xfrm_statistics; } ;    26     struct netns_unix {   int sysctl_max_dgram_qlen;   struct ctl_table_header *ctl; } ;    12     struct netns_packet {   struct mutex sklist_lock;   struct hlist_head sklist; } ;    14     struct netns_frags {   struct percpu_counter mem;   int timeout;   int high_thresh;   int low_thresh;   int max_dist; } ;   187     struct ipv4_devconf ;   188     struct fib_rules_ops ;   189     struct fib_table ;   190     struct local_ports {   seqlock_t lock;   int range[2U];   bool warned; } ;    24     struct ping_group_range {   seqlock_t lock;   kgid_t range[2U]; } ;    29     struct inet_peer_base ;    29     struct xt_table ;    29     struct netns_ipv4 {   struct ctl_table_header *forw_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *ipv4_hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *xfrm4_hdr;   struct ipv4_devconf *devconf_all;   struct ipv4_devconf *devconf_dflt;   struct fib_rules_ops *rules_ops;   bool fib_has_custom_rules;   struct fib_table *fib_main;   struct fib_table *fib_default;   int fib_num_tclassid_users;   struct hlist_head *fib_table_hash;   bool fib_offload_disabled;   struct sock *fibnl;   struct sock **icmp_sk;   struct sock *mc_autojoin_sk;   struct inet_peer_base *peers;   struct sock **tcp_sk;   struct netns_frags frags;   struct xt_table *iptable_filter;   struct xt_table *iptable_mangle;   struct xt_table *iptable_raw;   struct xt_table *arptable_filter;   struct xt_table *iptable_security;   struct xt_table *nat_table;   int sysctl_icmp_echo_ignore_all;   int sysctl_icmp_echo_ignore_broadcasts;   int sysctl_icmp_ignore_bogus_error_responses;   int sysctl_icmp_ratelimit;   int sysctl_icmp_ratemask;   int sysctl_icmp_errors_use_inbound_ifaddr;   struct local_ports ip_local_ports;   int sysctl_tcp_ecn;   int sysctl_tcp_ecn_fallback;   int sysctl_ip_default_ttl;   int sysctl_ip_no_pmtu_disc;   int sysctl_ip_fwd_use_pmtu;   int sysctl_ip_nonlocal_bind;   int sysctl_ip_dynaddr;   int sysctl_ip_early_demux;   int sysctl_fwmark_reflect;   int sysctl_tcp_fwmark_accept;   int sysctl_tcp_l3mdev_accept;   int sysctl_tcp_mtu_probing;   int sysctl_tcp_base_mss;   int sysctl_tcp_probe_threshold;   u32 sysctl_tcp_probe_interval;   
int sysctl_tcp_keepalive_time;   int sysctl_tcp_keepalive_probes;   int sysctl_tcp_keepalive_intvl;   int sysctl_tcp_syn_retries;   int sysctl_tcp_synack_retries;   int sysctl_tcp_syncookies;   int sysctl_tcp_reordering;   int sysctl_tcp_retries1;   int sysctl_tcp_retries2;   int sysctl_tcp_orphan_retries;   int sysctl_tcp_fin_timeout;   unsigned int sysctl_tcp_notsent_lowat;   int sysctl_igmp_max_memberships;   int sysctl_igmp_max_msf;   int sysctl_igmp_llm_reports;   int sysctl_igmp_qrv;   struct ping_group_range ping_group_range;   atomic_t dev_addr_genid;   unsigned long *sysctl_local_reserved_ports;   struct list_head mr_tables;   struct fib_rules_ops *mr_rules_ops;   int sysctl_fib_multipath_use_neigh;   atomic_t rt_genid; } ;   141     struct neighbour ;   141     struct dst_ops {   unsigned short family;   unsigned int gc_thresh;   int (*gc)(struct dst_ops *);   struct dst_entry * (*check)(struct dst_entry *, __u32 );   unsigned int (*default_advmss)(const struct dst_entry *);   unsigned int (*mtu)(const struct dst_entry *);   u32 * (*cow_metrics)(struct dst_entry *, unsigned long);   void (*destroy)(struct dst_entry *);   void (*ifdown)(struct dst_entry *, struct net_device *, int);   struct dst_entry * (*negative_advice)(struct dst_entry *);   void (*link_failure)(struct sk_buff *);   void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 );   void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);   int (*local_out)(struct net *, struct sock *, struct sk_buff *);   struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);   struct kmem_cache *kmem_cachep;   struct percpu_counter pcpuc_entries; } ;    73     struct netns_sysctl_ipv6 {   struct ctl_table_header *hdr;   struct ctl_table_header *route_hdr;   struct ctl_table_header *icmp_hdr;   struct ctl_table_header *frags_hdr;   struct ctl_table_header *xfrm6_hdr;   int bindv6only;   int flush_delay;   int ip6_rt_max_size;   int ip6_rt_gc_min_interval;   int ip6_rt_gc_timeout;   int ip6_rt_gc_interval;   int ip6_rt_gc_elasticity;   int ip6_rt_mtu_expires;   int ip6_rt_min_advmss;   int flowlabel_consistency;   int auto_flowlabels;   int icmpv6_time;   int anycast_src_echo_reply;   int ip_nonlocal_bind;   int fwmark_reflect;   int idgen_retries;   int idgen_delay;   int flowlabel_state_ranges; } ;    40     struct ipv6_devconf ;    40     struct rt6_info ;    40     struct rt6_statistics ;    40     struct fib6_table ;    40     struct netns_ipv6 {   struct netns_sysctl_ipv6 sysctl;   struct ipv6_devconf *devconf_all;   struct ipv6_devconf *devconf_dflt;   struct inet_peer_base *peers;   struct netns_frags frags;   struct xt_table *ip6table_filter;   struct xt_table *ip6table_mangle;   struct xt_table *ip6table_raw;   struct xt_table *ip6table_security;   struct xt_table *ip6table_nat;   struct rt6_info *ip6_null_entry;   struct rt6_statistics *rt6_stats;   struct timer_list ip6_fib_timer;   struct hlist_head *fib_table_hash;   struct fib6_table *fib6_main_tbl;   struct list_head fib6_walkers;   struct dst_ops ip6_dst_ops;   rwlock_t fib6_walker_lock;   spinlock_t fib6_gc_lock;   unsigned int ip6_rt_gc_expire;   unsigned long ip6_rt_last_gc;   struct rt6_info *ip6_prohibit_entry;   struct rt6_info *ip6_blk_hole_entry;   struct fib6_table *fib6_local_tbl;   struct fib_rules_ops *fib6_rules_ops;   struct sock **icmp_sk;   struct sock *ndisc_sk;   struct sock *tcp_sk;   struct sock *igmp_sk;   struct sock *mc_autojoin_sk;   struct list_head mr6_tables;   struct 
fib_rules_ops *mr6_rules_ops;   atomic_t dev_addr_genid;   atomic_t fib6_sernum; } ;    89     struct netns_nf_frag {   struct netns_sysctl_ipv6 sysctl;   struct netns_frags frags; } ;    95     struct netns_sysctl_lowpan {   struct ctl_table_header *frags_hdr; } ;    14     struct netns_ieee802154_lowpan {   struct netns_sysctl_lowpan sysctl;   struct netns_frags frags; } ;    20     struct sctp_mib ;    21     struct netns_sctp {   struct sctp_mib *sctp_statistics;   struct proc_dir_entry *proc_net_sctp;   struct ctl_table_header *sysctl_header;   struct sock *ctl_sock;   struct list_head local_addr_list;   struct list_head addr_waitq;   struct timer_list addr_wq_timer;   struct list_head auto_asconf_splist;   spinlock_t addr_wq_lock;   spinlock_t local_addr_lock;   unsigned int rto_initial;   unsigned int rto_min;   unsigned int rto_max;   int rto_alpha;   int rto_beta;   int max_burst;   int cookie_preserve_enable;   char *sctp_hmac_alg;   unsigned int valid_cookie_life;   unsigned int sack_timeout;   unsigned int hb_interval;   int max_retrans_association;   int max_retrans_path;   int max_retrans_init;   int pf_retrans;   int pf_enable;   int sndbuf_policy;   int rcvbuf_policy;   int default_auto_asconf;   int addip_enable;   int addip_noauth;   int prsctp_enable;   int auth_enable;   int scope_policy;   int rwnd_upd_shift;   unsigned long max_autoclose; } ;   141     struct netns_dccp {   struct sock *v4_ctl_sk;   struct sock *v6_ctl_sk; } ;    79     struct nf_logger ;    80     struct nf_queue_handler ;    81     struct nf_hook_entry ;    81     struct netns_nf {   struct proc_dir_entry *proc_netfilter;   const struct nf_queue_handler *queue_handler;   const struct nf_logger *nf_loggers[13U];   struct ctl_table_header *nf_log_dir_header;   struct nf_hook_entry *hooks[13U][8U]; } ;    21     struct ebt_table ;    22     struct netns_xt {   struct list_head tables[13U];   bool notrack_deprecated_warning;   bool clusterip_deprecated_warning;   struct ebt_table *broute_table;   struct ebt_table *frame_filter;   struct ebt_table *frame_nat; } ;    19     struct hlist_nulls_node ;    19     struct hlist_nulls_head {   struct hlist_nulls_node *first; } ;    23     struct hlist_nulls_node {   struct hlist_nulls_node *next;   struct hlist_nulls_node **pprev; } ;    32     struct nf_proto_net {   struct ctl_table_header *ctl_table_header;   struct ctl_table *ctl_table;   unsigned int users; } ;    21     struct nf_generic_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    26     struct nf_tcp_net {   struct nf_proto_net pn;   unsigned int timeouts[14U];   unsigned int tcp_loose;   unsigned int tcp_be_liberal;   unsigned int tcp_max_retrans; } ;    40     struct nf_udp_net {   struct nf_proto_net pn;   unsigned int timeouts[2U]; } ;    45     struct nf_icmp_net {   struct nf_proto_net pn;   unsigned int timeout; } ;    50     struct nf_ip_net {   struct nf_generic_net generic;   struct nf_tcp_net tcp;   struct nf_udp_net udp;   struct nf_icmp_net icmp;   struct nf_icmp_net icmpv6; } ;    58     struct ct_pcpu {   spinlock_t lock;   struct hlist_nulls_head unconfirmed;   struct hlist_nulls_head dying; } ;    64     struct ip_conntrack_stat ;    64     struct nf_ct_event_notifier ;    64     struct nf_exp_event_notifier ;    64     struct netns_ct {   atomic_t count;   unsigned int expect_count;   struct delayed_work ecache_dwork;   bool ecache_dwork_pending;   struct ctl_table_header *sysctl_header;   struct ctl_table_header *acct_sysctl_header;   struct ctl_table_header 
*tstamp_sysctl_header;   struct ctl_table_header *event_sysctl_header;   struct ctl_table_header *helper_sysctl_header;   unsigned int sysctl_log_invalid;   int sysctl_events;   int sysctl_acct;   int sysctl_auto_assign_helper;   bool auto_assign_helper_warned;   int sysctl_tstamp;   int sysctl_checksum;   struct ct_pcpu *pcpu_lists;   struct ip_conntrack_stat *stat;   struct nf_ct_event_notifier *nf_conntrack_event_cb;   struct nf_exp_event_notifier *nf_expect_event_cb;   struct nf_ip_net nf_ct_proto;   unsigned int labels_used;   u8 label_words; } ;    96     struct nft_af_info ;    97     struct netns_nftables {   struct list_head af_info;   struct list_head commit_list;   struct nft_af_info *ipv4;   struct nft_af_info *ipv6;   struct nft_af_info *inet;   struct nft_af_info *arp;   struct nft_af_info *bridge;   struct nft_af_info *netdev;   unsigned int base_seq;   u8 gencursor; } ;   509     struct flow_cache_percpu {   struct hlist_head *hash_table;   int hash_count;   u32 hash_rnd;   int hash_rnd_recalc;   struct tasklet_struct flush_tasklet; } ;    16     struct flow_cache {   u32 hash_shift;   struct flow_cache_percpu *percpu;   struct notifier_block hotcpu_notifier;   int low_watermark;   int high_watermark;   struct timer_list rnd_timer; } ;    25     struct xfrm_policy_hash {   struct hlist_head *table;   unsigned int hmask;   u8 dbits4;   u8 sbits4;   u8 dbits6;   u8 sbits6; } ;    21     struct xfrm_policy_hthresh {   struct work_struct work;   seqlock_t lock;   u8 lbits4;   u8 rbits4;   u8 lbits6;   u8 rbits6; } ;    30     struct netns_xfrm {   struct list_head state_all;   struct hlist_head *state_bydst;   struct hlist_head *state_bysrc;   struct hlist_head *state_byspi;   unsigned int state_hmask;   unsigned int state_num;   struct work_struct state_hash_work;   struct list_head policy_all;   struct hlist_head *policy_byidx;   unsigned int policy_idx_hmask;   struct hlist_head policy_inexact[3U];   struct xfrm_policy_hash policy_bydst[3U];   unsigned int policy_count[6U];   struct work_struct policy_hash_work;   struct xfrm_policy_hthresh policy_hthresh;   struct sock *nlsk;   struct sock *nlsk_stash;   u32 sysctl_aevent_etime;   u32 sysctl_aevent_rseqth;   int sysctl_larval_drop;   u32 sysctl_acq_expires;   struct ctl_table_header *sysctl_hdr;   struct dst_ops xfrm4_dst_ops;   struct dst_ops xfrm6_dst_ops;   spinlock_t xfrm_state_lock;   spinlock_t xfrm_policy_lock;   struct mutex xfrm_cfg_mutex;   struct flow_cache flow_cache_global;   atomic_t flow_cache_genid;   struct list_head flow_cache_gc_list;   atomic_t flow_cache_gc_count;   spinlock_t flow_cache_gc_lock;   struct work_struct flow_cache_gc_work;   struct work_struct flow_cache_flush_work;   struct mutex flow_flush_sem; } ;    87     struct mpls_route ;    88     struct netns_mpls {   size_t platform_labels;   struct mpls_route **platform_label;   struct ctl_table_header *ctl; } ;    16     struct proc_ns_operations ;    17     struct ns_common {   atomic_long_t stashed;   const struct proc_ns_operations *ops;   unsigned int inum; } ;    11     struct net_generic ;    12     struct netns_ipvs ;    13     struct ucounts ;    13     struct net {   atomic_t passive;   atomic_t count;   spinlock_t rules_mod_lock;   atomic64_t cookie_gen;   struct list_head list;   struct list_head cleanup_list;   struct list_head exit_list;   struct user_namespace *user_ns;   struct ucounts *ucounts;   spinlock_t nsid_lock;   struct idr netns_ids;   struct ns_common ns;   struct proc_dir_entry *proc_net;   struct proc_dir_entry 
*proc_net_stat;   struct ctl_table_set sysctls;   struct sock *rtnl;   struct sock *genl_sock;   struct list_head dev_base_head;   struct hlist_head *dev_name_head;   struct hlist_head *dev_index_head;   unsigned int dev_base_seq;   int ifindex;   unsigned int dev_unreg_count;   struct list_head rules_ops;   struct net_device *loopback_dev;   struct netns_core core;   struct netns_mib mib;   struct netns_packet packet;   struct netns_unix unx;   struct netns_ipv4 ipv4;   struct netns_ipv6 ipv6;   struct netns_ieee802154_lowpan ieee802154_lowpan;   struct netns_sctp sctp;   struct netns_dccp dccp;   struct netns_nf nf;   struct netns_xt xt;   struct netns_ct ct;   struct netns_nftables nft;   struct netns_nf_frag nf_frag;   struct sock *nfnl;   struct sock *nfnl_stash;   struct list_head nfnl_acct_list;   struct list_head nfct_timeout_list;   struct sk_buff_head wext_nlevents;   struct net_generic *gen;   struct netns_xfrm xfrm;   struct netns_ipvs *ipvs;   struct netns_mpls mpls;   struct sock *diag_nlsk;   atomic_t fnhe_genid; } ;   248     struct __anonstruct_possible_net_t_459 {   struct net *net; } ;   248     typedef struct __anonstruct_possible_net_t_459 possible_net_t;   296     struct mii_bus ;   303     struct mdio_device {   struct device dev;   const struct dev_pm_ops *pm_ops;   struct mii_bus *bus;   int (*bus_match)(struct device *, struct device_driver *);   void (*device_free)(struct mdio_device *);   void (*device_remove)(struct mdio_device *);   int addr;   int flags; } ;    41     struct mdio_driver_common {   struct device_driver driver;   int flags; } ;   244     struct phy_device ;   245     enum ldv_31859 {   PHY_INTERFACE_MODE_NA = 0,   PHY_INTERFACE_MODE_MII = 1,   PHY_INTERFACE_MODE_GMII = 2,   PHY_INTERFACE_MODE_SGMII = 3,   PHY_INTERFACE_MODE_TBI = 4,   PHY_INTERFACE_MODE_REVMII = 5,   PHY_INTERFACE_MODE_RMII = 6,   PHY_INTERFACE_MODE_RGMII = 7,   PHY_INTERFACE_MODE_RGMII_ID = 8,   PHY_INTERFACE_MODE_RGMII_RXID = 9,   PHY_INTERFACE_MODE_RGMII_TXID = 10,   PHY_INTERFACE_MODE_RTBI = 11,   PHY_INTERFACE_MODE_SMII = 12,   PHY_INTERFACE_MODE_XGMII = 13,   PHY_INTERFACE_MODE_MOCA = 14,   PHY_INTERFACE_MODE_QSGMII = 15,   PHY_INTERFACE_MODE_TRGMII = 16,   PHY_INTERFACE_MODE_MAX = 17 } ;    85     typedef enum ldv_31859 phy_interface_t;   133     enum ldv_31911 {   MDIOBUS_ALLOCATED = 1,   MDIOBUS_REGISTERED = 2,   MDIOBUS_UNREGISTERED = 3,   MDIOBUS_RELEASED = 4 } ;   140     struct mii_bus {   struct module *owner;   const char *name;   char id[17U];   void *priv;   int (*read)(struct mii_bus *, int, int);   int (*write)(struct mii_bus *, int, int, u16 );   int (*reset)(struct mii_bus *);   struct mutex mdio_lock;   struct device *parent;   enum ldv_31911 state;   struct device dev;   struct mdio_device *mdio_map[32U];   u32 phy_mask;   u32 phy_ignore_ta_mask;   int irq[32U]; } ;   221     enum phy_state {   PHY_DOWN = 0,   PHY_STARTING = 1,   PHY_READY = 2,   PHY_PENDING = 3,   PHY_UP = 4,   PHY_AN = 5,   PHY_RUNNING = 6,   PHY_NOLINK = 7,   PHY_FORCING = 8,   PHY_CHANGELINK = 9,   PHY_HALTED = 10,   PHY_RESUMING = 11 } ;   236     struct phy_c45_device_ids {   u32 devices_in_package;   u32 device_ids[8U]; } ;   329     struct phy_driver ;   329     struct phy_device {   struct mdio_device mdio;   struct phy_driver *drv;   u32 phy_id;   struct phy_c45_device_ids c45_ids;   bool is_c45;   bool is_internal;   bool is_pseudo_fixed_link;   bool has_fixups;   bool suspended;   enum phy_state state;   u32 dev_flags;   phy_interface_t interface;   int speed;   int duplex;   
int pause;   int asym_pause;   int link;   u32 interrupts;   u32 supported;   u32 advertising;   u32 lp_advertising;   int autoneg;   int link_timeout;   int irq;   void *priv;   struct work_struct phy_queue;   struct delayed_work state_queue;   atomic_t irq_disable;   struct mutex lock;   struct net_device *attached_dev;   u8 mdix;   void (*adjust_link)(struct net_device *); } ;   431     struct phy_driver {   struct mdio_driver_common mdiodrv;   u32 phy_id;   char *name;   unsigned int phy_id_mask;   u32 features;   u32 flags;   const void *driver_data;   int (*soft_reset)(struct phy_device *);   int (*config_init)(struct phy_device *);   int (*probe)(struct phy_device *);   int (*suspend)(struct phy_device *);   int (*resume)(struct phy_device *);   int (*config_aneg)(struct phy_device *);   int (*aneg_done)(struct phy_device *);   int (*read_status)(struct phy_device *);   int (*ack_interrupt)(struct phy_device *);   int (*config_intr)(struct phy_device *);   int (*did_interrupt)(struct phy_device *);   void (*remove)(struct phy_device *);   int (*match_phy_device)(struct phy_device *);   int (*ts_info)(struct phy_device *, struct ethtool_ts_info *);   int (*hwtstamp)(struct phy_device *, struct ifreq *);   bool  (*rxtstamp)(struct phy_device *, struct sk_buff *, int);   void (*txtstamp)(struct phy_device *, struct sk_buff *, int);   int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *);   void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *);   void (*link_change_notify)(struct phy_device *);   int (*read_mmd_indirect)(struct phy_device *, int, int, int);   void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 );   int (*module_info)(struct phy_device *, struct ethtool_modinfo *);   int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *);   int (*get_sset_count)(struct phy_device *);   void (*get_strings)(struct phy_device *, u8 *);   void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ;   844     struct fixed_phy_status {   int link;   int speed;   int duplex;   int pause;   int asym_pause; } ;    27     enum dsa_tag_protocol {   DSA_TAG_PROTO_NONE = 0,   DSA_TAG_PROTO_DSA = 1,   DSA_TAG_PROTO_TRAILER = 2,   DSA_TAG_PROTO_EDSA = 3,   DSA_TAG_PROTO_BRCM = 4,   DSA_TAG_PROTO_QCA = 5,   DSA_TAG_LAST = 6 } ;    37     struct dsa_chip_data {   struct device *host_dev;   int sw_addr;   int eeprom_len;   struct device_node *of_node;   char *port_names[12U];   struct device_node *port_dn[12U];   s8 rtable[4U]; } ;    71     struct dsa_platform_data {   struct device *netdev;   struct net_device *of_netdev;   int nr_chips;   struct dsa_chip_data *chip; } ;    87     struct packet_type ;    88     struct dsa_switch ;    88     struct dsa_device_ops ;    88     struct dsa_switch_tree {   struct list_head list;   u32 tree;   struct kref refcount;   bool applied;   struct dsa_platform_data *pd;   struct net_device *master_netdev;   int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   struct ethtool_ops master_ethtool_ops;   const struct ethtool_ops *master_orig_ethtool_ops;   s8 cpu_switch;   s8 cpu_port;   struct dsa_switch *ds[4U];   const struct dsa_device_ops *tag_ops; } ;   141     struct dsa_port {   struct net_device *netdev;   struct device_node *dn;   unsigned int ageing_time;   u8 stp_state; } ;   148     struct dsa_switch_ops ;   148     struct dsa_switch {   struct device *dev;   struct dsa_switch_tree *dst;   int index;   void *priv;   struct dsa_chip_data *cd;   struct 
dsa_switch_ops *ops;   s8 rtable[4U];   char hwmon_name[24U];   struct device *hwmon_dev;   struct net_device *master_netdev;   u32 dsa_port_mask;   u32 cpu_port_mask;   u32 enabled_port_mask;   u32 phys_mii_mask;   struct dsa_port ports[12U];   struct mii_bus *slave_mii_bus; } ;   235     struct switchdev_trans ;   236     struct switchdev_obj ;   237     struct switchdev_obj_port_fdb ;   238     struct switchdev_obj_port_mdb ;   239     struct switchdev_obj_port_vlan ;   240     struct dsa_switch_ops {   struct list_head list;   const char * (*probe)(struct device *, struct device *, int, void **);   enum dsa_tag_protocol  (*get_tag_protocol)(struct dsa_switch *);   int (*setup)(struct dsa_switch *);   int (*set_addr)(struct dsa_switch *, u8 *);   u32  (*get_phy_flags)(struct dsa_switch *, int);   int (*phy_read)(struct dsa_switch *, int, int);   int (*phy_write)(struct dsa_switch *, int, int, u16 );   void (*adjust_link)(struct dsa_switch *, int, struct phy_device *);   void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *);   void (*get_strings)(struct dsa_switch *, int, uint8_t *);   void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *);   int (*get_sset_count)(struct dsa_switch *);   void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *);   int (*suspend)(struct dsa_switch *);   int (*resume)(struct dsa_switch *);   int (*port_enable)(struct dsa_switch *, int, struct phy_device *);   void (*port_disable)(struct dsa_switch *, int, struct phy_device *);   int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *);   int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *);   int (*get_temp)(struct dsa_switch *, int *);   int (*get_temp_limit)(struct dsa_switch *, int *);   int (*set_temp_limit)(struct dsa_switch *, int);   int (*get_temp_alarm)(struct dsa_switch *, bool *);   int (*get_eeprom_len)(struct dsa_switch *);   int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *);   int (*get_regs_len)(struct dsa_switch *, int);   void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *);   int (*set_ageing_time)(struct dsa_switch *, unsigned int);   int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *);   void (*port_bridge_leave)(struct dsa_switch *, int);   void (*port_stp_state_set)(struct dsa_switch *, int, u8 );   void (*port_fast_age)(struct dsa_switch *, int);   int (*port_vlan_filtering)(struct dsa_switch *, int, bool );   int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *);   int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *);   int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *));   int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *);   int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *);   int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *));   int (*port_mdb_prepare)(struct 
dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *);   void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *, struct switchdev_trans *);   int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *);   int (*port_mdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_mdb *, int (*)(struct switchdev_obj *)); } ;   407     struct ieee_ets {   __u8 willing;   __u8 ets_cap;   __u8 cbs;   __u8 tc_tx_bw[8U];   __u8 tc_rx_bw[8U];   __u8 tc_tsa[8U];   __u8 prio_tc[8U];   __u8 tc_reco_bw[8U];   __u8 tc_reco_tsa[8U];   __u8 reco_prio_tc[8U]; } ;    69     struct ieee_maxrate {   __u64 tc_maxrate[8U]; } ;    87     struct ieee_qcn {   __u8 rpg_enable[8U];   __u32 rppp_max_rps[8U];   __u32 rpg_time_reset[8U];   __u32 rpg_byte_reset[8U];   __u32 rpg_threshold[8U];   __u32 rpg_max_rate[8U];   __u32 rpg_ai_rate[8U];   __u32 rpg_hai_rate[8U];   __u32 rpg_gd[8U];   __u32 rpg_min_dec_fac[8U];   __u32 rpg_min_rate[8U];   __u32 cndd_state_machine[8U]; } ;   132     struct ieee_qcn_stats {   __u64 rppp_rp_centiseconds[8U];   __u32 rppp_created_rps[8U]; } ;   144     struct ieee_pfc {   __u8 pfc_cap;   __u8 pfc_en;   __u8 mbc;   __u16 delay;   __u64 requests[8U];   __u64 indications[8U]; } ;   164     struct cee_pg {   __u8 willing;   __u8 error;   __u8 pg_en;   __u8 tcs_supported;   __u8 pg_bw[8U];   __u8 prio_pg[8U]; } ;   187     struct cee_pfc {   __u8 willing;   __u8 error;   __u8 pfc_en;   __u8 tcs_supported; } ;   202     struct dcb_app {   __u8 selector;   __u8 priority;   __u16 protocol; } ;   236     struct dcb_peer_app_info {   __u8 willing;   __u8 error; } ;    40     struct dcbnl_rtnl_ops {   int (*ieee_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_setets)(struct net_device *, struct ieee_ets *);   int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *);   int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *);   int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *);   int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *);   int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *);   int (*ieee_getapp)(struct net_device *, struct dcb_app *);   int (*ieee_setapp)(struct net_device *, struct dcb_app *);   int (*ieee_delapp)(struct net_device *, struct dcb_app *);   int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *);   int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *);   u8  (*getstate)(struct net_device *);   u8  (*setstate)(struct net_device *, u8 );   void (*getpermhwaddr)(struct net_device *, u8 *);   void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgtx)(struct net_device *, int, u8 );   void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 );   void (*setpgbwgcfgrx)(struct net_device *, int, u8 );   void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);   void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);   void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);   void (*setpfccfg)(struct net_device *, int, u8 );   void (*getpfccfg)(struct net_device *, int, u8 *);   u8  (*setall)(struct net_device *);   u8  (*getcap)(struct net_device *, int, u8 *);   int (*getnumtcs)(struct net_device *, int, u8 *);   int (*setnumtcs)(struct net_device *, int, u8 );   
u8  (*getpfcstate)(struct net_device *);   void (*setpfcstate)(struct net_device *, u8 );   void (*getbcncfg)(struct net_device *, int, u32 *);   void (*setbcncfg)(struct net_device *, int, u32 );   void (*getbcnrp)(struct net_device *, int, u8 *);   void (*setbcnrp)(struct net_device *, int, u8 );   int (*setapp)(struct net_device *, u8 , u16 , u8 );   int (*getapp)(struct net_device *, u8 , u16 );   u8  (*getfeatcfg)(struct net_device *, int, u8 *);   u8  (*setfeatcfg)(struct net_device *, int, u8 );   u8  (*getdcbx)(struct net_device *);   u8  (*setdcbx)(struct net_device *, u8 );   int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *);   int (*peer_getapptable)(struct net_device *, struct dcb_app *);   int (*cee_peer_getpg)(struct net_device *, struct cee_pg *);   int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ;   105     struct taskstats {   __u16 version;   __u32 ac_exitcode;   __u8 ac_flag;   __u8 ac_nice;   __u64 cpu_count;   __u64 cpu_delay_total;   __u64 blkio_count;   __u64 blkio_delay_total;   __u64 swapin_count;   __u64 swapin_delay_total;   __u64 cpu_run_real_total;   __u64 cpu_run_virtual_total;   char ac_comm[32U];   __u8 ac_sched;   __u8 ac_pad[3U];   __u32 ac_uid;   __u32 ac_gid;   __u32 ac_pid;   __u32 ac_ppid;   __u32 ac_btime;   __u64 ac_etime;   __u64 ac_utime;   __u64 ac_stime;   __u64 ac_minflt;   __u64 ac_majflt;   __u64 coremem;   __u64 virtmem;   __u64 hiwater_rss;   __u64 hiwater_vm;   __u64 read_char;   __u64 write_char;   __u64 read_syscalls;   __u64 write_syscalls;   __u64 read_bytes;   __u64 write_bytes;   __u64 cancelled_write_bytes;   __u64 nvcsw;   __u64 nivcsw;   __u64 ac_utimescaled;   __u64 ac_stimescaled;   __u64 cpu_scaled_run_real_total;   __u64 freepages_count;   __u64 freepages_delay_total; } ;    58     struct mnt_namespace ;    59     struct ipc_namespace ;    60     struct cgroup_namespace ;    61     struct nsproxy {   atomic_t count;   struct uts_namespace *uts_ns;   struct ipc_namespace *ipc_ns;   struct mnt_namespace *mnt_ns;   struct pid_namespace *pid_ns_for_children;   struct net *net_ns;   struct cgroup_namespace *cgroup_ns; } ;    86     struct uid_gid_extent {   u32 first;   u32 lower_first;   u32 count; } ;    19     struct uid_gid_map {   u32 nr_extents;   struct uid_gid_extent extent[5U]; } ;    31     struct user_namespace {   struct uid_gid_map uid_map;   struct uid_gid_map gid_map;   struct uid_gid_map projid_map;   atomic_t count;   struct user_namespace *parent;   int level;   kuid_t owner;   kgid_t group;   struct ns_common ns;   unsigned long flags;   struct key *persistent_keyring_register;   struct rw_semaphore persistent_keyring_register_sem;   struct work_struct work;   struct ctl_table_set set;   struct ctl_table_header *sysctls;   struct ucounts *ucounts;   int ucount_max[7U]; } ;    63     struct ucounts {   struct hlist_node node;   struct user_namespace *ns;   kuid_t uid;   atomic_t count;   atomic_t ucount[7U]; } ;   631     struct cgroup_namespace {   atomic_t count;   struct ns_common ns;   struct user_namespace *user_ns;   struct ucounts *ucounts;   struct css_set *root_cset; } ;   686     struct netprio_map {   struct callback_head rcu;   u32 priomap_len;   u32 priomap[]; } ;    41     struct nlmsghdr {   __u32 nlmsg_len;   __u16 nlmsg_type;   __u16 nlmsg_flags;   __u32 nlmsg_seq;   __u32 nlmsg_pid; } ;   143     struct nlattr {   __u16 nla_len;   __u16 nla_type; } ;   105     struct netlink_callback {   struct sk_buff *skb;   const struct nlmsghdr *nlh;   int 
(*start)(struct netlink_callback *);   int (*dump)(struct sk_buff *, struct netlink_callback *);   int (*done)(struct netlink_callback *);   void *data;   struct module *module;   u16 family;   u16 min_dump_alloc;   unsigned int prev_seq;   unsigned int seq;   long args[6U]; } ;   183     struct ndmsg {   __u8 ndm_family;   __u8 ndm_pad1;   __u16 ndm_pad2;   __s32 ndm_ifindex;   __u16 ndm_state;   __u8 ndm_flags;   __u8 ndm_type; } ;    41     struct rtnl_link_stats64 {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 rx_errors;   __u64 tx_errors;   __u64 rx_dropped;   __u64 tx_dropped;   __u64 multicast;   __u64 collisions;   __u64 rx_length_errors;   __u64 rx_over_errors;   __u64 rx_crc_errors;   __u64 rx_frame_errors;   __u64 rx_fifo_errors;   __u64 rx_missed_errors;   __u64 tx_aborted_errors;   __u64 tx_carrier_errors;   __u64 tx_fifo_errors;   __u64 tx_heartbeat_errors;   __u64 tx_window_errors;   __u64 rx_compressed;   __u64 tx_compressed;   __u64 rx_nohandler; } ;   866     struct ifla_vf_stats {   __u64 rx_packets;   __u64 tx_packets;   __u64 rx_bytes;   __u64 tx_bytes;   __u64 broadcast;   __u64 multicast; } ;    16     struct ifla_vf_info {   __u32 vf;   __u8 mac[32U];   __u32 vlan;   __u32 qos;   __u32 spoofchk;   __u32 linkstate;   __u32 min_tx_rate;   __u32 max_tx_rate;   __u32 rss_query_en;   __u32 trusted;   __be16 vlan_proto; } ;   117     struct netpoll_info ;   118     struct wireless_dev ;   119     struct wpan_dev ;   120     struct mpls_dev ;   121     struct udp_tunnel_info ;   122     struct bpf_prog ;    70     enum netdev_tx {   __NETDEV_TX_MIN = -2147483648,   NETDEV_TX_OK = 0,   NETDEV_TX_BUSY = 16 } ;   113     typedef enum netdev_tx netdev_tx_t;   132     struct net_device_stats {   unsigned long rx_packets;   unsigned long tx_packets;   unsigned long rx_bytes;   unsigned long tx_bytes;   unsigned long rx_errors;   unsigned long tx_errors;   unsigned long rx_dropped;   unsigned long tx_dropped;   unsigned long multicast;   unsigned long collisions;   unsigned long rx_length_errors;   unsigned long rx_over_errors;   unsigned long rx_crc_errors;   unsigned long rx_frame_errors;   unsigned long rx_fifo_errors;   unsigned long rx_missed_errors;   unsigned long tx_aborted_errors;   unsigned long tx_carrier_errors;   unsigned long tx_fifo_errors;   unsigned long tx_heartbeat_errors;   unsigned long tx_window_errors;   unsigned long rx_compressed;   unsigned long tx_compressed; } ;   195     struct neigh_parms ;   196     struct netdev_hw_addr {   struct list_head list;   unsigned char addr[32U];   unsigned char type;   bool global_use;   int sync_cnt;   int refcount;   int synced;   struct callback_head callback_head; } ;   216     struct netdev_hw_addr_list {   struct list_head list;   int count; } ;   221     struct hh_cache {   u16 hh_len;   u16 __pad;   seqlock_t hh_lock;   unsigned long hh_data[16U]; } ;   250     struct header_ops {   int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int);   int (*parse)(const struct sk_buff *, unsigned char *);   int (*cache)(const struct neighbour *, struct hh_cache *, __be16 );   void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *);   bool  (*validate)(const char *, unsigned int); } ;   301     struct napi_struct {   struct list_head poll_list;   unsigned long state;   int weight;   unsigned int gro_count;   int (*poll)(struct napi_struct *, int);   spinlock_t poll_lock;   int poll_owner;   struct 
net_device *dev;   struct sk_buff *gro_list;   struct sk_buff *skb;   struct hrtimer timer;   struct list_head dev_list;   struct hlist_node napi_hash_node;   unsigned int napi_id; } ;   347     enum rx_handler_result {   RX_HANDLER_CONSUMED = 0,   RX_HANDLER_ANOTHER = 1,   RX_HANDLER_EXACT = 2,   RX_HANDLER_PASS = 3 } ;   395     typedef enum rx_handler_result rx_handler_result_t;   396     typedef rx_handler_result_t  rx_handler_func_t(struct sk_buff **);   541     struct Qdisc ;   541     struct netdev_queue {   struct net_device *dev;   struct Qdisc *qdisc;   struct Qdisc *qdisc_sleeping;   struct kobject kobj;   int numa_node;   unsigned long tx_maxrate;   unsigned long trans_timeout;   spinlock_t _xmit_lock;   int xmit_lock_owner;   unsigned long trans_start;   unsigned long state;   struct dql dql; } ;   612     struct rps_map {   unsigned int len;   struct callback_head rcu;   u16 cpus[0U]; } ;   624     struct rps_dev_flow {   u16 cpu;   u16 filter;   unsigned int last_qtail; } ;   636     struct rps_dev_flow_table {   unsigned int mask;   struct callback_head rcu;   struct rps_dev_flow flows[0U]; } ;   688     struct netdev_rx_queue {   struct rps_map *rps_map;   struct rps_dev_flow_table *rps_flow_table;   struct kobject kobj;   struct net_device *dev; } ;   711     struct xps_map {   unsigned int len;   unsigned int alloc_len;   struct callback_head rcu;   u16 queues[0U]; } ;   724     struct xps_dev_maps {   struct callback_head rcu;   struct xps_map *cpu_map[0U]; } ;   735     struct netdev_tc_txq {   u16 count;   u16 offset; } ;   746     struct netdev_fcoe_hbainfo {   char manufacturer[64U];   char serial_number[64U];   char hardware_version[64U];   char driver_version[64U];   char optionrom_version[64U];   char firmware_version[64U];   char model[256U];   char model_description[256U]; } ;   762     struct netdev_phys_item_id {   unsigned char id[32U];   unsigned char id_len; } ;   790     struct tc_cls_u32_offload ;   791     struct tc_cls_flower_offload ;   791     struct tc_cls_matchall_offload ;   791     struct tc_cls_bpf_offload ;   791     union __anonunion____missing_field_name_469 {   u8 tc;   struct tc_cls_u32_offload *cls_u32;   struct tc_cls_flower_offload *cls_flower;   struct tc_cls_matchall_offload *cls_mall;   struct tc_cls_bpf_offload *cls_bpf; } ;   791     struct tc_to_netdev {   unsigned int type;   union __anonunion____missing_field_name_469 __annonCompField106; } ;   807     enum xdp_netdev_command {   XDP_SETUP_PROG = 0,   XDP_QUERY_PROG = 1 } ;   812     union __anonunion____missing_field_name_470 {   struct bpf_prog *prog;   bool prog_attached; } ;   812     struct netdev_xdp {   enum xdp_netdev_command command;   union __anonunion____missing_field_name_470 __annonCompField107; } ;   835     struct net_device_ops {   int (*ndo_init)(struct net_device *);   void (*ndo_uninit)(struct net_device *);   int (*ndo_open)(struct net_device *);   int (*ndo_stop)(struct net_device *);   netdev_tx_t  (*ndo_start_xmit)(struct sk_buff *, struct net_device *);   netdev_features_t  (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t );   u16  (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16  (*)(struct net_device *, struct sk_buff *));   void (*ndo_change_rx_flags)(struct net_device *, int);   void (*ndo_set_rx_mode)(struct net_device *);   int (*ndo_set_mac_address)(struct net_device *, void *);   int (*ndo_validate_addr)(struct net_device *);   int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);   int 
(*ndo_set_config)(struct net_device *, struct ifmap *);   int (*ndo_change_mtu)(struct net_device *, int);   int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);   void (*ndo_tx_timeout)(struct net_device *);   struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);   bool  (*ndo_has_offload_stats)(int);   int (*ndo_get_offload_stats)(int, const struct net_device *, void *);   struct net_device_stats * (*ndo_get_stats)(struct net_device *);   int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 );   int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 );   void (*ndo_poll_controller)(struct net_device *);   int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *);   void (*ndo_netpoll_cleanup)(struct net_device *);   int (*ndo_busy_poll)(struct napi_struct *);   int (*ndo_set_vf_mac)(struct net_device *, int, u8 *);   int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 , __be16 );   int (*ndo_set_vf_rate)(struct net_device *, int, int, int);   int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool );   int (*ndo_set_vf_trust)(struct net_device *, int, bool );   int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);   int (*ndo_set_vf_link_state)(struct net_device *, int, int);   int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *);   int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);   int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);   int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int);   int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool );   int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *);   int (*ndo_fcoe_enable)(struct net_device *);   int (*ndo_fcoe_disable)(struct net_device *);   int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_ddp_done)(struct net_device *, u16 );   int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int);   int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *);   int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int);   int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 );   int (*ndo_add_slave)(struct net_device *, struct net_device *);   int (*ndo_del_slave)(struct net_device *, struct net_device *);   netdev_features_t  (*ndo_fix_features)(struct net_device *, netdev_features_t );   int (*ndo_set_features)(struct net_device *, netdev_features_t );   int (*ndo_neigh_construct)(struct net_device *, struct neighbour *);   void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *);   int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 );   int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 );   int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *);   int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int);   int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 );   int (*ndo_change_carrier)(struct net_device *, bool );   int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *);   int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t );   void (*ndo_udp_tunnel_add)(struct 
net_device *, struct udp_tunnel_info *);   void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *);   void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);   void (*ndo_dfwd_del_station)(struct net_device *, void *);   netdev_tx_t  (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *);   int (*ndo_get_lock_subclass)(struct net_device *);   int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 );   int (*ndo_get_iflink)(const struct net_device *);   int (*ndo_change_proto_down)(struct net_device *, bool );   int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *);   void (*ndo_set_rx_headroom)(struct net_device *, int);   int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ;  1371     struct __anonstruct_adj_list_471 {   struct list_head upper;   struct list_head lower; } ;  1371     struct __anonstruct_all_adj_list_472 {   struct list_head upper;   struct list_head lower; } ;  1371     struct iw_handler_def ;  1371     struct iw_public_data ;  1371     struct switchdev_ops ;  1371     struct l3mdev_ops ;  1371     struct ndisc_ops ;  1371     struct vlan_info ;  1371     struct tipc_bearer ;  1371     struct in_device ;  1371     struct dn_dev ;  1371     struct inet6_dev ;  1371     struct tcf_proto ;  1371     struct cpu_rmap ;  1371     struct pcpu_lstats ;  1371     struct pcpu_sw_netstats ;  1371     struct pcpu_dstats ;  1371     struct pcpu_vstats ;  1371     union __anonunion____missing_field_name_473 {   void *ml_priv;   struct pcpu_lstats *lstats;   struct pcpu_sw_netstats *tstats;   struct pcpu_dstats *dstats;   struct pcpu_vstats *vstats; } ;  1371     struct garp_port ;  1371     struct mrp_port ;  1371     struct rtnl_link_ops ;  1371     struct net_device {   char name[16U];   struct hlist_node name_hlist;   char *ifalias;   unsigned long mem_end;   unsigned long mem_start;   unsigned long base_addr;   int irq;   atomic_t carrier_changes;   unsigned long state;   struct list_head dev_list;   struct list_head napi_list;   struct list_head unreg_list;   struct list_head close_list;   struct list_head ptype_all;   struct list_head ptype_specific;   struct __anonstruct_adj_list_471 adj_list;   struct __anonstruct_all_adj_list_472 all_adj_list;   netdev_features_t features;   netdev_features_t hw_features;   netdev_features_t wanted_features;   netdev_features_t vlan_features;   netdev_features_t hw_enc_features;   netdev_features_t mpls_features;   netdev_features_t gso_partial_features;   int ifindex;   int group;   struct net_device_stats stats;   atomic_long_t rx_dropped;   atomic_long_t tx_dropped;   atomic_long_t rx_nohandler;   const struct iw_handler_def *wireless_handlers;   struct iw_public_data *wireless_data;   const struct net_device_ops *netdev_ops;   const struct ethtool_ops *ethtool_ops;   const struct switchdev_ops *switchdev_ops;   const struct l3mdev_ops *l3mdev_ops;   const struct ndisc_ops *ndisc_ops;   const struct header_ops *header_ops;   unsigned int flags;   unsigned int priv_flags;   unsigned short gflags;   unsigned short padded;   unsigned char operstate;   unsigned char link_mode;   unsigned char if_port;   unsigned char dma;   unsigned int mtu;   unsigned short type;   unsigned short hard_header_len;   unsigned short needed_headroom;   unsigned short needed_tailroom;   unsigned char perm_addr[32U];   unsigned char addr_assign_type;   unsigned char addr_len;   unsigned short neigh_priv_len;   unsigned short dev_id;   unsigned short dev_port;   spinlock_t addr_list_lock;   
unsigned char name_assign_type;   bool uc_promisc;   struct netdev_hw_addr_list uc;   struct netdev_hw_addr_list mc;   struct netdev_hw_addr_list dev_addrs;   struct kset *queues_kset;   unsigned int promiscuity;   unsigned int allmulti;   struct vlan_info *vlan_info;   struct dsa_switch_tree *dsa_ptr;   struct tipc_bearer *tipc_ptr;   void *atalk_ptr;   struct in_device *ip_ptr;   struct dn_dev *dn_ptr;   struct inet6_dev *ip6_ptr;   void *ax25_ptr;   struct wireless_dev *ieee80211_ptr;   struct wpan_dev *ieee802154_ptr;   struct mpls_dev *mpls_ptr;   unsigned long last_rx;   unsigned char *dev_addr;   struct netdev_rx_queue *_rx;   unsigned int num_rx_queues;   unsigned int real_num_rx_queues;   unsigned long gro_flush_timeout;   rx_handler_func_t *rx_handler;   void *rx_handler_data;   struct tcf_proto *ingress_cl_list;   struct netdev_queue *ingress_queue;   struct nf_hook_entry *nf_hooks_ingress;   unsigned char broadcast[32U];   struct cpu_rmap *rx_cpu_rmap;   struct hlist_node index_hlist;   struct netdev_queue *_tx;   unsigned int num_tx_queues;   unsigned int real_num_tx_queues;   struct Qdisc *qdisc;   struct hlist_head qdisc_hash[16U];   unsigned long tx_queue_len;   spinlock_t tx_global_lock;   int watchdog_timeo;   struct xps_dev_maps *xps_maps;   struct tcf_proto *egress_cl_list;   struct timer_list watchdog_timer;   int *pcpu_refcnt;   struct list_head todo_list;   struct list_head link_watch_list;   unsigned char reg_state;   bool dismantle;   unsigned short rtnl_link_state;   void (*destructor)(struct net_device *);   struct netpoll_info *npinfo;   possible_net_t nd_net;   union __anonunion____missing_field_name_473 __annonCompField108;   struct garp_port *garp_port;   struct mrp_port *mrp_port;   struct device dev;   const struct attribute_group *sysfs_groups[4U];   const struct attribute_group *sysfs_rx_queue_group;   const struct rtnl_link_ops *rtnl_link_ops;   unsigned int gso_max_size;   u16 gso_max_segs;   const struct dcbnl_rtnl_ops *dcbnl_ops;   u8 num_tc;   struct netdev_tc_txq tc_to_txq[16U];   u8 prio_tc_map[16U];   unsigned int fcoe_ddp_xid;   struct netprio_map *priomap;   struct phy_device *phydev;   struct lock_class_key *qdisc_tx_busylock;   struct lock_class_key *qdisc_running_key;   bool proto_down; } ;  2180     struct packet_type {   __be16 type;   struct net_device *dev;   int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);   bool  (*id_match)(struct packet_type *, struct sock *);   void *af_packet_priv;   struct list_head list; } ;  2210     struct pcpu_sw_netstats {   u64 rx_packets;   u64 rx_bytes;   u64 tx_packets;   u64 tx_bytes;   struct u64_stats_sync syncp; } ;  3221     enum skb_free_reason {   SKB_REASON_CONSUMED = 0,   SKB_REASON_DROPPED = 1 } ;   475     struct macb_platform_data {   u32 phy_mask;   int phy_irq_pin;   u8 is_rmii;   u8 rev_eth_addr; } ;    25     struct mfd_cell ;    26     struct platform_device {   const char *name;   int id;   bool id_auto;   struct device dev;   u32 num_resources;   struct resource *resource;   const struct platform_device_id *id_entry;   char *driver_override;   struct mfd_cell *mfd_cell;   struct pdev_archdata archdata; } ;    59     enum of_gpio_flags {   OF_GPIO_ACTIVE_LOW = 1,   OF_GPIO_SINGLE_ENDED = 2 } ;    17     struct macb_dma_desc {   u32 addr;   u32 ctrl;   u32 addrh;   u32 resvd; } ;   486     struct macb_tx_skb {   struct sk_buff *skb;   dma_addr_t mapping;   size_t size;   bool mapped_as_page; } ;   585     struct macb_stats {   u32 
rx_pause_frames;   u32 tx_ok;   u32 tx_single_cols;   u32 tx_multiple_cols;   u32 rx_ok;   u32 rx_fcs_errors;   u32 rx_align_errors;   u32 tx_deferred;   u32 tx_late_cols;   u32 tx_excessive_cols;   u32 tx_underruns;   u32 tx_carrier_errors;   u32 rx_resource_errors;   u32 rx_overruns;   u32 rx_symbol_errors;   u32 rx_oversize_pkts;   u32 rx_jabbers;   u32 rx_undersize_pkts;   u32 sqe_test_errors;   u32 rx_length_mismatch;   u32 tx_pause_frames; } ;   612     struct gem_stats {   u32 tx_octets_31_0;   u32 tx_octets_47_32;   u32 tx_frames;   u32 tx_broadcast_frames;   u32 tx_multicast_frames;   u32 tx_pause_frames;   u32 tx_64_byte_frames;   u32 tx_65_127_byte_frames;   u32 tx_128_255_byte_frames;   u32 tx_256_511_byte_frames;   u32 tx_512_1023_byte_frames;   u32 tx_1024_1518_byte_frames;   u32 tx_greater_than_1518_byte_frames;   u32 tx_underrun;   u32 tx_single_collision_frames;   u32 tx_multiple_collision_frames;   u32 tx_excessive_collisions;   u32 tx_late_collisions;   u32 tx_deferred_frames;   u32 tx_carrier_sense_errors;   u32 rx_octets_31_0;   u32 rx_octets_47_32;   u32 rx_frames;   u32 rx_broadcast_frames;   u32 rx_multicast_frames;   u32 rx_pause_frames;   u32 rx_64_byte_frames;   u32 rx_65_127_byte_frames;   u32 rx_128_255_byte_frames;   u32 rx_256_511_byte_frames;   u32 rx_512_1023_byte_frames;   u32 rx_1024_1518_byte_frames;   u32 rx_greater_than_1518_byte_frames;   u32 rx_undersized_frames;   u32 rx_oversize_frames;   u32 rx_jabbers;   u32 rx_frame_check_sequence_errors;   u32 rx_length_field_frame_errors;   u32 rx_symbol_errors;   u32 rx_alignment_errors;   u32 rx_resource_errors;   u32 rx_overruns;   u32 rx_ip_header_checksum_errors;   u32 rx_tcp_checksum_errors;   u32 rx_udp_checksum_errors; } ;   660     struct gem_statistic {   char stat_string[32U];   int offset;   u32 stat_bits; } ;   695     struct macb ;   696     struct macb_or_gem_ops {   int (*mog_alloc_rx_buffers)(struct macb *);   void (*mog_free_rx_buffers)(struct macb *);   void (*mog_init_rings)(struct macb *);   int (*mog_rx)(struct macb *, int); } ;   770     struct macb_config {   u32 caps;   unsigned int dma_burst_length;   int (*clk_init)(struct platform_device *, struct clk **, struct clk **, struct clk **, struct clk **);   int (*init)(struct platform_device *);   int jumbo_max_len; } ;   780     struct macb_queue {   struct macb *bp;   int irq;   unsigned int ISR;   unsigned int IER;   unsigned int IDR;   unsigned int IMR;   unsigned int TBQP;   unsigned int TBQPH;   unsigned int tx_head;   unsigned int tx_tail;   struct macb_dma_desc *tx_ring;   struct macb_tx_skb *tx_skb;   dma_addr_t tx_ring_dma;   struct work_struct tx_error_task; } ;   798     union __anonunion_hw_stats_482 {   struct macb_stats macb;   struct gem_stats gem; } ;   798     struct macb {   void *regs;   bool native_io;   u32  (*macb_reg_readl)(struct macb *, int);   void (*macb_reg_writel)(struct macb *, int, u32 );   unsigned int rx_tail;   unsigned int rx_prepared_head;   struct macb_dma_desc *rx_ring;   struct sk_buff **rx_skbuff;   void *rx_buffers;   size_t rx_buffer_size;   unsigned int num_queues;   unsigned int queue_mask;   struct macb_queue queues[8U];   spinlock_t lock;   struct platform_device *pdev;   struct clk *pclk;   struct clk *hclk;   struct clk *tx_clk;   struct clk *rx_clk;   struct net_device *dev;   struct napi_struct napi;   struct net_device_stats stats;   union __anonunion_hw_stats_482 hw_stats;   dma_addr_t rx_ring_dma;   dma_addr_t rx_buffers_dma;   struct macb_or_gem_ops macbgem_ops;   struct mii_bus 
*mii_bus;   int link;   int speed;   int duplex;   u32 caps;   unsigned int dma_burst_length;   phy_interface_t phy_interface;   struct gpio_desc *reset_gpio;   struct sk_buff *skb;   dma_addr_t skb_physaddr;   int skb_length;   unsigned int max_tx_length;   u64 ethtool_stats[43U];   unsigned int rx_frm_len_mask;   unsigned int jumbo_max_len;   u32 wol; } ;     1     void * __builtin_memcpy(void *, const void *, unsigned long);     1     long int __builtin_expect(long, long);   241     void __read_once_size(const volatile void *p, void *res, int size);   266     void __write_once_size(volatile void *p, void *res, int size);    34     extern struct module __this_module;    72     void set_bit(long nr, volatile unsigned long *addr);   110     void clear_bit(long nr, volatile unsigned long *addr);   204     bool  test_and_set_bit(long nr, volatile unsigned long *addr);   308     bool  constant_test_bit(long nr, const volatile unsigned long *addr);    63     void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);    69     void __dynamic_netdev_dbg(struct _ddebug *, const struct net_device *, const char *, ...);   411     int snprintf(char *, size_t , const char *, ...);     3     bool  ldv_is_err(const void *ptr);     6     long int ldv_ptr_err(const void *ptr);     8     void ldv_dma_map_page();     9     void ldv_dma_mapping_error();    32     long int PTR_ERR(const void *ptr);    41     bool  IS_ERR(const void *ptr);    25     void INIT_LIST_HEAD(struct list_head *list);    24     int atomic_read(const atomic_t *v);    71     void warn_slowpath_null(const char *, const int);   281     void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int);     7     extern unsigned long page_offset_base;     9     extern unsigned long vmemmap_base;   331     extern struct pv_irq_ops pv_irq_ops;    23     unsigned long int __phys_addr(unsigned long);    32     void * __memcpy(void *, const void *, size_t );   760     unsigned long int arch_local_save_flags();   765     void arch_local_irq_restore(unsigned long f);   770     void arch_local_irq_disable();   780     unsigned long int arch_local_irq_save();   155     int arch_irqs_disabled_flags(unsigned long flags);    20     void trace_hardirqs_on();    21     void trace_hardirqs_off();   581     void rep_nop();   586     void cpu_relax();    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    22     void _raw_spin_lock(raw_spinlock_t *);    34     unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);    41     void _raw_spin_unlock(raw_spinlock_t *);    45     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   289     raw_spinlock_t * spinlock_check(spinlock_t *lock);   300     void spin_lock(spinlock_t *lock);   345     void spin_unlock(spinlock_t *lock);   360     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);    78     extern volatile unsigned long jiffies;   369     unsigned long int __usecs_to_jiffies(const unsigned int);   405     unsigned long int usecs_to_jiffies(const unsigned int u);   181     void __init_work(struct work_struct *, int);   353     extern struct workqueue_struct *system_wq;   430     bool  queue_work_on(int, struct workqueue_struct *, struct work_struct *);   471     bool  queue_work(struct workqueue_struct *wq, struct work_struct *work);   530     bool  schedule_work(struct work_struct *work);   186     int clk_prepare(struct clk *);   205     void clk_unprepare(struct clk *);   249    
 struct clk * devm_clk_get(struct device *, const char *);   261     int clk_enable(struct clk *);   277     void clk_disable(struct clk *);   284     unsigned long int clk_get_rate(struct clk *);   337     long int clk_round_rate(struct clk *, unsigned long);   346     int clk_set_rate(struct clk *, unsigned long);   483     int clk_prepare_enable(struct clk *clk);   498     void clk_disable_unprepare(struct clk *clk);    62     unsigned int __readl(const volatile void *addr);    70     void __writel(unsigned int val, volatile void *addr);   154     void kfree(const void *);   330     void * __kmalloc(size_t , gfp_t );   478     void * kmalloc(size_t size, gfp_t flags);   634     void * kzalloc(size_t size, gfp_t flags);   123     void of_node_put(struct device_node *);   275     struct device_node * of_get_next_available_child(const struct device_node *, struct device_node *);   328     const void * of_get_property(const struct device_node *, const char *, int *);   337     const struct of_device_id * of_match_node(const struct of_device_id *, const struct device_node *);   683     void * devm_ioremap_resource(struct device *, struct resource *);   103     int device_init_wakeup(struct device *, bool );   104     int device_set_wakeup_enable(struct device *, bool );   915     void * dev_get_drvdata(const struct device *dev);   920     void dev_set_drvdata(struct device *dev, void *data);  1049     void * dev_get_platdata(const struct device *dev);  1138     void dev_err(const struct device *, const char *, ...);  1144     void _dev_info(const struct device *, const char *, ...);    97     int gpiod_direction_output(struct gpio_desc *, int);   102     void gpiod_set_value(struct gpio_desc *, int);   128     int gpiod_to_irq(const struct gpio_desc *);   131     struct gpio_desc * gpio_to_desc(unsigned int);    45     bool  gpio_is_valid(int number);   111     int __gpio_to_irq(unsigned int gpio);    68     int gpio_to_irq(unsigned int gpio);    84     int devm_gpio_request(struct device *, unsigned int, const char *);   164     int devm_request_threaded_irq(struct device *, unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   170     int devm_request_irq(struct device *dev, unsigned int irq, irqreturn_t  (*handler)(int, void *), unsigned long irqflags, const char *devname, void *dev_id);    48     void usleep_range(unsigned long, unsigned long);  1003     void * lowmem_page_address(const struct page *page);   131     void kmemcheck_mark_initialized(void *address, unsigned int n);    36     void get_random_bytes(void *, int);    37     void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool );    42     void debug_dma_mapping_error(struct device *, dma_addr_t );    44     void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );    53     void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);   136     int valid_dma_direction(int dma_direction);    28     extern struct dma_map_ops *dma_ops;    30     struct dma_map_ops * get_dma_ops(struct device *dev);    42     bool  arch_dma_alloc_attrs(struct device **, gfp_t *);    46     int dma_supported(struct device *, u64 );   180     dma_addr_t  ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);   180     dma_addr_t  dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs);   
203     void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs);   250     dma_addr_t  ldv_dma_map_page_6(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);   250     dma_addr_t  dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);   269     void dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);   450     void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);   491     void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);   497     void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);   517     int ldv_dma_mapping_error_7(struct device *dev, dma_addr_t dma_addr);   517     int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);   549     int dma_set_mask(struct device *dev, u64 mask);   325     unsigned int skb_frag_size(const skb_frag_t *frag);   904     void consume_skb(struct sk_buff *);   981     int pskb_expand_head(struct sk_buff *, int, int, gfp_t );  1190     unsigned char * skb_end_pointer(const struct sk_buff *skb);  1341     int skb_header_cloned(const struct sk_buff *skb);  1796     unsigned int skb_headlen(const struct sk_buff *skb);  1912     unsigned char * skb_put(struct sk_buff *, unsigned int);  1931     unsigned char * __skb_pull(struct sk_buff *skb, unsigned int len);  1974     unsigned int skb_headroom(const struct sk_buff *skb);  2013     void skb_reserve(struct sk_buff *skb, int len);  2220     unsigned char * skb_checksum_start(const struct sk_buff *skb);  2419     struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t );  2435     struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length);  2555     struct page * skb_frag_page(const skb_frag_t *frag);  2673     dma_addr_t  skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, size_t offset, size_t size, enum dma_data_direction dir);  2717     int __skb_cow(struct sk_buff *skb, unsigned int headroom, int cloned);  2758     int skb_cow_head(struct sk_buff *skb, unsigned int headroom);  3168     void skb_copy_to_linear_data_offset(struct sk_buff *skb, const int offset, const void *from, const unsigned int len);  3223     void skb_clone_tx_timestamp(struct sk_buff *);  3269     void skb_tstamp_tx(struct sk_buff *, struct skb_shared_hwtstamps *);  3272     void sw_tx_timestamp(struct sk_buff *skb);  3291     void skb_tx_timestamp(struct sk_buff *skb);  3653     u16  skb_get_queue_mapping(const struct sk_buff *skb);  3805     void skb_checksum_none_assert(const struct sk_buff *skb);    83     u32  ethtool_op_get_link(struct net_device *);    84     int ethtool_op_get_ts_info(struct net_device *, struct ethtool_ts_info *);   203     struct mii_bus * mdiobus_alloc_size(size_t );   204     struct mii_bus * mdiobus_alloc();   209     int __mdiobus_register(struct mii_bus *, struct module *);   211     void mdiobus_unregister(struct mii_bus *);   212     void mdiobus_free(struct mii_bus *);   220     struct phy_device * mdiobus_scan(struct mii_bus *, int);   753     struct phy_device * phy_find_first(struct mii_bus *);   756     int phy_connect_direct(struct net_device *, struct phy_device *, void (*)(struct net_device *), phy_interface_t );   762     void phy_disconnect(struct phy_device *);   764     void 
phy_start(struct phy_device *);   765     void phy_stop(struct phy_device *);   788     void phy_attached_info(struct phy_device *);   815     int phy_mii_ioctl(struct phy_device *, struct ifreq *, int);   835     int phy_ethtool_get_link_ksettings(struct net_device *, struct ethtool_link_ksettings *);   837     int phy_ethtool_set_link_ksettings(struct net_device *, const struct ethtool_link_ksettings *);   398     void __napi_schedule(struct napi_struct *);   401     bool  napi_disable_pending(struct napi_struct *n);   415     bool  napi_schedule_prep(struct napi_struct *n);   447     bool  napi_reschedule(struct napi_struct *napi);   465     void napi_complete(struct napi_struct *n);   502     void napi_disable(struct napi_struct *);   511     void napi_enable(struct napi_struct *n);  1961     struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index);  2056     void * netdev_priv(const struct net_device *dev);  2087     void netif_napi_add(struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int);  2422     void free_netdev(struct net_device *);  2843     void netif_tx_start_queue(struct netdev_queue *dev_queue);  2854     void netif_start_queue(struct net_device *dev);  2859     void netif_tx_start_all_queues(struct net_device *dev);  2869     void netif_tx_wake_queue(struct netdev_queue *);  2878     void netif_wake_queue(struct net_device *dev);  2893     void netif_tx_stop_queue(struct netdev_queue *dev_queue);  2905     void netif_stop_queue(struct net_device *dev);  2910     void netif_tx_stop_all_queues(struct net_device *);  2912     bool  netif_tx_queue_stopped(const struct netdev_queue *dev_queue);  3097     bool  netif_running(const struct net_device *dev);  3130     void netif_stop_subqueue(struct net_device *dev, u16 queue_index);  3143     bool  __netif_subqueue_stopped(const struct net_device *dev, u16 queue_index);  3157     void netif_wake_subqueue(struct net_device *, u16 );  3227     void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason );  3228     void __dev_kfree_skb_any(struct sk_buff *, enum skb_free_reason );  3249     void dev_kfree_skb_irq(struct sk_buff *skb);  3259     void dev_kfree_skb_any(struct sk_buff *skb);  3269     int netif_rx(struct sk_buff *);  3271     int netif_receive_skb(struct sk_buff *);  3377     void netif_carrier_on(struct net_device *);  3379     void netif_carrier_off(struct net_device *);  3690     int register_netdev(struct net_device *);  3691     void unregister_netdev(struct net_device *);  4274     void netdev_err(const struct net_device *, const char *, ...);  4276     void netdev_warn(const struct net_device *, const char *, ...);  4280     void netdev_info(const struct net_device *, const char *, ...);    36     __be16  eth_type_trans(struct sk_buff *, struct net_device *);    48     int eth_mac_addr(struct net_device *, void *);    49     int eth_change_mtu(struct net_device *, int);    50     int eth_validate_addr(struct net_device *);    52     struct net_device * alloc_etherdev_mqs(int, unsigned int, unsigned int);    96     bool  is_zero_ether_addr(const u8 *addr);   114     bool  is_multicast_ether_addr(const u8 *addr);   189     bool  is_valid_ether_addr(const u8 *addr);   221     void eth_random_addr(u8 *addr);   261     void eth_hw_addr_random(struct net_device *dev);   274     void ether_addr_copy(u8 *dst, const u8 *src);    52     struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int);    54     int 
platform_get_irq(struct platform_device *, unsigned int);   211     void * platform_get_drvdata(const struct platform_device *pdev);   216     void platform_set_drvdata(struct platform_device *pdev, void *data);    51     int of_get_named_gpio_flags(struct device_node *, const char *, int, enum of_gpio_flags *);   140     int of_get_named_gpio(struct device_node *np, const char *propname, int index);    16     int of_mdiobus_register(struct mii_bus *, struct device_node *);    14     int of_get_phy_mode(struct device_node *);    15     const void * of_get_mac_address(struct device_node *);   694     const struct gem_statistic gem_statistics[43U] = { { { 't', 'x', '_', 'o', 'c', 't', 'e', 't', 's', '\x0' }, 256, 0U }, { { 't', 'x', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 264, 0U }, { { 't', 'x', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 268, 0U }, { { 't', 'x', '_', 'm', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 272, 0U }, { { 't', 'x', '_', 'p', 'a', 'u', 's', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 276, 0U }, { { 't', 'x', '_', '6', '4', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 280, 0U }, { { 't', 'x', '_', '6', '5', '_', '1', '2', '7', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 284, 0U }, { { 't', 'x', '_', '1', '2', '8', '_', '2', '5', '5', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 288, 0U }, { { 't', 'x', '_', '2', '5', '6', '_', '5', '1', '1', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 292, 0U }, { { 't', 'x', '_', '5', '1', '2', '_', '1', '0', '2', '3', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 296, 0U }, { { 't', 'x', '_', '1', '0', '2', '4', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 300, 0U }, { { 't', 'x', '_', 'g', 'r', 'e', 'a', 't', 'e', 'r', '_', 't', 'h', 'a', 'n', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's' }, 304, 0U }, { { 't', 'x', '_', 'u', 'n', 'd', 'e', 'r', 'r', 'u', 'n', '\x0' }, 308, 576U }, { { 't', 'x', '_', 's', 'i', 'n', 'g', 'l', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 312, 1088U }, { { 't', 'x', '_', 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 316, 1088U }, { { 't', 'x', '_', 'e', 'x', 'c', 'e', 's', 's', 'i', 'v', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', 's', '\x0' }, 320, 1216U }, { { 't', 'x', '_', 'l', 'a', 't', 'e', '_', 'c', 'o', 'l', 'l', 'i', 's', 'i', 'o', 'n', 's', '\x0' }, 324, 1088U }, { { 't', 'x', '_', 'd', 'e', 'f', 'e', 'r', 'r', 'e', 'd', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 328, 0U }, { { 't', 'x', '_', 'c', 'a', 'r', 'r', 'i', 'e', 'r', '_', 's', 'e', 'n', 's', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 332, 1088U }, { { 'r', 'x', '_', 'o', 'c', 't', 'e', 't', 's', '\x0' }, 336, 0U }, { { 'r', 'x', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 344, 0U }, { { 'r', 'x', '_', 'b', 'r', 'o', 'a', 'd', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 348, 0U }, { { 'r', 'x', '_', 'm', 'u', 'l', 't', 'i', 'c', 'a', 's', 't', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 352, 0U }, { { 'r', 'x', '_', 'p', 'a', 'u', 's', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 356, 0U }, { { 'r', 'x', '_', '6', '4', '_', 'b', 'y', 't', 'e', '_', 
'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 360, 0U }, { { 'r', 'x', '_', '6', '5', '_', '1', '2', '7', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 364, 0U }, { { 'r', 'x', '_', '1', '2', '8', '_', '2', '5', '5', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 368, 0U }, { { 'r', 'x', '_', '2', '5', '6', '_', '5', '1', '1', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 372, 0U }, { { 'r', 'x', '_', '5', '1', '2', '_', '1', '0', '2', '3', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 376, 0U }, { { 'r', 'x', '_', '1', '0', '2', '4', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 380, 0U }, { { 'r', 'x', '_', 'g', 'r', 'e', 'a', 't', 'e', 'r', '_', 't', 'h', 'a', 'n', '_', '1', '5', '1', '8', '_', 'b', 'y', 't', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's' }, 384, 0U }, { { 'r', 'x', '_', 'u', 'n', 'd', 'e', 'r', 's', 'i', 'z', 'e', 'd', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 388, 3U }, { { 'r', 'x', '_', 'o', 'v', 'e', 'r', 's', 'i', 'z', 'e', '_', 'f', 'r', 'a', 'm', 'e', 's', '\x0' }, 392, 3U }, { { 'r', 'x', '_', 'j', 'a', 'b', 'b', 'e', 'r', 's', '\x0' }, 396, 3U }, { { 'r', 'x', '_', 'f', 'r', 'a', 'm', 'e', '_', 'c', 'h', 'e', 'c', 'k', '_', 's', 'e', 'q', 'u', 'e', 'n', 'c', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 400, 9U }, { { 'r', 'x', '_', 'l', 'e', 'n', 'g', 't', 'h', '_', 'f', 'i', 'e', 'l', 'd', '_', 'f', 'r', 'a', 'm', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 404, 1U }, { { 'r', 'x', '_', 's', 'y', 'm', 'b', 'o', 'l', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 408, 17U }, { { 'r', 'x', '_', 'a', 'l', 'i', 'g', 'n', 'm', 'e', 'n', 't', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 412, 5U }, { { 'r', 'x', '_', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 416, 5U }, { { 'r', 'x', '_', 'o', 'v', 'e', 'r', 'r', 'u', 'n', 's', '\x0' }, 420, 33U }, { { 'r', 'x', '_', 'i', 'p', '_', 'h', 'e', 'a', 'd', 'e', 'r', '_', 'c', 'h', 'e', 'c', 'k', 's', 'u', 'm', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 424, 1U }, { { 'r', 'x', '_', 't', 'c', 'p', '_', 'c', 'h', 'e', 'c', 'k', 's', 'u', 'm', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 428, 1U }, { { 'r', 'x', '_', 'u', 'd', 'p', '_', 'c', 'h', 'e', 'c', 'k', 's', 'u', 'm', '_', 'e', 'r', 'r', 'o', 'r', 's', '\x0' }, 432, 1U } };   862     bool  macb_is_gem(struct macb *bp);    71     unsigned int macb_tx_ring_wrap(unsigned int index);    76     struct macb_dma_desc * macb_tx_desc(struct macb_queue *queue, unsigned int index);    82     struct macb_tx_skb * macb_tx_skb(struct macb_queue *queue, unsigned int index);    88     dma_addr_t  macb_tx_dma(struct macb_queue *queue, unsigned int index);    97     unsigned int macb_rx_ring_wrap(unsigned int index);   102     struct macb_dma_desc * macb_rx_desc(struct macb *bp, unsigned int index);   107     void * macb_rx_buffer(struct macb *bp, unsigned int index);   113     u32  hw_readl_native(struct macb *bp, int offset);   118     void hw_writel_native(struct macb *bp, int offset, u32 value);   123     u32  hw_readl(struct macb *bp, int offset);   128     void hw_writel(struct macb *bp, int offset, u32 value);   137     bool  hw_is_native_io(void *addr);   150     bool  hw_is_gem(void *addr, bool native_io);   162     void macb_set_hwaddr(struct macb *bp);   181     void macb_get_hwaddr(struct macb *bp);   222     int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum);   242     int 
macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);   267     void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev);   305     void macb_handle_link_change(struct net_device *dev);   372     int macb_mii_probe(struct net_device *dev);   422     int macb_mii_init(struct macb *bp);   495     void macb_update_stats(struct macb *bp);   507     int macb_halt_tx(struct macb *bp);   527     void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb);   545     void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr);   553     void macb_tx_error_task(struct work_struct *work);   659     void macb_tx_interrupt(struct macb_queue *queue);   728     void gem_rx_refill(struct macb *bp);   784     void discard_partial_frame(struct macb *bp, unsigned int begin, unsigned int end);   804     int gem_rx(struct macb *bp, int budget);   886     int macb_rx_frame(struct macb *bp, unsigned int first_frag, unsigned int last_frag);   967     void macb_init_rx_ring(struct macb *bp);   981     int macb_rx(struct macb *bp, int budget);  1056     int macb_poll(struct napi_struct *napi, int budget);  1090     irqreturn_t  macb_interrupt(int irq, void *dev_id);  1200     void macb_poll_controller(struct net_device *dev);  1214     unsigned int macb_tx_map(struct macb *bp, struct macb_queue *queue, struct sk_buff *skb);  1343     int macb_clear_csum(struct sk_buff *skb);  1361     int macb_start_xmit(struct sk_buff *skb, struct net_device *dev);  1427     void macb_init_rx_buffer_size(struct macb *bp, size_t size);  1447     void gem_free_rx_buffers(struct macb *bp);  1478     void macb_free_rx_buffers(struct macb *bp);  1488     void macb_free_consistent(struct macb *bp);  1511     int gem_alloc_rx_buffers(struct macb *bp);  1526     int macb_alloc_rx_buffers(struct macb *bp);  1542     int macb_alloc_consistent(struct macb *bp);  1585     void gem_init_rings(struct macb *bp);  1607     void macb_init_rings(struct macb *bp);  1624     void macb_reset_hw(struct macb *bp);  1650     u32  gem_mdc_clk_div(struct macb *bp);  1671     u32  macb_mdc_clk_div(struct macb *bp);  1696     u32  macb_dbw(struct macb *bp);  1719     void macb_configure_dma(struct macb *bp);  1750     void macb_init_hw(struct macb *bp);  1843     int hash_bit_value(int bitnr, __u8 *addr);  1851     int hash_get_index(__u8 *addr);  1867     void macb_sethashtable(struct net_device *dev);  1887     void macb_set_rx_mode(struct net_device *dev);  1929     int macb_open(struct net_device *dev);  1967     int macb_close(struct net_device *dev);  1988     int macb_change_mtu(struct net_device *dev, int new_mtu);  2008     void gem_update_stats(struct macb *bp);  2029     struct net_device_stats * gem_get_stats(struct macb *bp);  2067     void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data);  2077     int gem_get_sset_count(struct net_device *dev, int sset);  2087     void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p);  2100     struct net_device_stats * macb_get_stats(struct net_device *dev);  2147     int macb_get_regs_len(struct net_device *netdev);  2152     void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p);  2185     void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);  2200     int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);  2218     const struct ethtool_ops macb_ethtool_ops = { 0, 0, 0, &macb_get_regs_len, &macb_get_regs, &macb_get_wol, &macb_set_wol, 0, 0, 0, 
&ethtool_op_get_link, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &ethtool_op_get_ts_info, 0, 0, 0, 0, 0, 0, 0, 0, &phy_ethtool_get_link_ksettings, &phy_ethtool_set_link_ksettings };  2229     const struct ethtool_ops gem_ethtool_ops = { 0, 0, 0, &macb_get_regs_len, &macb_get_regs, 0, 0, 0, 0, 0, &ethtool_op_get_link, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &gem_get_ethtool_strings, 0, &gem_get_ethtool_stats, 0, 0, 0, 0, &gem_get_sset_count, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &ethtool_op_get_ts_info, 0, 0, 0, 0, 0, 0, 0, 0, &phy_ethtool_get_link_ksettings, &phy_ethtool_set_link_ksettings };  2241     int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);  2254     int macb_set_features(struct net_device *netdev, netdev_features_t features);  2288     const struct net_device_ops macb_netdev_ops = { 0, 0, &macb_open, &macb_close, (netdev_tx_t  (*)(struct sk_buff *, struct net_device *))(&macb_start_xmit), 0, 0, 0, &macb_set_rx_mode, &eth_mac_addr, &eth_validate_addr, &macb_ioctl, 0, &macb_change_mtu, 0, 0, 0, 0, 0, &macb_get_stats, 0, 0, &macb_poll_controller, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &macb_set_features, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };  2307     void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf);  2329     void macb_probe_queues(void *mem, bool native_io, unsigned int *queue_mask, unsigned int *num_queues);  2358     int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk);  2424     int macb_init(struct platform_device *pdev);  2545     int at91ether_start(struct net_device *dev);  2596     int at91ether_open(struct net_device *dev);  2630     int at91ether_close(struct net_device *dev);  2665     int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev);  2694     void at91ether_rx(struct net_device *dev);  2732     irqreturn_t  at91ether_interrupt(int irq, void *dev_id);  2778     void at91ether_poll_controller(struct net_device *dev);  2788     const struct net_device_ops at91ether_netdev_ops = { 0, 0, &at91ether_open, &at91ether_close, (netdev_tx_t  (*)(struct sk_buff *, struct net_device *))(&at91ether_start_xmit), 0, 0, 0, &macb_set_rx_mode, &eth_mac_addr, &eth_validate_addr, &macb_ioctl, 0, &eth_change_mtu, 0, 0, 0, 0, 0, &macb_get_stats, 0, 0, &at91ether_poll_controller, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };  2803     int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk);  2826     int at91ether_init(struct platform_device *pdev);  2852     const struct macb_config at91sam9260_config = { 6U, 0U, &macb_clk_init, &macb_init, 0 };  2858     const struct macb_config pc302gem_config = { 1610612736U, 16U, &macb_clk_init, &macb_init, 0 };  2865     const struct macb_config sama5d2_config = { 4U, 16U, &macb_clk_init, &macb_init, 0 };  2872     const struct macb_config sama5d3_config = { 1610612740U, 16U, &macb_clk_init, &macb_init, 0 };  2880     const struct macb_config sama5d4_config = { 4U, 4U, &macb_clk_init, &macb_init, 0 };  2887     const struct macb_config emac_config = { 0U, 0U, &at91ether_clk_init, &at91ether_init, 0 };  2892     const struct macb_config np4_config = { 16U, 0U, &macb_clk_init, &macb_init, 0 };  2898     const struct macb_config zynqmp_config = 
{ 536870944U, 16U, &macb_clk_init, &macb_init, 10240 };  2906     const struct macb_config zynq_config = { 536870920U, 16U, &macb_clk_init, &macb_init, 0 };  2913     const struct of_device_id macb_dt_ids[14U] = { { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'a', 't', '3', '2', 'a', 'p', '7', '0', '0', '0', '-', 'm', 'a', 'c', 'b', '\x0' }, 0 }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'a', 't', '9', '1', 's', 'a', 'm', '9', '2', '6', '0', '-', 'm', 'a', 'c', 'b', '\x0' }, (const void *)(&at91sam9260_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'm', 'a', 'c', 'b', '\x0' }, 0 }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'n', 'p', '4', '-', 'm', 'a', 'c', 'b', '\x0' }, (const void *)(&np4_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'p', 'c', '3', '0', '2', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&pc302gem_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'g', 'e', 'm', '\x0' }, (const void *)(&pc302gem_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'a', 't', 'm', 'e', 'l', ',', 's', 'a', 'm', 'a', '5', 'd', '2', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&sama5d2_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'a', 't', 'm', 'e', 'l', ',', 's', 'a', 'm', 'a', '5', 'd', '3', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&sama5d3_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'a', 't', 'm', 'e', 'l', ',', 's', 'a', 'm', 'a', '5', 'd', '4', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&sama5d4_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'a', 't', '9', '1', 'r', 'm', '9', '2', '0', '0', '-', 'e', 'm', 'a', 'c', '\x0' }, (const void *)(&emac_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'e', 'm', 'a', 'c', '\x0' }, (const void *)(&emac_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'z', 'y', 'n', 'q', 'm', 'p', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&zynqmp_config) }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 'c', 'd', 'n', 's', ',', 'z', 'y', 'n', 'q', '-', 'g', 'e', 'm', '\x0' }, (const void *)(&zynq_config) } };  2929     const struct of_device_id __mod_of__macb_dt_ids_device_table[14U] = {  };  2932     int macb_probe(struct platform_device *pdev);  3109     int macb_remove(struct platform_device *pdev);  3219     void ldv_check_final_state();  3222     void ldv_check_return_value(int);  3225     void ldv_check_return_value_probe(int);  3228     void ldv_initialize();  3231     void ldv_handler_precall();  3234     int nondet_int();  3237     int LDV_IN_INTERRUPT = 0;  3240     void ldv_main0_sequence_infinite_withcheck_stateful();    10     void ldv_error();    14     void * ldv_err_ptr(long error);    28     bool  ldv_is_err_or_null(const void *ptr);     5     int LDV_DMA_MAP_CALLS = 0;           return ;         }        {      3242     struct net_device *var_group1;  3243     struct ethtool_regs *var_group2;  3244     void *var_macb_get_regs_68_p2;  3245     struct ethtool_wolinfo *var_group3;  3246     struct ethtool_stats *var_group4;  3247     u64 *var_gem_get_ethtool_stats_63_p2;  3248     unsigned int var_gem_get_ethtool_strings_65_p1;  3249     u8 *var_gem_get_ethtool_strings_65_p2;  3250     int var_gem_get_sset_count_64_p1;  3251     int res_macb_open_58;  3252     int res_macb_close_59;  3253     struct sk_buff *var_group5;  3254     struct ifreq *var_group6;  3255     int var_macb_ioctl_71_p2;  3256     int var_macb_change_mtu_60_p1;  3257     unsigned long long var_macb_set_features_72_p1;  3258     int res_at91ether_open_78;  3259     int res_at91ether_close_79;  3260     struct platform_device *var_group7;  3261     struct clk **var_group8;  3262     struct clk **var_macb_clk_init_75_p2;  3263     struct clk **var_macb_clk_init_75_p3;  3264     struct clk **var_macb_clk_init_75_p4;  3265     struct clk **var_at91ether_clk_init_84_p2;  3266     struct clk **var_at91ether_clk_init_84_p3;  3267     struct clk **var_at91ether_clk_init_84_p4;  3268     int res_macb_probe_86;  3269     int var_at91ether_interrupt_82_p0;  3270     void *var_at91ether_interrupt_82_p1;  3271     int var_macb_interrupt_34_p0;  3272     void *var_macb_interrupt_34_p1;  3273     int ldv_s_macb_netdev_ops_net_device_ops;  3274     int ldv_s_at91ether_netdev_ops_net_device_ops;  3275     int ldv_s_macb_driver_platform_driver;  3276     int tmp;  3277     int tmp___0;  6185     ldv_s_macb_netdev_ops_net_device_ops = 0;  6188     ldv_s_at91ether_netdev_ops_net_device_ops = 0;  6209     ldv_s_macb_driver_platform_driver = 0;  6171     LDV_IN_INTERRUPT = 1;  6180     ldv_initialize() { /* Function call is skipped due to function is undefined */}  6214     goto ldv_51130;  6214     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  6219     goto ldv_51129;  6215     ldv_51129:;  6220     tmp = nondet_int() { /* 
Function call is skipped due to function is undefined */}  6220     switch (tmp);  7810     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {  2667       struct macb *lp;  2668       void *tmp;  2669       unsigned int tmp___0;             {  2058         return ((void *)dev) + 3200U;;             } 2667       lp = (struct macb *)tmp;  2669       tmp___0 = (*(lp->macb_reg_readl))(lp, 20);             {  2907         struct netdev_queue *tmp;               {  1964           struct netdev_queue *__CPAchecker_TMP_0 = (struct netdev_queue *)(dev->_tx);  1964           return __CPAchecker_TMP_0 + ((unsigned long)index);;               }              {                 { 80 Ignored inline assembler code    82             return ;;                 } 2896           return ;;               } 2908         return ;;             } 2673       lp->skb = skb;  2674       int __CPAchecker_TMP_0 = (int)(skb->len);  2674       lp->skb_length = __CPAchecker_TMP_0;  2675       void *__CPAchecker_TMP_1 = (void *)(skb->data);  2675       size_t __CPAchecker_TMP_2 = (size_t )(skb->len);             {    38         unsigned long long tmp;               {             }  184           struct dma_map_ops *ops;   185           struct dma_map_ops *tmp;   186           unsigned long long addr;   187           int tmp___0;   188           long tmp___1;   189           unsigned long tmp___2;   190           unsigned long tmp___3;                 {    32             long tmp;    35             tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */}    35             assume(!(tmp != 0L));    35             assume(!(((unsigned long)(dev->archdata.dma_ops)) == ((unsigned long)((struct dma_map_ops *)0))));    38             return dev->archdata.dma_ops;;                 }  185           ops = tmp;                 {   133             return ;;                 }                {   138             int __CPAchecker_TMP_0;   138             assume(!(dma_direction == 0));   138             assume(dma_direction == 1);                   __CPAchecker_TMP_0 = 1;   138             return __CPAchecker_TMP_0;;                 }  189           tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */}   189           assume(!(tmp___1 != 0L));   190           tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}   190           addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs);   193           tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */}   193           debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */}   196           return addr;;               } 2679       u32 __CPAchecker_TMP_3 = (u32 )(lp->skb_physaddr);  2679       (*(lp->macb_reg_writel))(lp, 12, __CPAchecker_TMP_3);  2681       (*(lp->macb_reg_writel))(lp, 16, skb->len);           } 7827     goto ldv_51080;  9771     ldv_51080:;  9772     ldv_51130:;  6214     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}  6219     goto ldv_51129;  6215     ldv_51129:;  6220     tmp = nondet_int() { /* Function call is skipped due to function is 
undefined */}  6220     switch (tmp);  7810     ldv_handler_precall() { /* Function call is skipped due to function is undefined */}           {         } 2667       struct macb *lp;  2668       void *tmp;  2669       unsigned int tmp___0;             {  2058         return ((void *)dev) + 3200U;;             } 2667       lp = (struct macb *)tmp;  2669       tmp___0 = (*(lp->macb_reg_readl))(lp, 20);             {  2907         struct netdev_queue *tmp;               {  1964           struct netdev_queue *__CPAchecker_TMP_0 = (struct netdev_queue *)(dev->_tx);  1964           return __CPAchecker_TMP_0 + ((unsigned long)index);;               }              {                 { 80 Ignored inline assembler code    82             return ;;                 } 2896           return ;;               } 2908         return ;;             } 2673       lp->skb = skb;  2674       int __CPAchecker_TMP_0 = (int)(skb->len);  2674       lp->skb_length = __CPAchecker_TMP_0;  2675       void *__CPAchecker_TMP_1 = (void *)(skb->data);  2675       size_t __CPAchecker_TMP_2 = (size_t )(skb->len);           } |              Source code         
     1 #ifndef _ASM_X86_BITOPS_H
    2 #define _ASM_X86_BITOPS_H
    3 
    4 /*
    5  * Copyright 1992, Linus Torvalds.
    6  *
    7  * Note: inlines with more than a single statement should be marked
    8  * __always_inline to avoid problems with older gcc's inlining heuristics.
    9  */
   10 
   11 #ifndef _LINUX_BITOPS_H
   12 #error only <linux/bitops.h> can be included directly
   13 #endif
   14 
   15 #include <linux/compiler.h>
   16 #include <asm/alternative.h>
   17 #include <asm/rmwcc.h>
   18 #include <asm/barrier.h>
   19 
   20 #if BITS_PER_LONG == 32
   21 # define _BITOPS_LONG_SHIFT 5
   22 #elif BITS_PER_LONG == 64
   23 # define _BITOPS_LONG_SHIFT 6
   24 #else
   25 # error "Unexpected BITS_PER_LONG"
   26 #endif
   27 
   28 #define BIT_64(n)			(U64_C(1) << (n))
   29 
   30 /*
   31  * These have to be done with inline assembly: that way the bit-setting
   32  * is guaranteed to be atomic. All bit operations return 0 if the bit
   33  * was cleared before the operation and != 0 if it was not.
   34  *
   35  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
   36  */
   37 
   38 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
   39 /* Technically wrong, but this avoids compilation errors on some gcc
   40    versions. */
   41 #define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
   42 #else
   43 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
   44 #endif
   45 
   46 #define ADDR				BITOP_ADDR(addr)
   47 
   48 /*
   49  * We do the locked ops that don't return the old value as
   50  * a mask operation on a byte.
   51  */
   52 #define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
   53 #define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
   54 #define CONST_MASK(nr)			(1 << ((nr) & 7))
   55 
   56 /**
   57  * set_bit - Atomically set a bit in memory
   58  * @nr: the bit to set
   59  * @addr: the address to start counting from
   60  *
   61  * This function is atomic and may not be reordered.  See __set_bit()
   62  * if you do not require the atomic guarantees.
   63  *
   64  * Note: there are no guarantees that this function will not be reordered
   65  * on non x86 architectures, so if you are writing portable code,
   66  * make sure not to rely on its reordering guarantees.
   67  *
   68  * Note that @nr may be almost arbitrarily large; this function is not
   69  * restricted to acting on a single-word quantity.
   70  */
   71 static __always_inline void
   72 set_bit(long nr, volatile unsigned long *addr)
   73 {
   74 	if (IS_IMMEDIATE(nr)) {
   75 		asm volatile(LOCK_PREFIX "orb %1,%0"
   76 			: CONST_MASK_ADDR(nr, addr)
   77 			: "iq" ((u8)CONST_MASK(nr))
   78 			: "memory");
   79 	} else {
   80 		asm volatile(LOCK_PREFIX "bts %1,%0"
   81 			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
   82 	}
   83 }
   84 
   85 /**
   86  * __set_bit - Set a bit in memory
   87  * @nr: the bit to set
   88  * @addr: the address to start counting from
   89  *
   90  * Unlike set_bit(), this function is non-atomic and may be reordered.
   91  * If it's called on the same region of memory simultaneously, the effect
   92  * may be that only one operation succeeds.
   93  */
   94 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
   95 {
   96 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
   97 }
   98 
   99 /**
  100  * clear_bit - Clears a bit in memory
  101  * @nr: Bit to clear
  102  * @addr: Address to start counting from
  103  *
  104  * clear_bit() is atomic and may not be reordered.  However, it does
  105  * not contain a memory barrier, so if it is used for locking purposes,
  106  * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  107  * in order to ensure changes are visible on other processors.
  108  */
  109 static __always_inline void
  110 clear_bit(long nr, volatile unsigned long *addr)
  111 {
  112 	if (IS_IMMEDIATE(nr)) {
  113 		asm volatile(LOCK_PREFIX "andb %1,%0"
  114 			: CONST_MASK_ADDR(nr, addr)
  115 			: "iq" ((u8)~CONST_MASK(nr)));
  116 	} else {
  117 		asm volatile(LOCK_PREFIX "btr %1,%0"
  118 			: BITOP_ADDR(addr)
  119 			: "Ir" (nr));
  120 	}
  121 }
  122 
  123 /*
  124  * clear_bit_unlock - Clears a bit in memory
  125  * @nr: Bit to clear
  126  * @addr: Address to start counting from
  127  *
  128  * clear_bit() is atomic and implies release semantics before the memory
  129  * operation. It can be used for an unlock.
  130  */
  131 static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
  132 {
  133 	barrier();
  134 	clear_bit(nr, addr);
  135 }
  136 
  137 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
  138 {
  139 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
  140 }
  141 
  142 /*
  143  * __clear_bit_unlock - Clears a bit in memory
  144  * @nr: Bit to clear
  145  * @addr: Address to start counting from
  146  *
  147  * __clear_bit() is non-atomic and implies release semantics before the memory
  148  * operation. It can be used for an unlock if no other CPUs can concurrently
  149  * modify other bits in the word.
  150  *
  151  * No memory barrier is required here, because x86 cannot reorder stores past
  152  * older loads. Same principle as spin_unlock.
  153  */
  154 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
  155 {
  156 	barrier();
  157 	__clear_bit(nr, addr);
  158 }
  159 
  160 /**
  161  * __change_bit - Toggle a bit in memory
  162  * @nr: the bit to change
  163  * @addr: the address to start counting from
  164  *
  165  * Unlike change_bit(), this function is non-atomic and may be reordered.
  166  * If it's called on the same region of memory simultaneously, the effect
  167  * may be that only one operation succeeds.
  168  */
  169 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
  170 {
  171 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
  172 }
  173 
  174 /**
  175  * change_bit - Toggle a bit in memory
  176  * @nr: Bit to change
  177  * @addr: Address to start counting from
  178  *
  179  * change_bit() is atomic and may not be reordered.
  180  * Note that @nr may be almost arbitrarily large; this function is not
  181  * restricted to acting on a single-word quantity.
  182  */
  183 static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  184 {
  185 	if (IS_IMMEDIATE(nr)) {
  186 		asm volatile(LOCK_PREFIX "xorb %1,%0"
  187 			: CONST_MASK_ADDR(nr, addr)
  188 			: "iq" ((u8)CONST_MASK(nr)));
  189 	} else {
  190 		asm volatile(LOCK_PREFIX "btc %1,%0"
  191 			: BITOP_ADDR(addr)
  192 			: "Ir" (nr));
  193 	}
  194 }
  195 
  196 /**
  197  * test_and_set_bit - Set a bit and return its old value
  198  * @nr: Bit to set
  199  * @addr: Address to count from
  200  *
  201  * This operation is atomic and cannot be reordered.
  202  * It also implies a memory barrier.
  203  */
  204 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
  205 {
  206 	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
  207 }
  208 
  209 /**
  210  * test_and_set_bit_lock - Set a bit and return its old value for lock
  211  * @nr: Bit to set
  212  * @addr: Address to count from
  213  *
  214  * This is the same as test_and_set_bit on x86.
  215  */
  216 static __always_inline bool
  217 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
  218 {
  219 	return test_and_set_bit(nr, addr);
  220 }
  221 
  222 /**
  223  * __test_and_set_bit - Set a bit and return its old value
  224  * @nr: Bit to set
  225  * @addr: Address to count from
  226  *
  227  * This operation is non-atomic and can be reordered.
  228  * If two examples of this operation race, one can appear to succeed
  229  * but actually fail.  You must protect multiple accesses with a lock.
  230  */
  231 static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
  232 {
  233 	bool oldbit;
  234 
  235 	asm("bts %2,%1\n\t"
  236 	    CC_SET(c)
  237 	    : CC_OUT(c) (oldbit), ADDR
  238 	    : "Ir" (nr));
  239 	return oldbit;
  240 }
  241 
  242 /**
  243  * test_and_clear_bit - Clear a bit and return its old value
  244  * @nr: Bit to clear
  245  * @addr: Address to count from
  246  *
  247  * This operation is atomic and cannot be reordered.
  248  * It also implies a memory barrier.
  249  */
  250 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
  251 {
  252 	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
  253 }
  254 
  255 /**
  256  * __test_and_clear_bit - Clear a bit and return its old value
  257  * @nr: Bit to clear
  258  * @addr: Address to count from
  259  *
  260  * This operation is non-atomic and can be reordered.
  261  * If two examples of this operation race, one can appear to succeed
  262  * but actually fail.  You must protect multiple accesses with a lock.
  263  *
  264  * Note: the operation is performed atomically with respect to
  265  * the local CPU, but not other CPUs. Portable code should not
  266  * rely on this behaviour.
  267  * KVM relies on this behaviour on x86 for modifying memory that is also
  268  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  269  * this without also updating arch/x86/kernel/kvm.c
  270  */
  271 static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
  272 {
  273 	bool oldbit;
  274 
  275 	asm volatile("btr %2,%1\n\t"
  276 		     CC_SET(c)
  277 		     : CC_OUT(c) (oldbit), ADDR
  278 		     : "Ir" (nr));
  279 	return oldbit;
  280 }
  281 
  282 /* WARNING: non atomic and it can be reordered! */
  283 static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
  284 {
  285 	bool oldbit;
  286 
  287 	asm volatile("btc %2,%1\n\t"
  288 		     CC_SET(c)
  289 		     : CC_OUT(c) (oldbit), ADDR
  290 		     : "Ir" (nr) : "memory");
  291 
  292 	return oldbit;
  293 }
  294 
  295 /**
  296  * test_and_change_bit - Change a bit and return its old value
  297  * @nr: Bit to change
  298  * @addr: Address to count from
  299  *
  300  * This operation is atomic and cannot be reordered.
  301  * It also implies a memory barrier.
  302  */
  303 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
  304 {
  305 	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
  306 }
  307 
  308 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
  309 {
  310 	return ((1UL << (nr & (BITS_PER_LONG-1))) &
  311 		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
  312 }
  313 
  314 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
  315 {
  316 	bool oldbit;
  317 
  318 	asm volatile("bt %2,%1\n\t"
  319 		     CC_SET(c)
  320 		     : CC_OUT(c) (oldbit)
  321 		     : "m" (*(unsigned long *)addr), "Ir" (nr));
  322 
  323 	return oldbit;
  324 }
  325 
  326 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
  327 /**
  328  * test_bit - Determine whether a bit is set
  329  * @nr: bit number to test
  330  * @addr: Address to start counting from
  331  */
  332 static bool test_bit(int nr, const volatile unsigned long *addr);
  333 #endif
  334 
  335 #define test_bit(nr, addr)			\
  336 	(__builtin_constant_p((nr))		\
  337 	 ? constant_test_bit((nr), (addr))	\
  338 	 : variable_test_bit((nr), (addr)))
  339 
  340 /**
  341  * __ffs - find first set bit in word
  342  * @word: The word to search
  343  *
  344  * Undefined if no bit exists, so code should check against 0 first.
  345  */
  346 static __always_inline unsigned long __ffs(unsigned long word)
  347 {
  348 	asm("rep; bsf %1,%0"
  349 		: "=r" (word)
  350 		: "rm" (word));
  351 	return word;
  352 }
  353 
  354 /**
  355  * ffz - find first zero bit in word
  356  * @word: The word to search
  357  *
  358  * Undefined if no zero exists, so code should check against ~0UL first.
  359  */
  360 static __always_inline unsigned long ffz(unsigned long word)
  361 {
  362 	asm("rep; bsf %1,%0"
  363 		: "=r" (word)
  364 		: "r" (~word));
  365 	return word;
  366 }
  367 
  368 /*
  369  * __fls: find last set bit in word
  370  * @word: The word to search
  371  *
  372  * Undefined if no set bit exists, so code should check against 0 first.
  373  */
  374 static __always_inline unsigned long __fls(unsigned long word)
  375 {
  376 	asm("bsr %1,%0"
  377 	    : "=r" (word)
  378 	    : "rm" (word));
  379 	return word;
  380 }
  381 
  382 #undef ADDR
  383 
  384 #ifdef __KERNEL__
  385 /**
  386  * ffs - find first set bit in word
  387  * @x: the word to search
  388  *
  389  * This is defined the same way as the libc and compiler builtin ffs
  390  * routines, therefore differs in spirit from the other bitops.
  391  *
  392  * ffs(value) returns 0 if value is 0 or the position of the first
  393  * set bit if value is nonzero. The first (least significant) bit
  394  * is at position 1.
  395  */
  396 static __always_inline int ffs(int x)
  397 {
  398 	int r;
  399 
  400 #ifdef CONFIG_X86_64
  401 	/*
  402 	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
  403 	 * dest reg is undefined if x==0, but their CPU architect says its
  404 	 * value is written to set it to the same as before, except that the
  405 	 * top 32 bits will be cleared.
  406 	 *
  407 	 * We cannot do this on 32 bits because at the very least some
  408 	 * 486 CPUs did not behave this way.
  409 	 */
  410 	asm("bsfl %1,%0"
  411 	    : "=r" (r)
  412 	    : "rm" (x), "0" (-1));
  413 #elif defined(CONFIG_X86_CMOV)
  414 	asm("bsfl %1,%0\n\t"
  415 	    "cmovzl %2,%0"
  416 	    : "=&r" (r) : "rm" (x), "r" (-1));
  417 #else
  418 	asm("bsfl %1,%0\n\t"
  419 	    "jnz 1f\n\t"
  420 	    "movl $-1,%0\n"
  421 	    "1:" : "=r" (r) : "rm" (x));
  422 #endif
  423 	return r + 1;
  424 }
  425 
  426 /**
  427  * fls - find last set bit in word
  428  * @x: the word to search
  429  *
  430  * This is defined in a similar way as the libc and compiler builtin
  431  * ffs, but returns the position of the most significant set bit.
  432  *
  433  * fls(value) returns 0 if value is 0 or the position of the last
  434  * set bit if value is nonzero. The last (most significant) bit is
  435  * at position 32.
  436  */
  437 static __always_inline int fls(int x)
  438 {
  439 	int r;
  440 
  441 #ifdef CONFIG_X86_64
  442 	/*
  443 	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
  444 	 * dest reg is undefined if x==0, but their CPU architect says its
  445 	 * value is written to set it to the same as before, except that the
  446 	 * top 32 bits will be cleared.
  447 	 *
  448 	 * We cannot do this on 32 bits because at the very least some
  449 	 * 486 CPUs did not behave this way.
  450 	 */
  451 	asm("bsrl %1,%0"
  452 	    : "=r" (r)
  453 	    : "rm" (x), "0" (-1));
  454 #elif defined(CONFIG_X86_CMOV)
  455 	asm("bsrl %1,%0\n\t"
  456 	    "cmovzl %2,%0"
  457 	    : "=&r" (r) : "rm" (x), "rm" (-1));
  458 #else
  459 	asm("bsrl %1,%0\n\t"
  460 	    "jnz 1f\n\t"
  461 	    "movl $-1,%0\n"
  462 	    "1:" : "=r" (r) : "rm" (x));
  463 #endif
  464 	return r + 1;
  465 }
  466 
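/*
 * Illustrative user-space sketch (not kernel code) of the 1-based ffs()/fls()
 * semantics documented above, built on the compiler builtins they mirror:
 *   ffs(0) == 0, ffs(0x10) == 5 (least significant set bit),
 *   fls(0) == 0, fls(0x10) == 5, fls(-1) == 32 (most significant set bit).
 */
#include <stdio.h>

static int demo_ffs(int x) { return __builtin_ffs(x); }
static int demo_fls(int x) { return x ? 32 - __builtin_clz((unsigned int)x) : 0; }

int main(void)
{
	printf("ffs(0)=%d ffs(0x10)=%d\n", demo_ffs(0), demo_ffs(0x10));
	printf("fls(0)=%d fls(0x10)=%d fls(-1)=%d\n",
	       demo_fls(0), demo_fls(0x10), demo_fls(-1));
	return 0;
}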
  467 /**
  468  * fls64 - find last set bit in a 64-bit word
  469  * @x: the word to search
  470  *
  471  * This is defined in a similar way as the libc and compiler builtin
  472  * ffsll, but returns the position of the most significant set bit.
  473  *
  474  * fls64(value) returns 0 if value is 0 or the position of the last
  475  * set bit if value is nonzero. The last (most significant) bit is
  476  * at position 64.
  477  */
  478 #ifdef CONFIG_X86_64
  479 static __always_inline int fls64(__u64 x)
  480 {
  481 	int bitpos = -1;
  482 	/*
  483 	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
  484 	 * dest reg is undefined if x==0, but their CPU architect says its
  485 	 * value is written to set it to the same as before.
  486 	 */
  487 	asm("bsrq %1,%q0"
  488 	    : "+r" (bitpos)
  489 	    : "rm" (x));
  490 	return bitpos + 1;
  491 }
  492 #else
  493 #include <asm-generic/bitops/fls64.h>
  494 #endif
  495 
  496 #include <asm-generic/bitops/find.h>
  497 
  498 #include <asm-generic/bitops/sched.h>
  499 
  500 #include <asm/arch_hweight.h>
  501 
  502 #include <asm-generic/bitops/const_hweight.h>
  503 
  504 #include <asm-generic/bitops/le.h>
  505 
  506 #include <asm-generic/bitops/ext2-atomic-setbit.h>
  507 
  508 #endif /* __KERNEL__ */
  509 #endif /* _ASM_X86_BITOPS_H */
    1 #ifndef _ASM_X86_DMA_MAPPING_H
    2 #define _ASM_X86_DMA_MAPPING_H
    3 
    4 /*
    5  * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
    6  * Documentation/DMA-API.txt for documentation.
    7  */
    8 
    9 #include <linux/kmemcheck.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/dma-debug.h>
   12 #include <asm/io.h>
   13 #include <asm/swiotlb.h>
   14 #include <linux/dma-contiguous.h>
   15 
   16 #ifdef CONFIG_ISA
   17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
   18 #else
   19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
   20 #endif
   21 
   22 #define DMA_ERROR_CODE	0
   23 
   24 extern int iommu_merge;
   25 extern struct device x86_dma_fallback_dev;
   26 extern int panic_on_overflow;
   27 
   28 extern struct dma_map_ops *dma_ops;
   29 
   30 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
   31 {
   32 #ifndef CONFIG_X86_DEV_DMA_OPS
   33 	return dma_ops;
   34 #else
   35 	if (unlikely(!dev) || !dev->archdata.dma_ops)
   36 		return dma_ops;
   37 	else
   38 		return dev->archdata.dma_ops;
   39 #endif
   40 }
   41 
   42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
   43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
   44 
   45 #define HAVE_ARCH_DMA_SUPPORTED 1
   46 extern int dma_supported(struct device *hwdev, u64 mask);
   47 
   48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
   49 					dma_addr_t *dma_addr, gfp_t flag,
   50 					unsigned long attrs);
   51 
   52 extern void dma_generic_free_coherent(struct device *dev, size_t size,
   53 				      void *vaddr, dma_addr_t dma_addr,
   54 				      unsigned long attrs);
   55 
   56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
   57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
   58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
   59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
   60 #else
   61 
   62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
   63 {
   64 	if (!dev->dma_mask)
   65 		return 0;
   66 
   67 	return addr + size - 1 <= *dev->dma_mask;
   68 }
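/*
 * Hypothetical worked example (user-space, illustrative only) of the
 * dma_capable() arithmetic above with a 32-bit mask: a buffer whose last byte
 * is at 0xffffffff passes, one that crosses the 4 GiB boundary does not.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_dma_capable(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size - 1 <= mask;	/* same check as dma_capable() */
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* DMA_BIT_MASK(32) */

	printf("%d\n", demo_dma_capable(0xfffff000ULL, 0x1000, mask32)); /* 1: fits */
	printf("%d\n", demo_dma_capable(0xfffff000ULL, 0x1001, mask32)); /* 0: crosses 4 GiB */
	return 0;
}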
   69 
   70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
   71 {
   72 	return paddr;
   73 }
   74 
   75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
   76 {
   77 	return daddr;
   78 }
   79 #endif /* CONFIG_X86_DMA_REMAP */
   80 
   81 static inline void
   82 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
   83 	enum dma_data_direction dir)
   84 {
   85 	flush_write_buffers();
   86 }
   87 
   88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
   89 						    gfp_t gfp)
   90 {
   91 	unsigned long dma_mask = 0;
   92 
   93 	dma_mask = dev->coherent_dma_mask;
   94 	if (!dma_mask)
   95 		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
   96 
   97 	return dma_mask;
   98 }
   99 
  100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
  101 {
  102 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
  103 
  104 	if (dma_mask <= DMA_BIT_MASK(24))
  105 		gfp |= GFP_DMA;
  106 #ifdef CONFIG_X86_64
  107 	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
  108 		gfp |= GFP_DMA32;
  109 #endif
  110 	return gfp;
  111 }
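/*
 * Hypothetical user-space sketch of the mask -> GFP zone selection performed
 * by dma_alloc_coherent_gfp_flags() above.  The flag values are stand-ins,
 * not the real kernel GFP_* bits; the 32-bit branch is the CONFIG_X86_64 path.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_GFP_DMA	0x1u	/* stand-in for GFP_DMA   */
#define DEMO_GFP_DMA32	0x2u	/* stand-in for GFP_DMA32 */

static unsigned int demo_gfp_flags(uint64_t coherent_mask)
{
	unsigned int gfp = 0;

	if (coherent_mask <= 0xffffffULL)		/* DMA_BIT_MASK(24) */
		gfp |= DEMO_GFP_DMA;
	else if (coherent_mask <= 0xffffffffULL)	/* DMA_BIT_MASK(32) */
		gfp |= DEMO_GFP_DMA32;
	return gfp;
}

int main(void)
{
	printf("%#x %#x %#x\n", demo_gfp_flags(0xffffffULL),
	       demo_gfp_flags(0xffffffffULL), demo_gfp_flags(~0ULL)); /* 0x1 0x2 0 */
	return 0;
}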
  112 
  113 #endif
    1 
    2 /*
    3  * Cadence MACB/GEM Ethernet Controller driver
    4  *
    5  * Copyright (C) 2004-2006 Atmel Corporation
    6  *
    7  * This program is free software; you can redistribute it and/or modify
    8  * it under the terms of the GNU General Public License version 2 as
    9  * published by the Free Software Foundation.
   10  */
   11 
   12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   13 #include <linux/clk.h>
   14 #include <linux/module.h>
   15 #include <linux/moduleparam.h>
   16 #include <linux/kernel.h>
   17 #include <linux/types.h>
   18 #include <linux/circ_buf.h>
   19 #include <linux/slab.h>
   20 #include <linux/init.h>
   21 #include <linux/io.h>
   22 #include <linux/gpio.h>
   23 #include <linux/gpio/consumer.h>
   24 #include <linux/interrupt.h>
   25 #include <linux/netdevice.h>
   26 #include <linux/etherdevice.h>
   27 #include <linux/dma-mapping.h>
   28 #include <linux/platform_data/macb.h>
   29 #include <linux/platform_device.h>
   30 #include <linux/phy.h>
   31 #include <linux/of.h>
   32 #include <linux/of_device.h>
   33 #include <linux/of_gpio.h>
   34 #include <linux/of_mdio.h>
   35 #include <linux/of_net.h>
   36 
   37 #include "macb.h"
   38 
   39 #define MACB_RX_BUFFER_SIZE	128
   40 #define RX_BUFFER_MULTIPLE	64  /* bytes */
   41 #define RX_RING_SIZE		512 /* must be power of 2 */
   42 #define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
   43 
   44 #define TX_RING_SIZE		128 /* must be power of 2 */
   45 #define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
   46 
   47 /* level of occupied TX descriptors under which we wake up TX process */
   48 #define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
   49 
   50 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
   51 				 | MACB_BIT(ISR_ROVR))
   52 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
   53 					| MACB_BIT(ISR_RLE)		\
   54 					| MACB_BIT(TXERR))
   55 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
   56 
   57 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
   58 #define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
   59 
   60 #define GEM_MTU_MIN_SIZE	68
   61 
   62 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
   63 #define MACB_WOL_ENABLED		(0x1 << 1)
   64 
   65 /* Graceful stop timeouts in us. We should allow up to
   66  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
   67  */
   68 #define MACB_HALT_TIMEOUT	1230
   69 
   70 /* Ring buffer accessors */
   71 static unsigned int macb_tx_ring_wrap(unsigned int index)
   72 {
   73 	return index & (TX_RING_SIZE - 1);
   74 }
   75 
   76 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
   77 					  unsigned int index)
   78 {
   79 	return &queue->tx_ring[macb_tx_ring_wrap(index)];
   80 }
   81 
   82 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
   83 				       unsigned int index)
   84 {
   85 	return &queue->tx_skb[macb_tx_ring_wrap(index)];
   86 }
   87 
   88 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
   89 {
   90 	dma_addr_t offset;
   91 
   92 	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
   93 
   94 	return queue->tx_ring_dma + offset;
   95 }
   96 
   97 static unsigned int macb_rx_ring_wrap(unsigned int index)
   98 {
   99 	return index & (RX_RING_SIZE - 1);
  100 }
  101 
  102 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
  103 {
  104 	return &bp->rx_ring[macb_rx_ring_wrap(index)];
  105 }
  106 
  107 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
  108 {
  109 	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
  110 }
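/*
 * Illustrative user-space sketch (not driver code) of the ring-wrap accessors
 * above: because TX_RING_SIZE and RX_RING_SIZE are powers of two,
 * "index & (size - 1)" equals "index % size" and keeps working when the
 * free-running head/tail indices overflow.
 */
#include <stdio.h>

#define DEMO_RING_SIZE 128u	/* stand-in for TX_RING_SIZE; must be a power of 2 */

static unsigned int demo_ring_wrap(unsigned int index)
{
	return index & (DEMO_RING_SIZE - 1);
}

int main(void)
{
	printf("%u %u %u\n", demo_ring_wrap(5), demo_ring_wrap(130),
	       demo_ring_wrap(0xffffffffu));	/* 5, 2, 127 */
	return 0;
}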
  111 
  112 /* I/O accessors */
  113 static u32 hw_readl_native(struct macb *bp, int offset)
  114 {
  115 	return __raw_readl(bp->regs + offset);
  116 }
  117 
  118 static void hw_writel_native(struct macb *bp, int offset, u32 value)
  119 {
  120 	__raw_writel(value, bp->regs + offset);
  121 }
  122 
  123 static u32 hw_readl(struct macb *bp, int offset)
  124 {
  125 	return readl_relaxed(bp->regs + offset);
  126 }
  127 
  128 static void hw_writel(struct macb *bp, int offset, u32 value)
  129 {
  130 	writel_relaxed(value, bp->regs + offset);
  131 }
  132 
  133 /* Find the CPU endianness by using the loopback bit of NCR register. When the
  134  * CPU is in big endian we need to program swapped mode for management
  135  * descriptor access.
  136  */
  137 static bool hw_is_native_io(void __iomem *addr)
  138 {
  139 	u32 value = MACB_BIT(LLB);
  140 
  141 	__raw_writel(value, addr + MACB_NCR);
  142 	value = __raw_readl(addr + MACB_NCR);
  143 
  144 	/* Write 0 back to disable everything */
  145 	__raw_writel(0, addr + MACB_NCR);
  146 
  147 	return value == MACB_BIT(LLB);
  148 }
  149 
  150 static bool hw_is_gem(void __iomem *addr, bool native_io)
  151 {
  152 	u32 id;
  153 
  154 	if (native_io)
  155 		id = __raw_readl(addr + MACB_MID);
  156 	else
  157 		id = readl_relaxed(addr + MACB_MID);
  158 
  159 	return MACB_BFEXT(IDNUM, id) >= 0x2;
  160 }
  161 
  162 static void macb_set_hwaddr(struct macb *bp)
  163 {
  164 	u32 bottom;
  165 	u16 top;
  166 
  167 	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
  168 	macb_or_gem_writel(bp, SA1B, bottom);
  169 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
  170 	macb_or_gem_writel(bp, SA1T, top);
  171 
  172 	/* Clear unused address register sets */
  173 	macb_or_gem_writel(bp, SA2B, 0);
  174 	macb_or_gem_writel(bp, SA2T, 0);
  175 	macb_or_gem_writel(bp, SA3B, 0);
  176 	macb_or_gem_writel(bp, SA3T, 0);
  177 	macb_or_gem_writel(bp, SA4B, 0);
  178 	macb_or_gem_writel(bp, SA4T, 0);
  179 }
  180 
  181 static void macb_get_hwaddr(struct macb *bp)
  182 {
  183 	struct macb_platform_data *pdata;
  184 	u32 bottom;
  185 	u16 top;
  186 	u8 addr[6];
  187 	int i;
  188 
  189 	pdata = dev_get_platdata(&bp->pdev->dev);
  190 
  191 	/* Check all 4 address registers for a valid address */
  192 	for (i = 0; i < 4; i++) {
  193 		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
  194 		top = macb_or_gem_readl(bp, SA1T + i * 8);
  195 
  196 		if (pdata && pdata->rev_eth_addr) {
  197 			addr[5] = bottom & 0xff;
  198 			addr[4] = (bottom >> 8) & 0xff;
  199 			addr[3] = (bottom >> 16) & 0xff;
  200 			addr[2] = (bottom >> 24) & 0xff;
  201 			addr[1] = top & 0xff;
  202 			addr[0] = (top & 0xff00) >> 8;
  203 		} else {
  204 			addr[0] = bottom & 0xff;
  205 			addr[1] = (bottom >> 8) & 0xff;
  206 			addr[2] = (bottom >> 16) & 0xff;
  207 			addr[3] = (bottom >> 24) & 0xff;
  208 			addr[4] = top & 0xff;
  209 			addr[5] = (top >> 8) & 0xff;
  210 		}
  211 
  212 		if (is_valid_ether_addr(addr)) {
  213 			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
  214 			return;
  215 		}
  216 	}
  217 
  218 	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
  219 	eth_hw_addr_random(bp->dev);
  220 }
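/*
 * Hypothetical sketch of the SA1B/SA1T byte layout used by macb_set_hwaddr()
 * and macb_get_hwaddr() above: the first four MAC bytes are packed
 * little-endian into the "bottom" register, the last two into the low half of
 * "top".  Values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
	uint32_t bottom = mac[0] | mac[1] << 8 | mac[2] << 16 | (uint32_t)mac[3] << 24;
	uint16_t top    = mac[4] | mac[5] << 8;

	/* de:ad:be:ef:00:01 -> bottom 0xefbeadde, top 0x0100 */
	printf("bottom=%#010x top=%#06x\n", bottom, (unsigned int)top);
	return 0;
}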
  221 
  222 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  223 {
  224 	struct macb *bp = bus->priv;
  225 	int value;
  226 
  227 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
  228 			      | MACB_BF(RW, MACB_MAN_READ)
  229 			      | MACB_BF(PHYA, mii_id)
  230 			      | MACB_BF(REGA, regnum)
  231 			      | MACB_BF(CODE, MACB_MAN_CODE)));
  232 
  233 	/* wait for end of transfer */
  234 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
  235 		cpu_relax();
  236 
  237 	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
  238 
  239 	return value;
  240 }
  241 
  242 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
  243 			   u16 value)
  244 {
  245 	struct macb *bp = bus->priv;
  246 
  247 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
  248 			      | MACB_BF(RW, MACB_MAN_WRITE)
  249 			      | MACB_BF(PHYA, mii_id)
  250 			      | MACB_BF(REGA, regnum)
  251 			      | MACB_BF(CODE, MACB_MAN_CODE)
  252 			      | MACB_BF(DATA, value)));
  253 
  254 	/* wait for end of transfer */
  255 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
  256 		cpu_relax();
  257 
  258 	return 0;
  259 }
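/*
 * Illustrative user-space sketch (not driver code) of the field insert/extract
 * pattern behind MACB_BF()/MACB_BFEXT() used when composing the MAN register
 * frame above.  The offset/size values below are arbitrary placeholders, not
 * the real macb.h definitions.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_bf(unsigned int offset, unsigned int size, uint32_t value)
{
	return (value & ((1u << size) - 1)) << offset;	/* insert a field */
}

static uint32_t demo_bfext(unsigned int offset, unsigned int size, uint32_t reg)
{
	return (reg >> offset) & ((1u << size) - 1);	/* extract a field */
}

int main(void)
{
	uint32_t reg = demo_bf(23, 5, 0x3) | demo_bf(0, 16, 0xbeef);

	printf("phya=%#x data=%#x\n", demo_bfext(23, 5, reg), demo_bfext(0, 16, reg));
	return 0;
}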
  260 
  261 /**
  262  * macb_set_tx_clk() - Set a clock to a new frequency
  263  * @clk: Pointer to the clock to change
  264  * @speed: New link speed (SPEED_10/100/1000) used to select the frequency in Hz
  265  * @dev: Pointer to the struct net_device
  266  */
  267 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
  268 {
  269 	long ferr, rate, rate_rounded;
  270 
  271 	if (!clk)
  272 		return;
  273 
  274 	switch (speed) {
  275 	case SPEED_10:
  276 		rate = 2500000;
  277 		break;
  278 	case SPEED_100:
  279 		rate = 25000000;
  280 		break;
  281 	case SPEED_1000:
  282 		rate = 125000000;
  283 		break;
  284 	default:
  285 		return;
  286 	}
  287 
  288 	rate_rounded = clk_round_rate(clk, rate);
  289 	if (rate_rounded < 0)
  290 		return;
  291 
  292 	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
  293 	 * is not satisfied.
  294 	 */
  295 	ferr = abs(rate_rounded - rate);
  296 	ferr = DIV_ROUND_UP(ferr, rate / 100000);
  297 	if (ferr > 5)
  298 		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
  299 			    rate);
  300 
  301 	if (clk_set_rate(clk, rate_rounded))
  302 		netdev_err(dev, "adjusting tx_clk failed.\n");
  303 }
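/*
 * Worked example (illustrative only) of the 50 ppm check in macb_set_tx_clk()
 * above: ferr is computed in units of 10 ppm, so the "> 5" test fires above
 * 50 ppm.  The rounded rate below is a made-up value.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long rate = 125000000;		/* SPEED_1000 target */
	long rate_rounded = 125010000;	/* hypothetical closest achievable rate */
	long ferr = labs(rate_rounded - rate);		/* 10000 Hz */

	ferr = DEMO_DIV_ROUND_UP(ferr, rate / 100000);	/* 10000 / 1250 = 8 -> 80 ppm */
	printf("ferr=%ld (x10 ppm) -> %s\n", ferr, ferr > 5 ? "warn" : "ok");
	return 0;
}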
  304 
  305 static void macb_handle_link_change(struct net_device *dev)
  306 {
  307 	struct macb *bp = netdev_priv(dev);
  308 	struct phy_device *phydev = dev->phydev;
  309 	unsigned long flags;
  310 	int status_change = 0;
  311 
  312 	spin_lock_irqsave(&bp->lock, flags);
  313 
  314 	if (phydev->link) {
  315 		if ((bp->speed != phydev->speed) ||
  316 		    (bp->duplex != phydev->duplex)) {
  317 			u32 reg;
  318 
  319 			reg = macb_readl(bp, NCFGR);
  320 			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
  321 			if (macb_is_gem(bp))
  322 				reg &= ~GEM_BIT(GBE);
  323 
  324 			if (phydev->duplex)
  325 				reg |= MACB_BIT(FD);
  326 			if (phydev->speed == SPEED_100)
  327 				reg |= MACB_BIT(SPD);
  328 			if (phydev->speed == SPEED_1000 &&
  329 			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
  330 				reg |= GEM_BIT(GBE);
  331 
  332 			macb_or_gem_writel(bp, NCFGR, reg);
  333 
  334 			bp->speed = phydev->speed;
  335 			bp->duplex = phydev->duplex;
  336 			status_change = 1;
  337 		}
  338 	}
  339 
  340 	if (phydev->link != bp->link) {
  341 		if (!phydev->link) {
  342 			bp->speed = 0;
  343 			bp->duplex = -1;
  344 		}
  345 		bp->link = phydev->link;
  346 
  347 		status_change = 1;
  348 	}
  349 
  350 	spin_unlock_irqrestore(&bp->lock, flags);
  351 
  352 	if (status_change) {
  353 		if (phydev->link) {
  354 			/* Update the TX clock rate if and only if the link is
  355 			 * up and there has been a link change.
  356 			 */
  357 			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
  358 
  359 			netif_carrier_on(dev);
  360 			netdev_info(dev, "link up (%d/%s)\n",
  361 				    phydev->speed,
  362 				    phydev->duplex == DUPLEX_FULL ?
  363 				    "Full" : "Half");
  364 		} else {
  365 			netif_carrier_off(dev);
  366 			netdev_info(dev, "link down\n");
  367 		}
  368 	}
  369 }
  370 
  371 /* based on au1000_eth.c */
  372 static int macb_mii_probe(struct net_device *dev)
  373 {
  374 	struct macb *bp = netdev_priv(dev);
  375 	struct macb_platform_data *pdata;
  376 	struct phy_device *phydev;
  377 	int phy_irq;
  378 	int ret;
  379 
  380 	phydev = phy_find_first(bp->mii_bus);
  381 	if (!phydev) {
  382 		netdev_err(dev, "no PHY found\n");
  383 		return -ENXIO;
  384 	}
  385 
  386 	pdata = dev_get_platdata(&bp->pdev->dev);
  387 	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
  388 		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
  389 					"phy int");
  390 		if (!ret) {
  391 			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
  392 			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
  393 		}
  394 	}
  395 
  396 	/* attach the mac to the phy */
  397 	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
  398 				 bp->phy_interface);
  399 	if (ret) {
  400 		netdev_err(dev, "Could not attach to PHY\n");
  401 		return ret;
  402 	}
  403 
  404 	/* mask with MAC supported features */
  405 	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
  406 		phydev->supported &= PHY_GBIT_FEATURES;
  407 	else
  408 		phydev->supported &= PHY_BASIC_FEATURES;
  409 
  410 	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
  411 		phydev->supported &= ~SUPPORTED_1000baseT_Half;
  412 
  413 	phydev->advertising = phydev->supported;
  414 
  415 	bp->link = 0;
  416 	bp->speed = 0;
  417 	bp->duplex = -1;
  418 
  419 	return 0;
  420 }
  421 
  422 static int macb_mii_init(struct macb *bp)
  423 {
  424 	struct macb_platform_data *pdata;
  425 	struct device_node *np;
  426 	int err = -ENXIO, i;
  427 
  428 	/* Enable management port */
  429 	macb_writel(bp, NCR, MACB_BIT(MPE));
  430 
  431 	bp->mii_bus = mdiobus_alloc();
  432 	if (!bp->mii_bus) {
  433 		err = -ENOMEM;
  434 		goto err_out;
  435 	}
  436 
  437 	bp->mii_bus->name = "MACB_mii_bus";
  438 	bp->mii_bus->read = &macb_mdio_read;
  439 	bp->mii_bus->write = &macb_mdio_write;
  440 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  441 		 bp->pdev->name, bp->pdev->id);
  442 	bp->mii_bus->priv = bp;
  443 	bp->mii_bus->parent = &bp->pdev->dev;
  444 	pdata = dev_get_platdata(&bp->pdev->dev);
  445 
  446 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
  447 
  448 	np = bp->pdev->dev.of_node;
  449 	if (np) {
  450 		/* try dt phy registration */
  451 		err = of_mdiobus_register(bp->mii_bus, np);
  452 
  453 		/* fallback to standard phy registration if no PHY was
  454 		 * found during dt phy registration
  455 		 */
  456 		if (!err && !phy_find_first(bp->mii_bus)) {
  457 			for (i = 0; i < PHY_MAX_ADDR; i++) {
  458 				struct phy_device *phydev;
  459 
  460 				phydev = mdiobus_scan(bp->mii_bus, i);
  461 				if (IS_ERR(phydev) &&
  462 				    PTR_ERR(phydev) != -ENODEV) {
  463 					err = PTR_ERR(phydev);
  464 					break;
  465 				}
  466 			}
  467 
  468 			if (err)
  469 				goto err_out_unregister_bus;
  470 		}
  471 	} else {
  472 		if (pdata)
  473 			bp->mii_bus->phy_mask = pdata->phy_mask;
  474 
  475 		err = mdiobus_register(bp->mii_bus);
  476 	}
  477 
  478 	if (err)
  479 		goto err_out_free_mdiobus;
  480 
  481 	err = macb_mii_probe(bp->dev);
  482 	if (err)
  483 		goto err_out_unregister_bus;
  484 
  485 	return 0;
  486 
  487 err_out_unregister_bus:
  488 	mdiobus_unregister(bp->mii_bus);
  489 err_out_free_mdiobus:
  490 	mdiobus_free(bp->mii_bus);
  491 err_out:
  492 	return err;
  493 }
  494 
  495 static void macb_update_stats(struct macb *bp)
  496 {
  497 	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
  498 	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
  499 	int offset = MACB_PFR;
  500 
  501 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
  502 
  503 	for (; p < end; p++, offset += 4)
  504 		*p += bp->macb_reg_readl(bp, offset);
  505 }
  506 
  507 static int macb_halt_tx(struct macb *bp)
  508 {
  509 	unsigned long	halt_time, timeout;
  510 	u32		status;
  511 
  512 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
  513 
  514 	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
  515 	do {
  516 		halt_time = jiffies;
  517 		status = macb_readl(bp, TSR);
  518 		if (!(status & MACB_BIT(TGO)))
  519 			return 0;
  520 
  521 		usleep_range(10, 250);
  522 	} while (time_before(halt_time, timeout));
  523 
  524 	return -ETIMEDOUT;
  525 }
  526 
  527 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
  528 {
  529 	if (tx_skb->mapping) {
  530 		if (tx_skb->mapped_as_page)
  531 			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
  532 				       tx_skb->size, DMA_TO_DEVICE);
  533 		else
  534 			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
  535 					 tx_skb->size, DMA_TO_DEVICE);
  536 		tx_skb->mapping = 0;
  537 	}
  538 
  539 	if (tx_skb->skb) {
  540 		dev_kfree_skb_any(tx_skb->skb);
  541 		tx_skb->skb = NULL;
  542 	}
  543 }
  544 
  545 static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
  546 {
  547 	desc->addr = (u32)addr;
  548 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  549 	desc->addrh = (u32)(addr >> 32);
  550 #endif
  551 }
  552 
  553 static void macb_tx_error_task(struct work_struct *work)
  554 {
  555 	struct macb_queue	*queue = container_of(work, struct macb_queue,
  556 						      tx_error_task);
  557 	struct macb		*bp = queue->bp;
  558 	struct macb_tx_skb	*tx_skb;
  559 	struct macb_dma_desc	*desc;
  560 	struct sk_buff		*skb;
  561 	unsigned int		tail;
  562 	unsigned long		flags;
  563 
  564 	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
  565 		    (unsigned int)(queue - bp->queues),
  566 		    queue->tx_tail, queue->tx_head);
  567 
  568 	/* Prevent the queue IRQ handlers from running: each of them may call
  569 	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
  570 	 * As explained below, we have to halt the transmission before updating
  571 	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
  572 	 * network engine about the macb/gem being halted.
  573 	 */
  574 	spin_lock_irqsave(&bp->lock, flags);
  575 
  576 	/* Make sure nobody is trying to queue up new packets */
  577 	netif_tx_stop_all_queues(bp->dev);
  578 
  579 	/* Stop transmission now
  580 	 * (in case we have just queued new packets)
  581 	 * macb/gem must be halted to write TBQP register
  582 	 */
  583 	if (macb_halt_tx(bp))
  584 		/* Just complain for now, reinitializing TX path can be good */
  585 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
  586 
  587 	/* Treat frames in TX queue including the ones that caused the error.
  588 	 * Free transmit buffers in upper layer.
  589 	 */
  590 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
  591 		u32	ctrl;
  592 
  593 		desc = macb_tx_desc(queue, tail);
  594 		ctrl = desc->ctrl;
  595 		tx_skb = macb_tx_skb(queue, tail);
  596 		skb = tx_skb->skb;
  597 
  598 		if (ctrl & MACB_BIT(TX_USED)) {
  599 			/* skb is set for the last buffer of the frame */
  600 			while (!skb) {
  601 				macb_tx_unmap(bp, tx_skb);
  602 				tail++;
  603 				tx_skb = macb_tx_skb(queue, tail);
  604 				skb = tx_skb->skb;
  605 			}
  606 
  607 			/* ctrl still refers to the first buffer descriptor
  608 			 * since it's the only one written back by the hardware
  609 			 */
  610 			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
  611 				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
  612 					    macb_tx_ring_wrap(tail), skb->data);
  613 				bp->stats.tx_packets++;
  614 				bp->stats.tx_bytes += skb->len;
  615 			}
  616 		} else {
  617 			/* "Buffers exhausted mid-frame" errors may only happen
  618 			 * if the driver is buggy, so complain loudly about
  619 			 * those. Statistics are updated by hardware.
  620 			 */
  621 			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
  622 				netdev_err(bp->dev,
  623 					   "BUG: TX buffers exhausted mid-frame\n");
  624 
  625 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
  626 		}
  627 
  628 		macb_tx_unmap(bp, tx_skb);
  629 	}
  630 
  631 	/* Set end of TX queue */
  632 	desc = macb_tx_desc(queue, 0);
  633 	macb_set_addr(desc, 0);
  634 	desc->ctrl = MACB_BIT(TX_USED);
  635 
  636 	/* Make descriptor updates visible to hardware */
  637 	wmb();
  638 
  639 	/* Reinitialize the TX desc queue */
  640 	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
  641 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  642 	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
  643 #endif
  644 	/* Make TX ring reflect state of hardware */
  645 	queue->tx_head = 0;
  646 	queue->tx_tail = 0;
  647 
  648 	/* Housework before enabling TX IRQ */
  649 	macb_writel(bp, TSR, macb_readl(bp, TSR));
  650 	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
  651 
  652 	/* Now we are ready to start transmission again */
  653 	netif_tx_start_all_queues(bp->dev);
  654 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
  655 
  656 	spin_unlock_irqrestore(&bp->lock, flags);
  657 }
  658 
  659 static void macb_tx_interrupt(struct macb_queue *queue)
  660 {
  661 	unsigned int tail;
  662 	unsigned int head;
  663 	u32 status;
  664 	struct macb *bp = queue->bp;
  665 	u16 queue_index = queue - bp->queues;
  666 
  667 	status = macb_readl(bp, TSR);
  668 	macb_writel(bp, TSR, status);
  669 
  670 	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  671 		queue_writel(queue, ISR, MACB_BIT(TCOMP));
  672 
  673 	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
  674 		    (unsigned long)status);
  675 
  676 	head = queue->tx_head;
  677 	for (tail = queue->tx_tail; tail != head; tail++) {
  678 		struct macb_tx_skb	*tx_skb;
  679 		struct sk_buff		*skb;
  680 		struct macb_dma_desc	*desc;
  681 		u32			ctrl;
  682 
  683 		desc = macb_tx_desc(queue, tail);
  684 
  685 		/* Make hw descriptor updates visible to CPU */
  686 		rmb();
  687 
  688 		ctrl = desc->ctrl;
  689 
  690 		/* TX_USED bit is only set by hardware on the very first buffer
  691 		 * descriptor of the transmitted frame.
  692 		 */
  693 		if (!(ctrl & MACB_BIT(TX_USED)))
  694 			break;
  695 
  696 		/* Process all buffers of the current transmitted frame */
  697 		for (;; tail++) {
  698 			tx_skb = macb_tx_skb(queue, tail);
  699 			skb = tx_skb->skb;
  700 
  701 			/* First, update TX stats if needed */
  702 			if (skb) {
  703 				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
  704 					    macb_tx_ring_wrap(tail), skb->data);
  705 				bp->stats.tx_packets++;
  706 				bp->stats.tx_bytes += skb->len;
  707 			}
  708 
  709 			/* Now we can safely release resources */
  710 			macb_tx_unmap(bp, tx_skb);
  711 
  712 			/* skb is set only for the last buffer of the frame.
  713 			 * WARNING: at this point skb has been freed by
  714 			 * macb_tx_unmap().
  715 			 */
  716 			if (skb)
  717 				break;
  718 		}
  719 	}
  720 
  721 	queue->tx_tail = tail;
  722 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
  723 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
  724 		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
  725 		netif_wake_subqueue(bp->dev, queue_index);
  726 }
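/*
 * Worked example (illustrative only) of the wake-up condition above, assuming
 * TX_RING_SIZE = 128 so MACB_TX_WAKEUP_THRESH = 3 * 128 / 4 = 96.  CIRC_CNT()
 * from <linux/circ_buf.h> counts the in-flight descriptors between tail and head.
 */
#include <stdio.h>

#define DEMO_RING_SIZE   128u
#define DEMO_WAKE_THRESH (3 * DEMO_RING_SIZE / 4)			/* 96 */
#define DEMO_CIRC_CNT(head, tail) (((head) - (tail)) & (DEMO_RING_SIZE - 1))

int main(void)
{
	unsigned int head = 230, tail = 140;				/* 90 in flight */

	printf("in flight = %u -> %s\n", DEMO_CIRC_CNT(head, tail),
	       DEMO_CIRC_CNT(head, tail) <= DEMO_WAKE_THRESH ?
	       "wake queue" : "keep stopped");
	return 0;
}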
  727 
  728 static void gem_rx_refill(struct macb *bp)
  729 {
  730 	unsigned int		entry;
  731 	struct sk_buff		*skb;
  732 	dma_addr_t		paddr;
  733 
  734 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
  735 			  RX_RING_SIZE) > 0) {
  736 		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
  737 
  738 		/* Make hw descriptor updates visible to CPU */
  739 		rmb();
  740 
  741 		bp->rx_prepared_head++;
  742 
  743 		if (!bp->rx_skbuff[entry]) {
  744 			/* allocate sk_buff for this free entry in ring */
  745 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
  746 			if (unlikely(!skb)) {
  747 				netdev_err(bp->dev,
  748 					   "Unable to allocate sk_buff\n");
  749 				break;
  750 			}
  751 
  752 			/* now fill corresponding descriptor entry */
  753 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
  754 					       bp->rx_buffer_size,
  755 					       DMA_FROM_DEVICE);
  756 			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
  757 				dev_kfree_skb(skb);
  758 				break;
  759 			}
  760 
  761 			bp->rx_skbuff[entry] = skb;
  762 
  763 			if (entry == RX_RING_SIZE - 1)
  764 				paddr |= MACB_BIT(RX_WRAP);
  765 			macb_set_addr(&(bp->rx_ring[entry]), paddr);
  766 			bp->rx_ring[entry].ctrl = 0;
  767 
  768 			/* properly align Ethernet header */
  769 			skb_reserve(skb, NET_IP_ALIGN);
  770 		} else {
  771 			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
  772 			bp->rx_ring[entry].ctrl = 0;
  773 		}
  774 	}
  775 
  776 	/* Make descriptor updates visible to hardware */
  777 	wmb();
  778 
  779 	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
  780 		    bp->rx_prepared_head, bp->rx_tail);
  781 }
  782 
  783 /* Mark DMA descriptors from begin up to and not including end as unused */
  784 static void discard_partial_frame(struct macb *bp, unsigned int begin,
  785 				  unsigned int end)
  786 {
  787 	unsigned int frag;
  788 
  789 	for (frag = begin; frag != end; frag++) {
  790 		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
  791 
  792 		desc->addr &= ~MACB_BIT(RX_USED);
  793 	}
  794 
  795 	/* Make descriptor updates visible to hardware */
  796 	wmb();
  797 
  798 	/* When this happens, the hardware stats registers for
  799 	 * whatever caused this are updated, so we don't have to record
  800 	 * anything.
  801 	 */
  802 }
  803 
  804 static int gem_rx(struct macb *bp, int budget)
  805 {
  806 	unsigned int		len;
  807 	unsigned int		entry;
  808 	struct sk_buff		*skb;
  809 	struct macb_dma_desc	*desc;
  810 	int			count = 0;
  811 
  812 	while (count < budget) {
  813 		u32 ctrl;
  814 		dma_addr_t addr;
  815 		bool rxused;
  816 
  817 		entry = macb_rx_ring_wrap(bp->rx_tail);
  818 		desc = &bp->rx_ring[entry];
  819 
  820 		/* Make hw descriptor updates visible to CPU */
  821 		rmb();
  822 
  823 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
  824 		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
  825 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  826 		addr |= ((u64)(desc->addrh) << 32);
  827 #endif
  828 		ctrl = desc->ctrl;
  829 
  830 		if (!rxused)
  831 			break;
  832 
  833 		bp->rx_tail++;
  834 		count++;
  835 
  836 		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
  837 			netdev_err(bp->dev,
  838 				   "not whole frame pointed by descriptor\n");
  839 			bp->stats.rx_dropped++;
  840 			break;
  841 		}
  842 		skb = bp->rx_skbuff[entry];
  843 		if (unlikely(!skb)) {
  844 			netdev_err(bp->dev,
  845 				   "inconsistent Rx descriptor chain\n");
  846 			bp->stats.rx_dropped++;
  847 			break;
  848 		}
  849 		/* now everything is ready for receiving packet */
  850 		bp->rx_skbuff[entry] = NULL;
  851 		len = ctrl & bp->rx_frm_len_mask;
  852 
  853 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
  854 
  855 		skb_put(skb, len);
  856 		dma_unmap_single(&bp->pdev->dev, addr,
  857 				 bp->rx_buffer_size, DMA_FROM_DEVICE);
  858 
  859 		skb->protocol = eth_type_trans(skb, bp->dev);
  860 		skb_checksum_none_assert(skb);
  861 		if (bp->dev->features & NETIF_F_RXCSUM &&
  862 		    !(bp->dev->flags & IFF_PROMISC) &&
  863 		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
  864 			skb->ip_summed = CHECKSUM_UNNECESSARY;
  865 
  866 		bp->stats.rx_packets++;
  867 		bp->stats.rx_bytes += skb->len;
  868 
  869 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
  870 		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
  871 			    skb->len, skb->csum);
  872 		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
  873 			       skb_mac_header(skb), 16, true);
  874 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
  875 			       skb->data, 32, true);
  876 #endif
  877 
  878 		netif_receive_skb(skb);
  879 	}
  880 
  881 	gem_rx_refill(bp);
  882 
  883 	return count;
  884 }
  885 
  886 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
  887 			 unsigned int last_frag)
  888 {
  889 	unsigned int len;
  890 	unsigned int frag;
  891 	unsigned int offset;
  892 	struct sk_buff *skb;
  893 	struct macb_dma_desc *desc;
  894 
  895 	desc = macb_rx_desc(bp, last_frag);
  896 	len = desc->ctrl & bp->rx_frm_len_mask;
  897 
  898 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
  899 		    macb_rx_ring_wrap(first_frag),
  900 		    macb_rx_ring_wrap(last_frag), len);
  901 
  902 	/* The ethernet header starts NET_IP_ALIGN bytes into the
  903 	 * first buffer. Since the header is 14 bytes, this makes the
  904 	 * payload word-aligned.
  905 	 *
  906 	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
  907 	 * the two padding bytes into the skb so that we avoid hitting
  908 	 * the slowpath in memcpy(), and pull them off afterwards.
  909 	 */
  910 	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
  911 	if (!skb) {
  912 		bp->stats.rx_dropped++;
  913 		for (frag = first_frag; ; frag++) {
  914 			desc = macb_rx_desc(bp, frag);
  915 			desc->addr &= ~MACB_BIT(RX_USED);
  916 			if (frag == last_frag)
  917 				break;
  918 		}
  919 
  920 		/* Make descriptor updates visible to hardware */
  921 		wmb();
  922 
  923 		return 1;
  924 	}
  925 
  926 	offset = 0;
  927 	len += NET_IP_ALIGN;
  928 	skb_checksum_none_assert(skb);
  929 	skb_put(skb, len);
  930 
  931 	for (frag = first_frag; ; frag++) {
  932 		unsigned int frag_len = bp->rx_buffer_size;
  933 
  934 		if (offset + frag_len > len) {
  935 			if (unlikely(frag != last_frag)) {
  936 				dev_kfree_skb_any(skb);
  937 				return -1;
  938 			}
  939 			frag_len = len - offset;
  940 		}
  941 		skb_copy_to_linear_data_offset(skb, offset,
  942 					       macb_rx_buffer(bp, frag),
  943 					       frag_len);
  944 		offset += bp->rx_buffer_size;
  945 		desc = macb_rx_desc(bp, frag);
  946 		desc->addr &= ~MACB_BIT(RX_USED);
  947 
  948 		if (frag == last_frag)
  949 			break;
  950 	}
  951 
  952 	/* Make descriptor updates visible to hardware */
  953 	wmb();
  954 
  955 	__skb_pull(skb, NET_IP_ALIGN);
  956 	skb->protocol = eth_type_trans(skb, bp->dev);
  957 
  958 	bp->stats.rx_packets++;
  959 	bp->stats.rx_bytes += skb->len;
  960 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
  961 		    skb->len, skb->csum);
  962 	netif_receive_skb(skb);
  963 
  964 	return 0;
  965 }
  966 
  967 static inline void macb_init_rx_ring(struct macb *bp)
  968 {
  969 	dma_addr_t addr;
  970 	int i;
  971 
  972 	addr = bp->rx_buffers_dma;
  973 	for (i = 0; i < RX_RING_SIZE; i++) {
  974 		bp->rx_ring[i].addr = addr;
  975 		bp->rx_ring[i].ctrl = 0;
  976 		addr += bp->rx_buffer_size;
  977 	}
  978 	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
  979 }
  980 
  981 static int macb_rx(struct macb *bp, int budget)
  982 {
  983 	bool reset_rx_queue = false;
  984 	int received = 0;
  985 	unsigned int tail;
  986 	int first_frag = -1;
  987 
  988 	for (tail = bp->rx_tail; budget > 0; tail++) {
  989 		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
  990 		u32 addr, ctrl;
  991 
  992 		/* Make hw descriptor updates visible to CPU */
  993 		rmb();
  994 
  995 		addr = desc->addr;
  996 		ctrl = desc->ctrl;
  997 
  998 		if (!(addr & MACB_BIT(RX_USED)))
  999 			break;
 1000 
 1001 		if (ctrl & MACB_BIT(RX_SOF)) {
 1002 			if (first_frag != -1)
 1003 				discard_partial_frame(bp, first_frag, tail);
 1004 			first_frag = tail;
 1005 		}
 1006 
 1007 		if (ctrl & MACB_BIT(RX_EOF)) {
 1008 			int dropped;
 1009 
 1010 			if (unlikely(first_frag == -1)) {
 1011 				reset_rx_queue = true;
 1012 				continue;
 1013 			}
 1014 
 1015 			dropped = macb_rx_frame(bp, first_frag, tail);
 1016 			first_frag = -1;
 1017 			if (unlikely(dropped < 0)) {
 1018 				reset_rx_queue = true;
 1019 				continue;
 1020 			}
 1021 			if (!dropped) {
 1022 				received++;
 1023 				budget--;
 1024 			}
 1025 		}
 1026 	}
 1027 
 1028 	if (unlikely(reset_rx_queue)) {
 1029 		unsigned long flags;
 1030 		u32 ctrl;
 1031 
 1032 		netdev_err(bp->dev, "RX queue corruption: reset it\n");
 1033 
 1034 		spin_lock_irqsave(&bp->lock, flags);
 1035 
 1036 		ctrl = macb_readl(bp, NCR);
 1037 		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
 1038 
 1039 		macb_init_rx_ring(bp);
 1040 		macb_writel(bp, RBQP, bp->rx_ring_dma);
 1041 
 1042 		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 1043 
 1044 		spin_unlock_irqrestore(&bp->lock, flags);
 1045 		return received;
 1046 	}
 1047 
 1048 	if (first_frag != -1)
 1049 		bp->rx_tail = first_frag;
 1050 	else
 1051 		bp->rx_tail = tail;
 1052 
 1053 	return received;
 1054 }
 1055 
 1056 static int macb_poll(struct napi_struct *napi, int budget)
 1057 {
 1058 	struct macb *bp = container_of(napi, struct macb, napi);
 1059 	int work_done;
 1060 	u32 status;
 1061 
 1062 	status = macb_readl(bp, RSR);
 1063 	macb_writel(bp, RSR, status);
 1064 
 1065 	work_done = 0;
 1066 
 1067 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 1068 		    (unsigned long)status, budget);
 1069 
 1070 	work_done = bp->macbgem_ops.mog_rx(bp, budget);
 1071 	if (work_done < budget) {
 1072 		napi_complete(napi);
 1073 
 1074 		/* Packets received while interrupts were disabled */
 1075 		status = macb_readl(bp, RSR);
 1076 		if (status) {
 1077 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1078 				macb_writel(bp, ISR, MACB_BIT(RCOMP));
 1079 			napi_reschedule(napi);
 1080 		} else {
 1081 			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
 1082 		}
 1083 	}
 1084 
 1085 	/* TODO: Handle errors */
 1086 
 1087 	return work_done;
 1088 }
 1089 
 1090 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 1091 {
 1092 	struct macb_queue *queue = dev_id;
 1093 	struct macb *bp = queue->bp;
 1094 	struct net_device *dev = bp->dev;
 1095 	u32 status, ctrl;
 1096 
 1097 	status = queue_readl(queue, ISR);
 1098 
 1099 	if (unlikely(!status))
 1100 		return IRQ_NONE;
 1101 
 1102 	spin_lock(&bp->lock);
 1103 
 1104 	while (status) {
 1105 		/* close possible race with dev_close */
 1106 		if (unlikely(!netif_running(dev))) {
 1107 			queue_writel(queue, IDR, -1);
 1108 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1109 				queue_writel(queue, ISR, -1);
 1110 			break;
 1111 		}
 1112 
 1113 		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
 1114 			    (unsigned int)(queue - bp->queues),
 1115 			    (unsigned long)status);
 1116 
 1117 		if (status & MACB_RX_INT_FLAGS) {
 1118 			/* There's no point taking any more interrupts
 1119 			 * until we have processed the buffers. The
 1120 			 * scheduling call may fail if the poll routine
 1121 			 * is already scheduled, so disable interrupts
 1122 			 * now.
 1123 			 */
 1124 			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
 1125 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1126 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 1127 
 1128 			if (napi_schedule_prep(&bp->napi)) {
 1129 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
 1130 				__napi_schedule(&bp->napi);
 1131 			}
 1132 		}
 1133 
 1134 		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
 1135 			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
 1136 			schedule_work(&queue->tx_error_task);
 1137 
 1138 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1139 				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
 1140 
 1141 			break;
 1142 		}
 1143 
 1144 		if (status & MACB_BIT(TCOMP))
 1145 			macb_tx_interrupt(queue);
 1146 
 1147 		/* Link change detection isn't possible with RMII, so we'll
 1148 		 * add that if/when we get our hands on a full-blown MII PHY.
 1149 		 */
 1150 
 1151 		/* There is a hardware issue under heavy load where DMA can
 1152 		 * stop, this causes endless "used buffer descriptor read"
 1153 		 * interrupts but it can be cleared by re-enabling RX. See
 1154 		 * the at91 manual, section 41.3.1 or the Zynq manual
 1155 		 * section 16.7.4 for details.
 1156 		 */
 1157 		if (status & MACB_BIT(RXUBR)) {
 1158 			ctrl = macb_readl(bp, NCR);
 1159 			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
 1160 			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 1161 
 1162 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1163 				queue_writel(queue, ISR, MACB_BIT(RXUBR));
 1164 		}
 1165 
 1166 		if (status & MACB_BIT(ISR_ROVR)) {
 1167 			/* We missed at least one packet */
 1168 			if (macb_is_gem(bp))
 1169 				bp->hw_stats.gem.rx_overruns++;
 1170 			else
 1171 				bp->hw_stats.macb.rx_overruns++;
 1172 
 1173 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1174 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
 1175 		}
 1176 
 1177 		if (status & MACB_BIT(HRESP)) {
 1178 			/* TODO: Reset the hardware, and maybe move the
 1179 			 * netdev_err to a lower-priority context as well
 1180 			 * (work queue?)
 1181 			 */
 1182 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
 1183 
 1184 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1185 				queue_writel(queue, ISR, MACB_BIT(HRESP));
 1186 		}
 1187 
 1188 		status = queue_readl(queue, ISR);
 1189 	}
 1190 
 1191 	spin_unlock(&bp->lock);
 1192 
 1193 	return IRQ_HANDLED;
 1194 }
 1195 
 1196 #ifdef CONFIG_NET_POLL_CONTROLLER
 1197 /* Polling receive - used by netconsole and other diagnostic tools
 1198  * to allow network i/o with interrupts disabled.
 1199  */
 1200 static void macb_poll_controller(struct net_device *dev)
 1201 {
 1202 	struct macb *bp = netdev_priv(dev);
 1203 	struct macb_queue *queue;
 1204 	unsigned long flags;
 1205 	unsigned int q;
 1206 
 1207 	local_irq_save(flags);
 1208 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
 1209 		macb_interrupt(dev->irq, queue);
 1210 	local_irq_restore(flags);
 1211 }
 1212 #endif
 1213 
 1214 static unsigned int macb_tx_map(struct macb *bp,
 1215 				struct macb_queue *queue,
 1216 				struct sk_buff *skb)
 1217 {
 1218 	dma_addr_t mapping;
 1219 	unsigned int len, entry, i, tx_head = queue->tx_head;
 1220 	struct macb_tx_skb *tx_skb = NULL;
 1221 	struct macb_dma_desc *desc;
 1222 	unsigned int offset, size, count = 0;
 1223 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
 1224 	unsigned int eof = 1;
 1225 	u32 ctrl;
 1226 
 1227 	/* First, map non-paged data */
 1228 	len = skb_headlen(skb);
 1229 	offset = 0;
 1230 	while (len) {
 1231 		size = min(len, bp->max_tx_length);
 1232 		entry = macb_tx_ring_wrap(tx_head);
 1233 		tx_skb = &queue->tx_skb[entry];
 1234 
 1235 		mapping = dma_map_single(&bp->pdev->dev,
 1236 					 skb->data + offset,
 1237 					 size, DMA_TO_DEVICE);
 1238 		if (dma_mapping_error(&bp->pdev->dev, mapping))
 1239 			goto dma_error;
 1240 
 1241 		/* Save info to properly release resources */
 1242 		tx_skb->skb = NULL;
 1243 		tx_skb->mapping = mapping;
 1244 		tx_skb->size = size;
 1245 		tx_skb->mapped_as_page = false;
 1246 
 1247 		len -= size;
 1248 		offset += size;
 1249 		count++;
 1250 		tx_head++;
 1251 	}
 1252 
 1253 	/* Then, map paged data from fragments */
 1254 	for (f = 0; f < nr_frags; f++) {
 1255 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 1256 
 1257 		len = skb_frag_size(frag);
 1258 		offset = 0;
 1259 		while (len) {
 1260 			size = min(len, bp->max_tx_length);
 1261 			entry = macb_tx_ring_wrap(tx_head);
 1262 			tx_skb = &queue->tx_skb[entry];
 1263 
 1264 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
 1265 						   offset, size, DMA_TO_DEVICE);
 1266 			if (dma_mapping_error(&bp->pdev->dev, mapping))
 1267 				goto dma_error;
 1268 
 1269 			/* Save info to properly release resources */
 1270 			tx_skb->skb = NULL;
 1271 			tx_skb->mapping = mapping;
 1272 			tx_skb->size = size;
 1273 			tx_skb->mapped_as_page = true;
 1274 
 1275 			len -= size;
 1276 			offset += size;
 1277 			count++;
 1278 			tx_head++;
 1279 		}
 1280 	}
 1281 
 1282 	/* Should never happen */
 1283 	if (unlikely(!tx_skb)) {
 1284 		netdev_err(bp->dev, "BUG! empty skb!\n");
 1285 		return 0;
 1286 	}
 1287 
 1288 	/* This is the last buffer of the frame: save socket buffer */
 1289 	tx_skb->skb = skb;
 1290 
 1291 	/* Update TX ring: update buffer descriptors in reverse order
 1292 	 * to avoid race condition
 1293 	 */
 1294 
 1295 	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
 1296 	 * to set the end of TX queue
 1297 	 */
 1298 	i = tx_head;
 1299 	entry = macb_tx_ring_wrap(i);
 1300 	ctrl = MACB_BIT(TX_USED);
 1301 	desc = &queue->tx_ring[entry];
 1302 	desc->ctrl = ctrl;
 1303 
 1304 	do {
 1305 		i--;
 1306 		entry = macb_tx_ring_wrap(i);
 1307 		tx_skb = &queue->tx_skb[entry];
 1308 		desc = &queue->tx_ring[entry];
 1309 
 1310 		ctrl = (u32)tx_skb->size;
 1311 		if (eof) {
 1312 			ctrl |= MACB_BIT(TX_LAST);
 1313 			eof = 0;
 1314 		}
 1315 		if (unlikely(entry == (TX_RING_SIZE - 1)))
 1316 			ctrl |= MACB_BIT(TX_WRAP);
 1317 
 1318 		/* Set TX buffer descriptor */
 1319 		macb_set_addr(desc, tx_skb->mapping);
 1320 		/* desc->addr must be visible to hardware before clearing
 1321 		 * 'TX_USED' bit in desc->ctrl.
 1322 		 */
 1323 		wmb();
 1324 		desc->ctrl = ctrl;
 1325 	} while (i != queue->tx_head);
 1326 
 1327 	queue->tx_head = tx_head;
 1328 
 1329 	return count;
 1330 
 1331 dma_error:
 1332 	netdev_err(bp->dev, "TX DMA map failed\n");
 1333 
 1334 	for (i = queue->tx_head; i != tx_head; i++) {
 1335 		tx_skb = macb_tx_skb(queue, i);
 1336 
 1337 		macb_tx_unmap(bp, tx_skb);
 1338 	}
 1339 
 1340 	return 0;
 1341 }
 1342 
 1343 static inline int macb_clear_csum(struct sk_buff *skb)
 1344 {
 1345 	/* no change for packets without checksum offloading */
 1346 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 1347 		return 0;
 1348 
 1349 	/* make sure we can modify the header */
 1350 	if (unlikely(skb_cow_head(skb, 0)))
 1351 		return -1;
 1352 
 1353 	/* initialize checksum field
 1354 	 * This is required - at least for Zynq, which otherwise calculates
 1355 	 * wrong UDP header checksums for UDP packets with UDP data len <=2
 1356 	 */
 1357 	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
 1358 	return 0;
 1359 }
 1360 
 1361 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 1362 {
 1363 	u16 queue_index = skb_get_queue_mapping(skb);
 1364 	struct macb *bp = netdev_priv(dev);
 1365 	struct macb_queue *queue = &bp->queues[queue_index];
 1366 	unsigned long flags;
 1367 	unsigned int count, nr_frags, frag_size, f;
 1368 
 1369 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 1370 	netdev_vdbg(bp->dev,
 1371 		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
 1372 		    queue_index, skb->len, skb->head, skb->data,
 1373 		    skb_tail_pointer(skb), skb_end_pointer(skb));
 1374 	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
 1375 		       skb->data, 16, true);
 1376 #endif
 1377 
 1378 	/* Count how many TX buffer descriptors are needed to send this
 1379 	 * socket buffer: skb fragments of jumbo frames may need to be
 1380 	 * split into many buffer descriptors.
 1381 	 */
 1382 	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
 1383 	nr_frags = skb_shinfo(skb)->nr_frags;
 1384 	for (f = 0; f < nr_frags; f++) {
 1385 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
 1386 		count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
 1387 	}
 1388 
 1389 	spin_lock_irqsave(&bp->lock, flags);
 1390 
 1391 	/* This is a hard error, log it. */
 1392 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
 1393 		netif_stop_subqueue(dev, queue_index);
 1394 		spin_unlock_irqrestore(&bp->lock, flags);
 1395 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
 1396 			   queue->tx_head, queue->tx_tail);
 1397 		return NETDEV_TX_BUSY;
 1398 	}
 1399 
 1400 	if (macb_clear_csum(skb)) {
 1401 		dev_kfree_skb_any(skb);
 1402 		goto unlock;
 1403 	}
 1404 
 1405 	/* Map socket buffer for DMA transfer */
 1406 	if (!macb_tx_map(bp, queue, skb)) {
 1407 		dev_kfree_skb_any(skb);
 1408 		goto unlock;
 1409 	}
 1410 
 1411 	/* Make newly initialized descriptor visible to hardware */
 1412 	wmb();
 1413 
 1414 	skb_tx_timestamp(skb);
 1415 
 1416 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 1417 
 1418 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
 1419 		netif_stop_subqueue(dev, queue_index);
 1420 
 1421 unlock:
 1422 	spin_unlock_irqrestore(&bp->lock, flags);
 1423 
 1424 	return NETDEV_TX_OK;
 1425 }
 1426 
 1427 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
 1428 {
 1429 	if (!macb_is_gem(bp)) {
 1430 		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
 1431 	} else {
 1432 		bp->rx_buffer_size = size;
 1433 
 1434 		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
 1435 			netdev_dbg(bp->dev,
 1436 				   "RX buffer must be multiple of %d bytes, expanding\n",
 1437 				   RX_BUFFER_MULTIPLE);
 1438 			bp->rx_buffer_size =
 1439 				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
 1440 		}
 1441 	}
 1442 
  1443 	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
 1444 		   bp->dev->mtu, bp->rx_buffer_size);
 1445 }
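/*
 * Worked example (illustrative only) of the rounding performed by
 * macb_init_rx_buffer_size() above: GEM RX buffers are rounded up to the next
 * multiple of RX_BUFFER_MULTIPLE (64 bytes).
 */
#include <stdio.h>

#define DEMO_RX_BUFFER_MULTIPLE 64u

static unsigned int demo_roundup(unsigned int x, unsigned int m)
{
	return ((x + m - 1) / m) * m;
}

int main(void)
{
	printf("%u -> %u\n", 1522u, demo_roundup(1522u, DEMO_RX_BUFFER_MULTIPLE)); /* 1536 */
	printf("%u -> %u\n", 1536u, demo_roundup(1536u, DEMO_RX_BUFFER_MULTIPLE)); /* 1536 */
	return 0;
}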
 1446 
 1447 static void gem_free_rx_buffers(struct macb *bp)
 1448 {
 1449 	struct sk_buff		*skb;
 1450 	struct macb_dma_desc	*desc;
 1451 	dma_addr_t		addr;
 1452 	int i;
 1453 
 1454 	if (!bp->rx_skbuff)
 1455 		return;
 1456 
 1457 	for (i = 0; i < RX_RING_SIZE; i++) {
 1458 		skb = bp->rx_skbuff[i];
 1459 
 1460 		if (!skb)
 1461 			continue;
 1462 
 1463 		desc = &bp->rx_ring[i];
 1464 		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
 1465 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1466 		addr |= ((u64)(desc->addrh) << 32);
 1467 #endif
 1468 		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
 1469 				 DMA_FROM_DEVICE);
 1470 		dev_kfree_skb_any(skb);
 1471 		skb = NULL;
 1472 	}
 1473 
 1474 	kfree(bp->rx_skbuff);
 1475 	bp->rx_skbuff = NULL;
 1476 }
 1477 
 1478 static void macb_free_rx_buffers(struct macb *bp)
 1479 {
 1480 	if (bp->rx_buffers) {
 1481 		dma_free_coherent(&bp->pdev->dev,
 1482 				  RX_RING_SIZE * bp->rx_buffer_size,
 1483 				  bp->rx_buffers, bp->rx_buffers_dma);
 1484 		bp->rx_buffers = NULL;
 1485 	}
 1486 }
 1487 
 1488 static void macb_free_consistent(struct macb *bp)
 1489 {
 1490 	struct macb_queue *queue;
 1491 	unsigned int q;
 1492 
 1493 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 1494 	if (bp->rx_ring) {
 1495 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
 1496 				  bp->rx_ring, bp->rx_ring_dma);
 1497 		bp->rx_ring = NULL;
 1498 	}
 1499 
 1500 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1501 		kfree(queue->tx_skb);
 1502 		queue->tx_skb = NULL;
 1503 		if (queue->tx_ring) {
 1504 			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
 1505 					  queue->tx_ring, queue->tx_ring_dma);
 1506 			queue->tx_ring = NULL;
 1507 		}
 1508 	}
 1509 }
 1510 
 1511 static int gem_alloc_rx_buffers(struct macb *bp)
 1512 {
 1513 	int size;
 1514 
 1515 	size = RX_RING_SIZE * sizeof(struct sk_buff *);
 1516 	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
 1517 	if (!bp->rx_skbuff)
 1518 		return -ENOMEM;
 1519 
 1520 	netdev_dbg(bp->dev,
 1521 		   "Allocated %d RX struct sk_buff entries at %p\n",
 1522 		   RX_RING_SIZE, bp->rx_skbuff);
 1523 	return 0;
 1524 }
 1525 
 1526 static int macb_alloc_rx_buffers(struct macb *bp)
 1527 {
 1528 	int size;
 1529 
 1530 	size = RX_RING_SIZE * bp->rx_buffer_size;
 1531 	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
 1532 					    &bp->rx_buffers_dma, GFP_KERNEL);
 1533 	if (!bp->rx_buffers)
 1534 		return -ENOMEM;
 1535 
 1536 	netdev_dbg(bp->dev,
 1537 		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
 1538 		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
 1539 	return 0;
 1540 }
 1541 
 1542 static int macb_alloc_consistent(struct macb *bp)
 1543 {
 1544 	struct macb_queue *queue;
 1545 	unsigned int q;
 1546 	int size;
 1547 
 1548 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1549 		size = TX_RING_BYTES;
 1550 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 1551 						    &queue->tx_ring_dma,
 1552 						    GFP_KERNEL);
 1553 		if (!queue->tx_ring)
 1554 			goto out_err;
 1555 		netdev_dbg(bp->dev,
 1556 			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
 1557 			   q, size, (unsigned long)queue->tx_ring_dma,
 1558 			   queue->tx_ring);
 1559 
 1560 		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
 1561 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 1562 		if (!queue->tx_skb)
 1563 			goto out_err;
 1564 	}
 1565 
 1566 	size = RX_RING_BYTES;
 1567 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 1568 					 &bp->rx_ring_dma, GFP_KERNEL);
 1569 	if (!bp->rx_ring)
 1570 		goto out_err;
 1571 	netdev_dbg(bp->dev,
 1572 		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
 1573 		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
 1574 
 1575 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 1576 		goto out_err;
 1577 
 1578 	return 0;
 1579 
 1580 out_err:
 1581 	macb_free_consistent(bp);
 1582 	return -ENOMEM;
 1583 }
 1584 
 1585 static void gem_init_rings(struct macb *bp)
 1586 {
 1587 	struct macb_queue *queue;
 1588 	unsigned int q;
 1589 	int i;
 1590 
 1591 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1592 		for (i = 0; i < TX_RING_SIZE; i++) {
 1593 			macb_set_addr(&(queue->tx_ring[i]), 0);
 1594 			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
 1595 		}
 1596 		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 1597 		queue->tx_head = 0;
 1598 		queue->tx_tail = 0;
 1599 	}
 1600 
 1601 	bp->rx_tail = 0;
 1602 	bp->rx_prepared_head = 0;
 1603 
 1604 	gem_rx_refill(bp);
 1605 }
 1606 
 1607 static void macb_init_rings(struct macb *bp)
 1608 {
 1609 	int i;
 1610 
 1611 	macb_init_rx_ring(bp);
 1612 
 1613 	for (i = 0; i < TX_RING_SIZE; i++) {
 1614 		bp->queues[0].tx_ring[i].addr = 0;
 1615 		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
 1616 	}
 1617 	bp->queues[0].tx_head = 0;
 1618 	bp->queues[0].tx_tail = 0;
 1619 	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 1620 
 1621 	bp->rx_tail = 0;
 1622 }
 1623 
 1624 static void macb_reset_hw(struct macb *bp)
 1625 {
 1626 	struct macb_queue *queue;
 1627 	unsigned int q;
 1628 
 1629 	/* Disable RX and TX (XXX: Should we halt the transmission
 1630 	 * more gracefully?)
 1631 	 */
 1632 	macb_writel(bp, NCR, 0);
 1633 
 1634 	/* Clear the stats registers (XXX: Update stats first?) */
 1635 	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
 1636 
 1637 	/* Clear all status flags */
 1638 	macb_writel(bp, TSR, -1);
 1639 	macb_writel(bp, RSR, -1);
 1640 
 1641 	/* Disable all interrupts */
 1642 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1643 		queue_writel(queue, IDR, -1);
 1644 		queue_readl(queue, ISR);
 1645 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1646 			queue_writel(queue, ISR, -1);
 1647 	}
 1648 }
 1649 
 1650 static u32 gem_mdc_clk_div(struct macb *bp)
 1651 {
 1652 	u32 config;
 1653 	unsigned long pclk_hz = clk_get_rate(bp->pclk);
 1654 
 1655 	if (pclk_hz <= 20000000)
 1656 		config = GEM_BF(CLK, GEM_CLK_DIV8);
 1657 	else if (pclk_hz <= 40000000)
 1658 		config = GEM_BF(CLK, GEM_CLK_DIV16);
 1659 	else if (pclk_hz <= 80000000)
 1660 		config = GEM_BF(CLK, GEM_CLK_DIV32);
 1661 	else if (pclk_hz <= 120000000)
 1662 		config = GEM_BF(CLK, GEM_CLK_DIV48);
 1663 	else if (pclk_hz <= 160000000)
 1664 		config = GEM_BF(CLK, GEM_CLK_DIV64);
 1665 	else
 1666 		config = GEM_BF(CLK, GEM_CLK_DIV96);
 1667 
 1668 	return config;
 1669 }
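/* Editor's worked example (not from the driver source): a hypothetical
 * 100 MHz pclk falls into the "<= 120000000" branch above and selects DIV48,
 * giving an MDC frequency of roughly 100 MHz / 48 ~= 2.08 MHz, which keeps
 * the management clock under the 2.5 MHz ceiling that IEEE 802.3 specifies
 * for the MDIO bus.
 */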
 1670 
 1671 static u32 macb_mdc_clk_div(struct macb *bp)
 1672 {
 1673 	u32 config;
 1674 	unsigned long pclk_hz;
 1675 
 1676 	if (macb_is_gem(bp))
 1677 		return gem_mdc_clk_div(bp);
 1678 
 1679 	pclk_hz = clk_get_rate(bp->pclk);
 1680 	if (pclk_hz <= 20000000)
 1681 		config = MACB_BF(CLK, MACB_CLK_DIV8);
 1682 	else if (pclk_hz <= 40000000)
 1683 		config = MACB_BF(CLK, MACB_CLK_DIV16);
 1684 	else if (pclk_hz <= 80000000)
 1685 		config = MACB_BF(CLK, MACB_CLK_DIV32);
 1686 	else
 1687 		config = MACB_BF(CLK, MACB_CLK_DIV64);
 1688 
 1689 	return config;
 1690 }
 1691 
 1692 /* Get the DMA bus width field of the network configuration register that we
 1693  * should program.  We determine the width by decoding the design configuration
 1694  * register, which reports the maximum supported data bus width.
 1695  */
 1696 static u32 macb_dbw(struct macb *bp)
 1697 {
 1698 	if (!macb_is_gem(bp))
 1699 		return 0;
 1700 
 1701 	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
 1702 	case 4:
 1703 		return GEM_BF(DBW, GEM_DBW128);
 1704 	case 2:
 1705 		return GEM_BF(DBW, GEM_DBW64);
 1706 	case 1:
 1707 	default:
 1708 		return GEM_BF(DBW, GEM_DBW32);
 1709 	}
 1710 }
 1711 
 1712 /* Configure the receive DMA engine
 1713  * - use the correct receive buffer size
 1714  * - set best burst length for DMA operations
 1715  *   (if not supported by the FIFO, it will fall back to the default)
 1716  * - set both rx/tx packet buffers to full memory size
 1717  * These are configurable parameters for GEM.
 1718  */
 1719 static void macb_configure_dma(struct macb *bp)
 1720 {
 1721 	u32 dmacfg;
 1722 
 1723 	if (macb_is_gem(bp)) {
 1724 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
 1725 		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
 1726 		if (bp->dma_burst_length)
 1727 			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
 1728 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 1729 		dmacfg &= ~GEM_BIT(ENDIA_PKT);
 1730 
 1731 		if (bp->native_io)
 1732 			dmacfg &= ~GEM_BIT(ENDIA_DESC);
 1733 		else
 1734 			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
 1735 
 1736 		if (bp->dev->features & NETIF_F_HW_CSUM)
 1737 			dmacfg |= GEM_BIT(TXCOEN);
 1738 		else
 1739 			dmacfg &= ~GEM_BIT(TXCOEN);
 1740 
 1741 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1742 		dmacfg |= GEM_BIT(ADDR64);
 1743 #endif
 1744 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
 1745 			   dmacfg);
 1746 		gem_writel(bp, DMACFG, dmacfg);
 1747 	}
 1748 }
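/* Editor's worked example (not from the driver source): with the
 * RX_BUFFER_MULTIPLE of 64 used in this file, a hypothetical rx_buffer_size
 * of 1536 bytes would program GEM_BF(RXBS, 1536 / 64) = GEM_BF(RXBS, 24)
 * into DMACFG, i.e. the RXBS field counts the receive buffer size in
 * 64-byte units.
 */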
 1749 
 1750 static void macb_init_hw(struct macb *bp)
 1751 {
 1752 	struct macb_queue *queue;
 1753 	unsigned int q;
 1754 
 1755 	u32 config;
 1756 
 1757 	macb_reset_hw(bp);
 1758 	macb_set_hwaddr(bp);
 1759 
 1760 	config = macb_mdc_clk_div(bp);
 1761 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
 1762 		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 1763 	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
 1764 	config |= MACB_BIT(PAE);		/* PAuse Enable */
 1765 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
 1766 	if (bp->caps & MACB_CAPS_JUMBO)
 1767 		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
 1768 	else
 1769 		config |= MACB_BIT(BIG);	/* Receive oversized frames */
 1770 	if (bp->dev->flags & IFF_PROMISC)
 1771 		config |= MACB_BIT(CAF);	/* Copy All Frames */
 1772 	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
 1773 		config |= GEM_BIT(RXCOEN);
 1774 	if (!(bp->dev->flags & IFF_BROADCAST))
 1775 		config |= MACB_BIT(NBC);	/* No BroadCast */
 1776 	config |= macb_dbw(bp);
 1777 	macb_writel(bp, NCFGR, config);
 1778 	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
 1779 		gem_writel(bp, JML, bp->jumbo_max_len);
 1780 	bp->speed = SPEED_10;
 1781 	bp->duplex = DUPLEX_HALF;
 1782 	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
 1783 	if (bp->caps & MACB_CAPS_JUMBO)
 1784 		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
 1785 
 1786 	macb_configure_dma(bp);
 1787 
 1788 	/* Initialize TX and RX buffers */
 1789 	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
 1790 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1791 	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
 1792 #endif
 1793 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1794 		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
 1795 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1796 		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
 1797 #endif
 1798 
 1799 		/* Enable interrupts */
 1800 		queue_writel(queue, IER,
 1801 			     MACB_RX_INT_FLAGS |
 1802 			     MACB_TX_INT_FLAGS |
 1803 			     MACB_BIT(HRESP));
 1804 	}
 1805 
 1806 	/* Enable TX and RX */
 1807 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 1808 }
 1809 
 1810 /* The hash address register is 64 bits long and takes up two
 1811  * locations in the memory map.  The least significant bits are stored
 1812  * in EMAC_HSL and the most significant bits in EMAC_HSH.
 1813  *
 1814  * The unicast hash enable and the multicast hash enable bits in the
 1815  * network configuration register enable the reception of hash matched
 1816  * frames. The destination address is reduced to a 6 bit index into
 1817  * the 64 bit hash register using the following hash function.  The
 1818  * hash function is an exclusive or of every sixth bit of the
 1819  * destination address.
 1820  *
 1821  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 1822  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 1823  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 1824  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 1825  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 1826  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 1827  *
 1828  * da[0] represents the least significant bit of the first byte
 1829  * received, that is, the multicast/unicast indicator, and da[47]
 1830  * represents the most significant bit of the last byte received.  If
 1831  * the hash index, hi[n], points to a bit that is set in the hash
 1832  * register then the frame will be matched according to whether the
 1833  * frame is multicast or unicast.  A multicast match will be signalled
 1834  * if the multicast hash enable bit is set, da[0] is 1 and the hash
 1835  * index points to a bit set in the hash register.  A unicast match
 1836  * will be signalled if the unicast hash enable bit is set, da[0] is 0
 1837  * and the hash index points to a bit set in the hash register.  To
 1838  * receive all multicast frames, the hash register should be set with
 1839  * all ones and the multicast hash enable bit should be set in the
 1840  * network configuration register.
 1841  */
 1842 
 1843 static inline int hash_bit_value(int bitnr, __u8 *addr)
 1844 {
 1845 	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
 1846 		return 1;
 1847 	return 0;
 1848 }
 1849 
 1850 /* Return the hash index value for the specified address. */
 1851 static int hash_get_index(__u8 *addr)
 1852 {
 1853 	int i, j, bitval;
 1854 	int hash_index = 0;
 1855 
 1856 	for (j = 0; j < 6; j++) {
 1857 		for (i = 0, bitval = 0; i < 8; i++)
 1858 			bitval ^= hash_bit_value(i * 6 + j, addr);
 1859 
 1860 		hash_index |= (bitval << j);
 1861 	}
 1862 
 1863 	return hash_index;
 1864 }
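/* Editor's worked example (not from the driver source): for the broadcast
 * address ff:ff:ff:ff:ff:ff every da[] bit is 1, so each hi[n] described
 * above is the XOR of eight ones, which is 0; hash_get_index() therefore
 * returns 0 and the frame is matched against bit 0 of the 64-bit hash
 * register.
 */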
 1865 
 1866 /* Add multicast addresses to the internal multicast-hash table. */
 1867 static void macb_sethashtable(struct net_device *dev)
 1868 {
 1869 	struct netdev_hw_addr *ha;
 1870 	unsigned long mc_filter[2];
 1871 	unsigned int bitnr;
 1872 	struct macb *bp = netdev_priv(dev);
 1873 
 1874 	mc_filter[0] = 0;
 1875 	mc_filter[1] = 0;
 1876 
 1877 	netdev_for_each_mc_addr(ha, dev) {
 1878 		bitnr = hash_get_index(ha->addr);
 1879 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
 1880 	}
 1881 
 1882 	macb_or_gem_writel(bp, HRB, mc_filter[0]);
 1883 	macb_or_gem_writel(bp, HRT, mc_filter[1]);
 1884 }
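/* Editor's worked example (not from the driver source): a hash index of 37
 * selects mc_filter[37 >> 5] = mc_filter[1] and sets 1 << (37 & 31), i.e.
 * bit 5 of that word, so bit 37 of the 64-bit hash ends up in the HRT (top)
 * register while indices 0..31 land in HRB.
 */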
 1885 
 1886 /* Enable/Disable promiscuous and multicast modes. */
 1887 static void macb_set_rx_mode(struct net_device *dev)
 1888 {
 1889 	unsigned long cfg;
 1890 	struct macb *bp = netdev_priv(dev);
 1891 
 1892 	cfg = macb_readl(bp, NCFGR);
 1893 
 1894 	if (dev->flags & IFF_PROMISC) {
 1895 		/* Enable promiscuous mode */
 1896 		cfg |= MACB_BIT(CAF);
 1897 
 1898 		/* Disable RX checksum offload */
 1899 		if (macb_is_gem(bp))
 1900 			cfg &= ~GEM_BIT(RXCOEN);
 1901 	} else {
 1902 		/* Disable promiscuous mode */
 1903 		cfg &= ~MACB_BIT(CAF);
 1904 
 1905 		/* Enable RX checksum offload only if requested */
 1906 		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
 1907 			cfg |= GEM_BIT(RXCOEN);
 1908 	}
 1909 
 1910 	if (dev->flags & IFF_ALLMULTI) {
 1911 		/* Enable all multicast mode */
 1912 		macb_or_gem_writel(bp, HRB, -1);
 1913 		macb_or_gem_writel(bp, HRT, -1);
 1914 		cfg |= MACB_BIT(NCFGR_MTI);
 1915 	} else if (!netdev_mc_empty(dev)) {
 1916 		/* Enable specific multicasts */
 1917 		macb_sethashtable(dev);
 1918 		cfg |= MACB_BIT(NCFGR_MTI);
 1919 	} else if (dev->flags & (~IFF_ALLMULTI)) {
 1920 		/* Disable all multicast mode */
 1921 		macb_or_gem_writel(bp, HRB, 0);
 1922 		macb_or_gem_writel(bp, HRT, 0);
 1923 		cfg &= ~MACB_BIT(NCFGR_MTI);
 1924 	}
 1925 
 1926 	macb_writel(bp, NCFGR, cfg);
 1927 }
 1928 
 1929 static int macb_open(struct net_device *dev)
 1930 {
 1931 	struct macb *bp = netdev_priv(dev);
 1932 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
 1933 	int err;
 1934 
 1935 	netdev_dbg(bp->dev, "open\n");
 1936 
 1937 	/* carrier starts down */
 1938 	netif_carrier_off(dev);
 1939 
 1940 	/* if the PHY is not yet registered, retry later */
 1941 	if (!dev->phydev)
 1942 		return -EAGAIN;
 1943 
 1944 	/* RX buffers initialization */
 1945 	macb_init_rx_buffer_size(bp, bufsz);
 1946 
 1947 	err = macb_alloc_consistent(bp);
 1948 	if (err) {
 1949 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
 1950 			   err);
 1951 		return err;
 1952 	}
 1953 
 1954 	napi_enable(&bp->napi);
 1955 
 1956 	bp->macbgem_ops.mog_init_rings(bp);
 1957 	macb_init_hw(bp);
 1958 
 1959 	/* schedule a link state check */
 1960 	phy_start(dev->phydev);
 1961 
 1962 	netif_tx_start_all_queues(dev);
 1963 
 1964 	return 0;
 1965 }
 1966 
 1967 static int macb_close(struct net_device *dev)
 1968 {
 1969 	struct macb *bp = netdev_priv(dev);
 1970 	unsigned long flags;
 1971 
 1972 	netif_tx_stop_all_queues(dev);
 1973 	napi_disable(&bp->napi);
 1974 
 1975 	if (dev->phydev)
 1976 		phy_stop(dev->phydev);
 1977 
 1978 	spin_lock_irqsave(&bp->lock, flags);
 1979 	macb_reset_hw(bp);
 1980 	netif_carrier_off(dev);
 1981 	spin_unlock_irqrestore(&bp->lock, flags);
 1982 
 1983 	macb_free_consistent(bp);
 1984 
 1985 	return 0;
 1986 }
 1987 
 1988 static int macb_change_mtu(struct net_device *dev, int new_mtu)
 1989 {
 1990 	struct macb *bp = netdev_priv(dev);
 1991 	u32 max_mtu;
 1992 
 1993 	if (netif_running(dev))
 1994 		return -EBUSY;
 1995 
 1996 	max_mtu = ETH_DATA_LEN;
 1997 	if (bp->caps & MACB_CAPS_JUMBO)
 1998 		max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
 1999 
 2000 	if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
 2001 		return -EINVAL;
 2002 
 2003 	dev->mtu = new_mtu;
 2004 
 2005 	return 0;
 2006 }
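/* Editor's worked example (not from the driver source): on a JUMBO-capable
 * GEM with a hypothetical JML register value of 10240, the computation above
 * allows an MTU of up to 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222
 * bytes, while the lower bound stays at GEM_MTU_MIN_SIZE (68).
 */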
 2007 
 2008 static void gem_update_stats(struct macb *bp)
 2009 {
 2010 	unsigned int i;
 2011 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
 2012 
 2013 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
 2014 		u32 offset = gem_statistics[i].offset;
 2015 		u64 val = bp->macb_reg_readl(bp, offset);
 2016 
 2017 		bp->ethtool_stats[i] += val;
 2018 		*p += val;
 2019 
 2020 		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
 2021 			/* Add GEM_OCTTXH, GEM_OCTRXH */
 2022 			val = bp->macb_reg_readl(bp, offset + 4);
 2023 			bp->ethtool_stats[i] += ((u64)val) << 32;
 2024 			*(++p) += val;
 2025 		}
 2026 	}
 2027 }
 2028 
 2029 static struct net_device_stats *gem_get_stats(struct macb *bp)
 2030 {
 2031 	struct gem_stats *hwstat = &bp->hw_stats.gem;
 2032 	struct net_device_stats *nstat = &bp->stats;
 2033 
 2034 	gem_update_stats(bp);
 2035 
 2036 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
 2037 			    hwstat->rx_alignment_errors +
 2038 			    hwstat->rx_resource_errors +
 2039 			    hwstat->rx_overruns +
 2040 			    hwstat->rx_oversize_frames +
 2041 			    hwstat->rx_jabbers +
 2042 			    hwstat->rx_undersized_frames +
 2043 			    hwstat->rx_length_field_frame_errors);
 2044 	nstat->tx_errors = (hwstat->tx_late_collisions +
 2045 			    hwstat->tx_excessive_collisions +
 2046 			    hwstat->tx_underrun +
 2047 			    hwstat->tx_carrier_sense_errors);
 2048 	nstat->multicast = hwstat->rx_multicast_frames;
 2049 	nstat->collisions = (hwstat->tx_single_collision_frames +
 2050 			     hwstat->tx_multiple_collision_frames +
 2051 			     hwstat->tx_excessive_collisions);
 2052 	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
 2053 				   hwstat->rx_jabbers +
 2054 				   hwstat->rx_undersized_frames +
 2055 				   hwstat->rx_length_field_frame_errors);
 2056 	nstat->rx_over_errors = hwstat->rx_resource_errors;
 2057 	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
 2058 	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
 2059 	nstat->rx_fifo_errors = hwstat->rx_overruns;
 2060 	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
 2061 	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
 2062 	nstat->tx_fifo_errors = hwstat->tx_underrun;
 2063 
 2064 	return nstat;
 2065 }
 2066 
 2067 static void gem_get_ethtool_stats(struct net_device *dev,
 2068 				  struct ethtool_stats *stats, u64 *data)
 2069 {
 2070 	struct macb *bp;
 2071 
 2072 	bp = netdev_priv(dev);
 2073 	gem_update_stats(bp);
 2074 	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
 2075 }
 2076 
 2077 static int gem_get_sset_count(struct net_device *dev, int sset)
 2078 {
 2079 	switch (sset) {
 2080 	case ETH_SS_STATS:
 2081 		return GEM_STATS_LEN;
 2082 	default:
 2083 		return -EOPNOTSUPP;
 2084 	}
 2085 }
 2086 
 2087 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 2088 {
 2089 	unsigned int i;
 2090 
 2091 	switch (sset) {
 2092 	case ETH_SS_STATS:
 2093 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
 2094 			memcpy(p, gem_statistics[i].stat_string,
 2095 			       ETH_GSTRING_LEN);
 2096 		break;
 2097 	}
 2098 }
 2099 
 2100 static struct net_device_stats *macb_get_stats(struct net_device *dev)
 2101 {
 2102 	struct macb *bp = netdev_priv(dev);
 2103 	struct net_device_stats *nstat = &bp->stats;
 2104 	struct macb_stats *hwstat = &bp->hw_stats.macb;
 2105 
 2106 	if (macb_is_gem(bp))
 2107 		return gem_get_stats(bp);
 2108 
 2109 	/* read stats from hardware */
 2110 	macb_update_stats(bp);
 2111 
 2112 	/* Convert HW stats into netdevice stats */
 2113 	nstat->rx_errors = (hwstat->rx_fcs_errors +
 2114 			    hwstat->rx_align_errors +
 2115 			    hwstat->rx_resource_errors +
 2116 			    hwstat->rx_overruns +
 2117 			    hwstat->rx_oversize_pkts +
 2118 			    hwstat->rx_jabbers +
 2119 			    hwstat->rx_undersize_pkts +
 2120 			    hwstat->rx_length_mismatch);
 2121 	nstat->tx_errors = (hwstat->tx_late_cols +
 2122 			    hwstat->tx_excessive_cols +
 2123 			    hwstat->tx_underruns +
 2124 			    hwstat->tx_carrier_errors +
 2125 			    hwstat->sqe_test_errors);
 2126 	nstat->collisions = (hwstat->tx_single_cols +
 2127 			     hwstat->tx_multiple_cols +
 2128 			     hwstat->tx_excessive_cols);
 2129 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
 2130 				   hwstat->rx_jabbers +
 2131 				   hwstat->rx_undersize_pkts +
 2132 				   hwstat->rx_length_mismatch);
 2133 	nstat->rx_over_errors = hwstat->rx_resource_errors +
 2134 				   hwstat->rx_overruns;
 2135 	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
 2136 	nstat->rx_frame_errors = hwstat->rx_align_errors;
 2137 	nstat->rx_fifo_errors = hwstat->rx_overruns;
 2138 	/* XXX: What does "missed" mean? */
 2139 	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
 2140 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
 2141 	nstat->tx_fifo_errors = hwstat->tx_underruns;
 2142 	/* Don't know about heartbeat or window errors... */
 2143 
 2144 	return nstat;
 2145 }
 2146 
 2147 static int macb_get_regs_len(struct net_device *netdev)
 2148 {
 2149 	return MACB_GREGS_NBR * sizeof(u32);
 2150 }
 2151 
 2152 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 2153 			  void *p)
 2154 {
 2155 	struct macb *bp = netdev_priv(dev);
 2156 	unsigned int tail, head;
 2157 	u32 *regs_buff = p;
 2158 
 2159 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
 2160 			| MACB_GREGS_VERSION;
 2161 
 2162 	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
 2163 	head = macb_tx_ring_wrap(bp->queues[0].tx_head);
 2164 
 2165 	regs_buff[0]  = macb_readl(bp, NCR);
 2166 	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
 2167 	regs_buff[2]  = macb_readl(bp, NSR);
 2168 	regs_buff[3]  = macb_readl(bp, TSR);
 2169 	regs_buff[4]  = macb_readl(bp, RBQP);
 2170 	regs_buff[5]  = macb_readl(bp, TBQP);
 2171 	regs_buff[6]  = macb_readl(bp, RSR);
 2172 	regs_buff[7]  = macb_readl(bp, IMR);
 2173 
 2174 	regs_buff[8]  = tail;
 2175 	regs_buff[9]  = head;
 2176 	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
 2177 	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
 2178 
 2179 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
 2180 		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
 2181 	if (macb_is_gem(bp))
 2182 		regs_buff[13] = gem_readl(bp, DMACFG);
 2183 }
 2184 
 2185 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 2186 {
 2187 	struct macb *bp = netdev_priv(netdev);
 2188 
 2189 	wol->supported = 0;
 2190 	wol->wolopts = 0;
 2191 
 2192 	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
 2193 		wol->supported = WAKE_MAGIC;
 2194 
 2195 		if (bp->wol & MACB_WOL_ENABLED)
 2196 			wol->wolopts |= WAKE_MAGIC;
 2197 	}
 2198 }
 2199 
 2200 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 2201 {
 2202 	struct macb *bp = netdev_priv(netdev);
 2203 
 2204 	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
 2205 	    (wol->wolopts & ~WAKE_MAGIC))
 2206 		return -EOPNOTSUPP;
 2207 
 2208 	if (wol->wolopts & WAKE_MAGIC)
 2209 		bp->wol |= MACB_WOL_ENABLED;
 2210 	else
 2211 		bp->wol &= ~MACB_WOL_ENABLED;
 2212 
 2213 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
 2214 
 2215 	return 0;
 2216 }
 2217 
 2218 static const struct ethtool_ops macb_ethtool_ops = {
 2219 	.get_regs_len		= macb_get_regs_len,
 2220 	.get_regs		= macb_get_regs,
 2221 	.get_link		= ethtool_op_get_link,
 2222 	.get_ts_info		= ethtool_op_get_ts_info,
 2223 	.get_wol		= macb_get_wol,
 2224 	.set_wol		= macb_set_wol,
 2225 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
 2226 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
 2227 };
 2228 
 2229 static const struct ethtool_ops gem_ethtool_ops = {
 2230 	.get_regs_len		= macb_get_regs_len,
 2231 	.get_regs		= macb_get_regs,
 2232 	.get_link		= ethtool_op_get_link,
 2233 	.get_ts_info		= ethtool_op_get_ts_info,
 2234 	.get_ethtool_stats	= gem_get_ethtool_stats,
 2235 	.get_strings		= gem_get_ethtool_strings,
 2236 	.get_sset_count		= gem_get_sset_count,
 2237 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
 2238 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
 2239 };
 2240 
 2241 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 2242 {
 2243 	struct phy_device *phydev = dev->phydev;
 2244 
 2245 	if (!netif_running(dev))
 2246 		return -EINVAL;
 2247 
 2248 	if (!phydev)
 2249 		return -ENODEV;
 2250 
 2251 	return phy_mii_ioctl(phydev, rq, cmd);
 2252 }
 2253 
 2254 static int macb_set_features(struct net_device *netdev,
 2255 			     netdev_features_t features)
 2256 {
 2257 	struct macb *bp = netdev_priv(netdev);
 2258 	netdev_features_t changed = features ^ netdev->features;
 2259 
 2260 	/* TX checksum offload */
 2261 	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
 2262 		u32 dmacfg;
 2263 
 2264 		dmacfg = gem_readl(bp, DMACFG);
 2265 		if (features & NETIF_F_HW_CSUM)
 2266 			dmacfg |= GEM_BIT(TXCOEN);
 2267 		else
 2268 			dmacfg &= ~GEM_BIT(TXCOEN);
 2269 		gem_writel(bp, DMACFG, dmacfg);
 2270 	}
 2271 
 2272 	/* RX checksum offload */
 2273 	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
 2274 		u32 netcfg;
 2275 
 2276 		netcfg = gem_readl(bp, NCFGR);
 2277 		if (features & NETIF_F_RXCSUM &&
 2278 		    !(netdev->flags & IFF_PROMISC))
 2279 			netcfg |= GEM_BIT(RXCOEN);
 2280 		else
 2281 			netcfg &= ~GEM_BIT(RXCOEN);
 2282 		gem_writel(bp, NCFGR, netcfg);
 2283 	}
 2284 
 2285 	return 0;
 2286 }
 2287 
 2288 static const struct net_device_ops macb_netdev_ops = {
 2289 	.ndo_open		= macb_open,
 2290 	.ndo_stop		= macb_close,
 2291 	.ndo_start_xmit		= macb_start_xmit,
 2292 	.ndo_set_rx_mode	= macb_set_rx_mode,
 2293 	.ndo_get_stats		= macb_get_stats,
 2294 	.ndo_do_ioctl		= macb_ioctl,
 2295 	.ndo_validate_addr	= eth_validate_addr,
 2296 	.ndo_change_mtu		= macb_change_mtu,
 2297 	.ndo_set_mac_address	= eth_mac_addr,
 2298 #ifdef CONFIG_NET_POLL_CONTROLLER
 2299 	.ndo_poll_controller	= macb_poll_controller,
 2300 #endif
 2301 	.ndo_set_features	= macb_set_features,
 2302 };
 2303 
 2304 /* Configure peripheral capabilities according to device tree
 2305  * and integration options used
 2306  */
 2307 static void macb_configure_caps(struct macb *bp,
 2308 				const struct macb_config *dt_conf)
 2309 {
 2310 	u32 dcfg;
 2311 
 2312 	if (dt_conf)
 2313 		bp->caps = dt_conf->caps;
 2314 
 2315 	if (hw_is_gem(bp->regs, bp->native_io)) {
 2316 		bp->caps |= MACB_CAPS_MACB_IS_GEM;
 2317 
 2318 		dcfg = gem_readl(bp, DCFG1);
 2319 		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
 2320 			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
 2321 		dcfg = gem_readl(bp, DCFG2);
 2322 		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
 2323 			bp->caps |= MACB_CAPS_FIFO_MODE;
 2324 	}
 2325 
 2326 	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
 2327 }
 2328 
 2329 static void macb_probe_queues(void __iomem *mem,
 2330 			      bool native_io,
 2331 			      unsigned int *queue_mask,
 2332 			      unsigned int *num_queues)
 2333 {
 2334 	unsigned int hw_q;
 2335 
 2336 	*queue_mask = 0x1;
 2337 	*num_queues = 1;
 2338 
 2339 	/* is it macb or gem?
 2340 	 *
 2341 	 * We need to read directly from the hardware here because
 2342 	 * we are early in the probe process and the
 2343 	 * MACB_CAPS_MACB_IS_GEM flag is not set yet.
 2344 	 */
 2345 	if (!hw_is_gem(mem, native_io))
 2346 		return;
 2347 
 2348 	/* bit 0 is never set but queue 0 always exists */
 2349 	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
 2350 
 2351 	*queue_mask |= 0x1;
 2352 
 2353 	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
 2354 		if (*queue_mask & (1 << hw_q))
 2355 			(*num_queues)++;
 2356 }
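/* Editor's worked example (not from the driver source): if GEM_DCFG6 reads
 * as 0x0e, the code above yields *queue_mask = 0x0e | 0x1 = 0x0f and then
 * counts the set bits 1..3, so *num_queues = 4; on a plain MACB the early
 * return leaves the defaults of queue_mask = 0x1 and num_queues = 1.
 */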
 2357 
 2358 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
 2359 			 struct clk **hclk, struct clk **tx_clk,
 2360 			 struct clk **rx_clk)
 2361 {
 2362 	int err;
 2363 
 2364 	*pclk = devm_clk_get(&pdev->dev, "pclk");
 2365 	if (IS_ERR(*pclk)) {
 2366 		err = PTR_ERR(*pclk);
 2367 		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
 2368 		return err;
 2369 	}
 2370 
 2371 	*hclk = devm_clk_get(&pdev->dev, "hclk");
 2372 	if (IS_ERR(*hclk)) {
 2373 		err = PTR_ERR(*hclk);
 2374 		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
 2375 		return err;
 2376 	}
 2377 
 2378 	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
 2379 	if (IS_ERR(*tx_clk))
 2380 		*tx_clk = NULL;
 2381 
 2382 	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
 2383 	if (IS_ERR(*rx_clk))
 2384 		*rx_clk = NULL;
 2385 
 2386 	err = clk_prepare_enable(*pclk);
 2387 	if (err) {
 2388 		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
 2389 		return err;
 2390 	}
 2391 
 2392 	err = clk_prepare_enable(*hclk);
 2393 	if (err) {
 2394 		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
 2395 		goto err_disable_pclk;
 2396 	}
 2397 
 2398 	err = clk_prepare_enable(*tx_clk);
 2399 	if (err) {
 2400 		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
 2401 		goto err_disable_hclk;
 2402 	}
 2403 
 2404 	err = clk_prepare_enable(*rx_clk);
 2405 	if (err) {
 2406 		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
 2407 		goto err_disable_txclk;
 2408 	}
 2409 
 2410 	return 0;
 2411 
 2412 err_disable_txclk:
 2413 	clk_disable_unprepare(*tx_clk);
 2414 
 2415 err_disable_hclk:
 2416 	clk_disable_unprepare(*hclk);
 2417 
 2418 err_disable_pclk:
 2419 	clk_disable_unprepare(*pclk);
 2420 
 2421 	return err;
 2422 }
 2423 
 2424 static int macb_init(struct platform_device *pdev)
 2425 {
 2426 	struct net_device *dev = platform_get_drvdata(pdev);
 2427 	unsigned int hw_q, q;
 2428 	struct macb *bp = netdev_priv(dev);
 2429 	struct macb_queue *queue;
 2430 	int err;
 2431 	u32 val;
 2432 
 2433 	/* Set the queue register mapping once and for all: queue0 has a special
 2434 	 * register mapping, but we don't want to test the queue index and then
 2435 	 * compute the corresponding register offset at run time.
 2436 	 */
 2437 	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
 2438 		if (!(bp->queue_mask & (1 << hw_q)))
 2439 			continue;
 2440 
 2441 		queue = &bp->queues[q];
 2442 		queue->bp = bp;
 2443 		if (hw_q) {
 2444 			queue->ISR  = GEM_ISR(hw_q - 1);
 2445 			queue->IER  = GEM_IER(hw_q - 1);
 2446 			queue->IDR  = GEM_IDR(hw_q - 1);
 2447 			queue->IMR  = GEM_IMR(hw_q - 1);
 2448 			queue->TBQP = GEM_TBQP(hw_q - 1);
 2449 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 2450 			queue->TBQPH = GEM_TBQPH(hw_q - 1);
 2451 #endif
 2452 		} else {
 2453 			/* queue0 uses legacy registers */
 2454 			queue->ISR  = MACB_ISR;
 2455 			queue->IER  = MACB_IER;
 2456 			queue->IDR  = MACB_IDR;
 2457 			queue->IMR  = MACB_IMR;
 2458 			queue->TBQP = MACB_TBQP;
 2459 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 2460 			queue->TBQPH = MACB_TBQPH;
 2461 #endif
 2462 		}
 2463 
 2464 		/* Get the IRQ: here we use the Linux queue index, not the
 2465 		 * hardware queue index. The queue IRQ definitions in the device
 2466 		 * tree must remove the optional gaps that could exist in the
 2467 		 * hardware queue mask.
 2468 		 */
 2469 		queue->irq = platform_get_irq(pdev, q);
 2470 		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
 2471 				       IRQF_SHARED, dev->name, queue);
 2472 		if (err) {
 2473 			dev_err(&pdev->dev,
 2474 				"Unable to request IRQ %d (error %d)\n",
 2475 				queue->irq, err);
 2476 			return err;
 2477 		}
 2478 
 2479 		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
 2480 		q++;
 2481 	}
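	/* Editor's worked example (not from the driver source): with a
	 * hypothetical queue_mask of 0b1011, hardware queues 0, 1 and 3 map to
	 * Linux queues q = 0, 1 and 2 in the loop above, so the device tree
	 * must list exactly three interrupts, in that order, with no gap left
	 * for the unused hardware queue 2.
	 */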
 2482 
 2483 	dev->netdev_ops = &macb_netdev_ops;
 2484 	netif_napi_add(dev, &bp->napi, macb_poll, 64);
 2485 
 2486 	/* set up the appropriate routines according to the adapter type */
 2487 	if (macb_is_gem(bp)) {
 2488 		bp->max_tx_length = GEM_MAX_TX_LEN;
 2489 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
 2490 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
 2491 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
 2492 		bp->macbgem_ops.mog_rx = gem_rx;
 2493 		dev->ethtool_ops = &gem_ethtool_ops;
 2494 	} else {
 2495 		bp->max_tx_length = MACB_MAX_TX_LEN;
 2496 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
 2497 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
 2498 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
 2499 		bp->macbgem_ops.mog_rx = macb_rx;
 2500 		dev->ethtool_ops = &macb_ethtool_ops;
 2501 	}
 2502 
 2503 	/* Set features */
 2504 	dev->hw_features = NETIF_F_SG;
 2505 	/* Checksum offload is only available on gem with packet buffer */
 2506 	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
 2507 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
 2508 	if (bp->caps & MACB_CAPS_SG_DISABLED)
 2509 		dev->hw_features &= ~NETIF_F_SG;
 2510 	dev->features = dev->hw_features;
 2511 
 2512 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
 2513 		val = 0;
 2514 		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
 2515 			val = GEM_BIT(RGMII);
 2516 		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
 2517 			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
 2518 			val = MACB_BIT(RMII);
 2519 		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
 2520 			val = MACB_BIT(MII);
 2521 
 2522 		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
 2523 			val |= MACB_BIT(CLKEN);
 2524 
 2525 		macb_or_gem_writel(bp, USRIO, val);
 2526 	}
 2527 
 2528 	/* Set MII management clock divider */
 2529 	val = macb_mdc_clk_div(bp);
 2530 	val |= macb_dbw(bp);
 2531 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
 2532 		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 2533 	macb_writel(bp, NCFGR, val);
 2534 
 2535 	return 0;
 2536 }
 2537 
 2538 #if defined(CONFIG_OF)
 2539 /* 1518 rounded up */
 2540 #define AT91ETHER_MAX_RBUFF_SZ	0x600
 2541 /* max number of receive buffers */
 2542 #define AT91ETHER_MAX_RX_DESCR	9
 2543 
 2544 /* Initialize and start the Receive and Transmit subsystems */
 2545 static int at91ether_start(struct net_device *dev)
 2546 {
 2547 	struct macb *lp = netdev_priv(dev);
 2548 	dma_addr_t addr;
 2549 	u32 ctl;
 2550 	int i;
 2551 
 2552 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 2553 					 (AT91ETHER_MAX_RX_DESCR *
 2554 					  sizeof(struct macb_dma_desc)),
 2555 					 &lp->rx_ring_dma, GFP_KERNEL);
 2556 	if (!lp->rx_ring)
 2557 		return -ENOMEM;
 2558 
 2559 	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
 2560 					    AT91ETHER_MAX_RX_DESCR *
 2561 					    AT91ETHER_MAX_RBUFF_SZ,
 2562 					    &lp->rx_buffers_dma, GFP_KERNEL);
 2563 	if (!lp->rx_buffers) {
 2564 		dma_free_coherent(&lp->pdev->dev,
 2565 				  AT91ETHER_MAX_RX_DESCR *
 2566 				  sizeof(struct macb_dma_desc),
 2567 				  lp->rx_ring, lp->rx_ring_dma);
 2568 		lp->rx_ring = NULL;
 2569 		return -ENOMEM;
 2570 	}
 2571 
 2572 	addr = lp->rx_buffers_dma;
 2573 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
 2574 		lp->rx_ring[i].addr = addr;
 2575 		lp->rx_ring[i].ctrl = 0;
 2576 		addr += AT91ETHER_MAX_RBUFF_SZ;
 2577 	}
 2578 
 2579 	/* Set the Wrap bit on the last descriptor */
 2580 	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
 2581 
 2582 	/* Reset buffer index */
 2583 	lp->rx_tail = 0;
 2584 
 2585 	/* Program address of descriptor list in Rx Buffer Queue register */
 2586 	macb_writel(lp, RBQP, lp->rx_ring_dma);
 2587 
 2588 	/* Enable Receive and Transmit */
 2589 	ctl = macb_readl(lp, NCR);
 2590 	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 2591 
 2592 	return 0;
 2593 }
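/* Editor's worked example (not from the driver source): with the constants
 * above, the receive area is AT91ETHER_MAX_RX_DESCR (9) buffers of
 * AT91ETHER_MAX_RBUFF_SZ (0x600 = 1536) bytes each, i.e. 13824 bytes of
 * coherent buffer memory plus 9 descriptors, with the wrap bit set on the
 * last descriptor so the hardware cycles back to the first buffer.
 */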
 2594 
 2595 /* Open the ethernet interface */
 2596 static int at91ether_open(struct net_device *dev)
 2597 {
 2598 	struct macb *lp = netdev_priv(dev);
 2599 	u32 ctl;
 2600 	int ret;
 2601 
 2602 	/* Clear internal statistics */
 2603 	ctl = macb_readl(lp, NCR);
 2604 	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
 2605 
 2606 	macb_set_hwaddr(lp);
 2607 
 2608 	ret = at91ether_start(dev);
 2609 	if (ret)
 2610 		return ret;
 2611 
 2612 	/* Enable MAC interrupts */
 2613 	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
 2614 			     MACB_BIT(RXUBR)	|
 2615 			     MACB_BIT(ISR_TUND)	|
 2616 			     MACB_BIT(ISR_RLE)	|
 2617 			     MACB_BIT(TCOMP)	|
 2618 			     MACB_BIT(ISR_ROVR)	|
 2619 			     MACB_BIT(HRESP));
 2620 
 2621 	/* schedule a link state check */
 2622 	phy_start(dev->phydev);
 2623 
 2624 	netif_start_queue(dev);
 2625 
 2626 	return 0;
 2627 }
 2628 
 2629 /* Close the interface */
 2630 static int at91ether_close(struct net_device *dev)
 2631 {
 2632 	struct macb *lp = netdev_priv(dev);
 2633 	u32 ctl;
 2634 
 2635 	/* Disable Receiver and Transmitter */
 2636 	ctl = macb_readl(lp, NCR);
 2637 	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
 2638 
 2639 	/* Disable MAC interrupts */
 2640 	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
 2641 			     MACB_BIT(RXUBR)	|
 2642 			     MACB_BIT(ISR_TUND)	|
 2643 			     MACB_BIT(ISR_RLE)	|
 2644 			     MACB_BIT(TCOMP)	|
 2645 			     MACB_BIT(ISR_ROVR) |
 2646 			     MACB_BIT(HRESP));
 2647 
 2648 	netif_stop_queue(dev);
 2649 
 2650 	dma_free_coherent(&lp->pdev->dev,
 2651 			  AT91ETHER_MAX_RX_DESCR *
 2652 			  sizeof(struct macb_dma_desc),
 2653 			  lp->rx_ring, lp->rx_ring_dma);
 2654 	lp->rx_ring = NULL;
 2655 
 2656 	dma_free_coherent(&lp->pdev->dev,
 2657 			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
 2658 			  lp->rx_buffers, lp->rx_buffers_dma);
 2659 	lp->rx_buffers = NULL;
 2660 
 2661 	return 0;
 2662 }
 2663 
 2664 /* Transmit packet */
 2665 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 2666 {
 2667 	struct macb *lp = netdev_priv(dev);
 2668 
 2669 	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
 2670 		netif_stop_queue(dev);
 2671 
 2672 		/* Store packet information (to free when Tx completed) */
 2673 		lp->skb = skb;
 2674 		lp->skb_length = skb->len;
 2675 		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
 2676 							DMA_TO_DEVICE);
 2677 
 2678 		/* Set address of the data in the Transmit Address register */
 2679 		macb_writel(lp, TAR, lp->skb_physaddr);
 2680 		/* Set length of the packet in the Transmit Control register */
 2681 		macb_writel(lp, TCR, skb->len);
 2682 
 2683 	} else {
 2684 		netdev_err(dev, "%s called, but device is busy!\n", __func__);
 2685 		return NETDEV_TX_BUSY;
 2686 	}
 2687 
 2688 	return NETDEV_TX_OK;
 2689 }
 2690 
 2691 /* Extract received frames from the buffer descriptors and send them to the
 2692  * upper layers. (Called from interrupt context.)
 2693  */
 2694 static void at91ether_rx(struct net_device *dev)
 2695 {
 2696 	struct macb *lp = netdev_priv(dev);
 2697 	unsigned char *p_recv;
 2698 	struct sk_buff *skb;
 2699 	unsigned int pktlen;
 2700 
 2701 	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
 2702 		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
 2703 		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
 2704 		skb = netdev_alloc_skb(dev, pktlen + 2);
 2705 		if (skb) {
 2706 			skb_reserve(skb, 2);
 2707 			memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 2708 
 2709 			skb->protocol = eth_type_trans(skb, dev);
 2710 			lp->stats.rx_packets++;
 2711 			lp->stats.rx_bytes += pktlen;
 2712 			netif_rx(skb);
 2713 		} else {
 2714 			lp->stats.rx_dropped++;
 2715 		}
 2716 
 2717 		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
 2718 			lp->stats.multicast++;
 2719 
 2720 		/* reset ownership bit */
 2721 		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
 2722 
 2723 		/* wrap after last buffer */
 2724 		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
 2725 			lp->rx_tail = 0;
 2726 		else
 2727 			lp->rx_tail++;
 2728 	}
 2729 }
 2730 
 2731 /* MAC interrupt handler */
 2732 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
 2733 {
 2734 	struct net_device *dev = dev_id;
 2735 	struct macb *lp = netdev_priv(dev);
 2736 	u32 intstatus, ctl;
 2737 
 2738 	/* MAC Interrupt Status register indicates what interrupts are pending.
 2739 	 * It is automatically cleared once read.
 2740 	 */
 2741 	intstatus = macb_readl(lp, ISR);
 2742 
 2743 	/* Receive complete */
 2744 	if (intstatus & MACB_BIT(RCOMP))
 2745 		at91ether_rx(dev);
 2746 
 2747 	/* Transmit complete */
 2748 	if (intstatus & MACB_BIT(TCOMP)) {
 2749 		/* The TCOM bit is set even if the transmission failed */
 2750 		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
 2751 			lp->stats.tx_errors++;
 2752 
 2753 		if (lp->skb) {
 2754 			dev_kfree_skb_irq(lp->skb);
 2755 			lp->skb = NULL;
 2756 			dma_unmap_single(NULL, lp->skb_physaddr,
 2757 					 lp->skb_length, DMA_TO_DEVICE);
 2758 			lp->stats.tx_packets++;
 2759 			lp->stats.tx_bytes += lp->skb_length;
 2760 		}
 2761 		netif_wake_queue(dev);
 2762 	}
 2763 
 2764 	/* Work-around for EMAC Errata section 41.3.1 */
 2765 	if (intstatus & MACB_BIT(RXUBR)) {
 2766 		ctl = macb_readl(lp, NCR);
 2767 		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
 2768 		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
 2769 	}
 2770 
 2771 	if (intstatus & MACB_BIT(ISR_ROVR))
 2772 		netdev_err(dev, "ROVR error\n");
 2773 
 2774 	return IRQ_HANDLED;
 2775 }
 2776 
 2777 #ifdef CONFIG_NET_POLL_CONTROLLER
 2778 static void at91ether_poll_controller(struct net_device *dev)
 2779 {
 2780 	unsigned long flags;
 2781 
 2782 	local_irq_save(flags);
 2783 	at91ether_interrupt(dev->irq, dev);
 2784 	local_irq_restore(flags);
 2785 }
 2786 #endif
 2787 
 2788 static const struct net_device_ops at91ether_netdev_ops = {
 2789 	.ndo_open		= at91ether_open,
 2790 	.ndo_stop		= at91ether_close,
 2791 	.ndo_start_xmit		= at91ether_start_xmit,
 2792 	.ndo_get_stats		= macb_get_stats,
 2793 	.ndo_set_rx_mode	= macb_set_rx_mode,
 2794 	.ndo_set_mac_address	= eth_mac_addr,
 2795 	.ndo_do_ioctl		= macb_ioctl,
 2796 	.ndo_validate_addr	= eth_validate_addr,
 2797 	.ndo_change_mtu		= eth_change_mtu,
 2798 #ifdef CONFIG_NET_POLL_CONTROLLER
 2799 	.ndo_poll_controller	= at91ether_poll_controller,
 2800 #endif
 2801 };
 2802 
 2803 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
 2804 			      struct clk **hclk, struct clk **tx_clk,
 2805 			      struct clk **rx_clk)
 2806 {
 2807 	int err;
 2808 
 2809 	*hclk = NULL;
 2810 	*tx_clk = NULL;
 2811 	*rx_clk = NULL;
 2812 
 2813 	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
 2814 	if (IS_ERR(*pclk))
 2815 		return PTR_ERR(*pclk);
 2816 
 2817 	err = clk_prepare_enable(*pclk);
 2818 	if (err) {
 2819 		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
 2820 		return err;
 2821 	}
 2822 
 2823 	return 0;
 2824 }
 2825 
 2826 static int at91ether_init(struct platform_device *pdev)
 2827 {
 2828 	struct net_device *dev = platform_get_drvdata(pdev);
 2829 	struct macb *bp = netdev_priv(dev);
 2830 	int err;
 2831 	u32 reg;
 2832 
 2833 	dev->netdev_ops = &at91ether_netdev_ops;
 2834 	dev->ethtool_ops = &macb_ethtool_ops;
 2835 
 2836 	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
 2837 			       0, dev->name, dev);
 2838 	if (err)
 2839 		return err;
 2840 
 2841 	macb_writel(bp, NCR, 0);
 2842 
 2843 	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
 2844 	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
 2845 		reg |= MACB_BIT(RM9200_RMII);
 2846 
 2847 	macb_writel(bp, NCFGR, reg);
 2848 
 2849 	return 0;
 2850 }
 2851 
 2852 static const struct macb_config at91sam9260_config = {
 2853 	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2854 	.clk_init = macb_clk_init,
 2855 	.init = macb_init,
 2856 };
 2857 
 2858 static const struct macb_config pc302gem_config = {
 2859 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 2860 	.dma_burst_length = 16,
 2861 	.clk_init = macb_clk_init,
 2862 	.init = macb_init,
 2863 };
 2864 
 2865 static const struct macb_config sama5d2_config = {
 2866 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2867 	.dma_burst_length = 16,
 2868 	.clk_init = macb_clk_init,
 2869 	.init = macb_init,
 2870 };
 2871 
 2872 static const struct macb_config sama5d3_config = {
 2873 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
 2874 	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2875 	.dma_burst_length = 16,
 2876 	.clk_init = macb_clk_init,
 2877 	.init = macb_init,
 2878 };
 2879 
 2880 static const struct macb_config sama5d4_config = {
 2881 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2882 	.dma_burst_length = 4,
 2883 	.clk_init = macb_clk_init,
 2884 	.init = macb_init,
 2885 };
 2886 
 2887 static const struct macb_config emac_config = {
 2888 	.clk_init = at91ether_clk_init,
 2889 	.init = at91ether_init,
 2890 };
 2891 
 2892 static const struct macb_config np4_config = {
 2893 	.caps = MACB_CAPS_USRIO_DISABLED,
 2894 	.clk_init = macb_clk_init,
 2895 	.init = macb_init,
 2896 };
 2897 
 2898 static const struct macb_config zynqmp_config = {
 2899 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
 2900 	.dma_burst_length = 16,
 2901 	.clk_init = macb_clk_init,
 2902 	.init = macb_init,
 2903 	.jumbo_max_len = 10240,
 2904 };
 2905 
 2906 static const struct macb_config zynq_config = {
 2907 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
 2908 	.dma_burst_length = 16,
 2909 	.clk_init = macb_clk_init,
 2910 	.init = macb_init,
 2911 };
 2912 
 2913 static const struct of_device_id macb_dt_ids[] = {
 2914 	{ .compatible = "cdns,at32ap7000-macb" },
 2915 	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
 2916 	{ .compatible = "cdns,macb" },
 2917 	{ .compatible = "cdns,np4-macb", .data = &np4_config },
 2918 	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
 2919 	{ .compatible = "cdns,gem", .data = &pc302gem_config },
 2920 	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
 2921 	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
 2922 	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
 2923 	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
 2924 	{ .compatible = "cdns,emac", .data = &emac_config },
 2925 	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
 2926 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
 2927 	{ /* sentinel */ }
 2928 };
 2929 MODULE_DEVICE_TABLE(of, macb_dt_ids);
 2930 #endif /* CONFIG_OF */
 2931 
 2932 static int macb_probe(struct platform_device *pdev)
 2933 {
 2934 	int (*clk_init)(struct platform_device *, struct clk **,
 2935 			struct clk **, struct clk **,  struct clk **)
 2936 					      = macb_clk_init;
 2937 	int (*init)(struct platform_device *) = macb_init;
 2938 	struct device_node *np = pdev->dev.of_node;
 2939 	struct device_node *phy_node;
 2940 	const struct macb_config *macb_config = NULL;
 2941 	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
 2942 	unsigned int queue_mask, num_queues;
 2943 	struct macb_platform_data *pdata;
 2944 	bool native_io;
 2945 	struct phy_device *phydev;
 2946 	struct net_device *dev;
 2947 	struct resource *regs;
 2948 	void __iomem *mem;
 2949 	const char *mac;
 2950 	struct macb *bp;
 2951 	int err;
 2952 
 2953 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 2954 	mem = devm_ioremap_resource(&pdev->dev, regs);
 2955 	if (IS_ERR(mem))
 2956 		return PTR_ERR(mem);
 2957 
 2958 	if (np) {
 2959 		const struct of_device_id *match;
 2960 
 2961 		match = of_match_node(macb_dt_ids, np);
 2962 		if (match && match->data) {
 2963 			macb_config = match->data;
 2964 			clk_init = macb_config->clk_init;
 2965 			init = macb_config->init;
 2966 		}
 2967 	}
 2968 
 2969 	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
 2970 	if (err)
 2971 		return err;
 2972 
 2973 	native_io = hw_is_native_io(mem);
 2974 
 2975 	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
 2976 	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
 2977 	if (!dev) {
 2978 		err = -ENOMEM;
 2979 		goto err_disable_clocks;
 2980 	}
 2981 
 2982 	dev->base_addr = regs->start;
 2983 
 2984 	SET_NETDEV_DEV(dev, &pdev->dev);
 2985 
 2986 	bp = netdev_priv(dev);
 2987 	bp->pdev = pdev;
 2988 	bp->dev = dev;
 2989 	bp->regs = mem;
 2990 	bp->native_io = native_io;
 2991 	if (native_io) {
 2992 		bp->macb_reg_readl = hw_readl_native;
 2993 		bp->macb_reg_writel = hw_writel_native;
 2994 	} else {
 2995 		bp->macb_reg_readl = hw_readl;
 2996 		bp->macb_reg_writel = hw_writel;
 2997 	}
 2998 	bp->num_queues = num_queues;
 2999 	bp->queue_mask = queue_mask;
 3000 	if (macb_config)
 3001 		bp->dma_burst_length = macb_config->dma_burst_length;
 3002 	bp->pclk = pclk;
 3003 	bp->hclk = hclk;
 3004 	bp->tx_clk = tx_clk;
 3005 	bp->rx_clk = rx_clk;
 3006 	if (macb_config)
 3007 		bp->jumbo_max_len = macb_config->jumbo_max_len;
 3008 
 3009 	bp->wol = 0;
 3010 	if (of_get_property(np, "magic-packet", NULL))
 3011 		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
 3012 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 3013 
 3014 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3015 	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
 3016 		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
 3017 #endif
 3018 
 3019 	spin_lock_init(&bp->lock);
 3020 
 3021 	/* setup capabilities */
 3022 	macb_configure_caps(bp, macb_config);
 3023 
 3024 	platform_set_drvdata(pdev, dev);
 3025 
 3026 	dev->irq = platform_get_irq(pdev, 0);
 3027 	if (dev->irq < 0) {
 3028 		err = dev->irq;
 3029 		goto err_out_free_netdev;
 3030 	}
 3031 
 3032 	mac = of_get_mac_address(np);
 3033 	if (mac)
 3034 		ether_addr_copy(bp->dev->dev_addr, mac);
 3035 	else
 3036 		macb_get_hwaddr(bp);
 3037 
 3038 	/* Power up the PHY if there is a GPIO reset */
 3039 	phy_node =  of_get_next_available_child(np, NULL);
 3040 	if (phy_node) {
 3041 		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
 3042 
 3043 		if (gpio_is_valid(gpio)) {
 3044 			bp->reset_gpio = gpio_to_desc(gpio);
 3045 			gpiod_direction_output(bp->reset_gpio, 1);
 3046 		}
 3047 	}
 3048 	of_node_put(phy_node);
 3049 
 3050 	err = of_get_phy_mode(np);
 3051 	if (err < 0) {
 3052 		pdata = dev_get_platdata(&pdev->dev);
 3053 		if (pdata && pdata->is_rmii)
 3054 			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
 3055 		else
 3056 			bp->phy_interface = PHY_INTERFACE_MODE_MII;
 3057 	} else {
 3058 		bp->phy_interface = err;
 3059 	}
 3060 
 3061 	/* IP specific init */
 3062 	err = init(pdev);
 3063 	if (err)
 3064 		goto err_out_free_netdev;
 3065 
 3066 	err = macb_mii_init(bp);
 3067 	if (err)
 3068 		goto err_out_free_netdev;
 3069 
 3070 	phydev = dev->phydev;
 3071 
 3072 	netif_carrier_off(dev);
 3073 
 3074 	err = register_netdev(dev);
 3075 	if (err) {
 3076 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
 3077 		goto err_out_unregister_mdio;
 3078 	}
 3079 
 3080 	phy_attached_info(phydev);
 3081 
 3082 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
 3083 		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
 3084 		    dev->base_addr, dev->irq, dev->dev_addr);
 3085 
 3086 	return 0;
 3087 
 3088 err_out_unregister_mdio:
 3089 	phy_disconnect(dev->phydev);
 3090 	mdiobus_unregister(bp->mii_bus);
 3091 	mdiobus_free(bp->mii_bus);
 3092 
 3093 	/* Shutdown the PHY if there is a GPIO reset */
 3094 	if (bp->reset_gpio)
 3095 		gpiod_set_value(bp->reset_gpio, 0);
 3096 
 3097 err_out_free_netdev:
 3098 	free_netdev(dev);
 3099 
 3100 err_disable_clocks:
 3101 	clk_disable_unprepare(tx_clk);
 3102 	clk_disable_unprepare(hclk);
 3103 	clk_disable_unprepare(pclk);
 3104 	clk_disable_unprepare(rx_clk);
 3105 
 3106 	return err;
 3107 }
 3108 
 3109 static int macb_remove(struct platform_device *pdev)
 3110 {
 3111 	struct net_device *dev;
 3112 	struct macb *bp;
 3113 
 3114 	dev = platform_get_drvdata(pdev);
 3115 
 3116 	if (dev) {
 3117 		bp = netdev_priv(dev);
 3118 		if (dev->phydev)
 3119 			phy_disconnect(dev->phydev);
 3120 		mdiobus_unregister(bp->mii_bus);
 3121 		dev->phydev = NULL;
 3122 		mdiobus_free(bp->mii_bus);
 3123 
 3124 		/* Shutdown the PHY if there is a GPIO reset */
 3125 		if (bp->reset_gpio)
 3126 			gpiod_set_value(bp->reset_gpio, 0);
 3127 
 3128 		unregister_netdev(dev);
 3129 		clk_disable_unprepare(bp->tx_clk);
 3130 		clk_disable_unprepare(bp->hclk);
 3131 		clk_disable_unprepare(bp->pclk);
 3132 		clk_disable_unprepare(bp->rx_clk);
 3133 		free_netdev(dev);
 3134 	}
 3135 
 3136 	return 0;
 3137 }
 3138 
 3139 static int __maybe_unused macb_suspend(struct device *dev)
 3140 {
 3141 	struct platform_device *pdev = to_platform_device(dev);
 3142 	struct net_device *netdev = platform_get_drvdata(pdev);
 3143 	struct macb *bp = netdev_priv(netdev);
 3144 
 3145 	netif_carrier_off(netdev);
 3146 	netif_device_detach(netdev);
 3147 
 3148 	if (bp->wol & MACB_WOL_ENABLED) {
 3149 		macb_writel(bp, IER, MACB_BIT(WOL));
 3150 		macb_writel(bp, WOL, MACB_BIT(MAG));
 3151 		enable_irq_wake(bp->queues[0].irq);
 3152 	} else {
 3153 		clk_disable_unprepare(bp->tx_clk);
 3154 		clk_disable_unprepare(bp->hclk);
 3155 		clk_disable_unprepare(bp->pclk);
 3156 		clk_disable_unprepare(bp->rx_clk);
 3157 	}
 3158 
 3159 	return 0;
 3160 }
 3161 
 3162 static int __maybe_unused macb_resume(struct device *dev)
 3163 {
 3164 	struct platform_device *pdev = to_platform_device(dev);
 3165 	struct net_device *netdev = platform_get_drvdata(pdev);
 3166 	struct macb *bp = netdev_priv(netdev);
 3167 
 3168 	if (bp->wol & MACB_WOL_ENABLED) {
 3169 		macb_writel(bp, IDR, MACB_BIT(WOL));
 3170 		macb_writel(bp, WOL, 0);
 3171 		disable_irq_wake(bp->queues[0].irq);
 3172 	} else {
 3173 		clk_prepare_enable(bp->pclk);
 3174 		clk_prepare_enable(bp->hclk);
 3175 		clk_prepare_enable(bp->tx_clk);
 3176 		clk_prepare_enable(bp->rx_clk);
 3177 	}
 3178 
 3179 	netif_device_attach(netdev);
 3180 
 3181 	return 0;
 3182 }
 3183 
 3184 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
 3185 
 3186 static struct platform_driver macb_driver = {
 3187 	.probe		= macb_probe,
 3188 	.remove		= macb_remove,
 3189 	.driver		= {
 3190 		.name		= "macb",
 3191 		.of_match_table	= of_match_ptr(macb_dt_ids),
 3192 		.pm	= &macb_pm_ops,
 3193 	},
 3194 };
 3195 
 3196 module_platform_driver(macb_driver);
 3197 
 3198 MODULE_LICENSE("GPL");
 3199 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
 3200 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
 3201 MODULE_ALIAS("platform:macb");
 3202 
 3203 
 3204 
 3205 
 3206 
 3207 /* LDV_COMMENT_BEGIN_MAIN */
 3208 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 3209 
 3210 /*###########################################################################*/
 3211 
 3212 /*############## Driver Environment Generator 0.2 output ####################*/
 3213 
 3214 /*###########################################################################*/
 3215 
 3216 
 3217 
 3218 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
 3219 void ldv_check_final_state(void);
 3220 
 3221 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 3222 void ldv_check_return_value(int res);
 3223 
 3224 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 3225 void ldv_check_return_value_probe(int res);
 3226 
 3227 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 3228 void ldv_initialize(void);
 3229 
 3230 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 3231 void ldv_handler_precall(void);
 3232 
 3233 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
 3234 int nondet_int(void);
 3235 
 3236 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 3237 int LDV_IN_INTERRUPT;
 3238 
 3239 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 3240 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 3241 
 3242 
 3243 
 3244 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 3245 	/*============================= VARIABLE DECLARATION PART   =============================*/
 3246 	/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 3247 	/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 3248 	/* LDV_COMMENT_BEGIN_PREP */
 3249 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3250 	#define MACB_RX_BUFFER_SIZE	128
 3251 	#define RX_BUFFER_MULTIPLE	64  
 3252 	#define RX_RING_SIZE		512 
 3253 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3254 	#define TX_RING_SIZE		128 
 3255 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3256 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3257 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3258 				 | MACB_BIT(ISR_ROVR))
 3259 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3260 					| MACB_BIT(ISR_RLE)		\
 3261 					| MACB_BIT(TXERR))
 3262 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3263 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3264 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3265 	#define GEM_MTU_MIN_SIZE	68
 3266 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3267 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3268 	#define MACB_HALT_TIMEOUT	1230
 3269 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3270 	#endif
 3271 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3272 	#endif
 3273 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3274 	#endif
 3275 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3276 	#endif
 3277 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3278 	#endif
 3279 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3280 	#endif
 3281 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3282 	#endif
 3283 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3284 	#endif
 3285 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3286 	#endif
 3287 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3288 	#endif
 3289 	/* LDV_COMMENT_END_PREP */
 3290 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs_len" */
 3291 	struct net_device * var_group1;
 3292 	/* LDV_COMMENT_BEGIN_PREP */
 3293 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3294 	#endif
 3295 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3296 	#endif
 3297 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3298 	#endif
 3299 	#if defined(CONFIG_OF)
 3300 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3301 	#define AT91ETHER_MAX_RX_DESCR	9
 3302 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3303 	#endif
 3304 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3305 	#endif
 3306 	#endif 
 3307 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3308 	#endif
 3309 	/* LDV_COMMENT_END_PREP */
 3310 	/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 3311 	/* LDV_COMMENT_BEGIN_PREP */
 3312 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3313 	#define MACB_RX_BUFFER_SIZE	128
 3314 	#define RX_BUFFER_MULTIPLE	64  
 3315 	#define RX_RING_SIZE		512 
 3316 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3317 	#define TX_RING_SIZE		128 
 3318 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3319 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3320 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3321 				 | MACB_BIT(ISR_ROVR))
 3322 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3323 					| MACB_BIT(ISR_RLE)		\
 3324 					| MACB_BIT(TXERR))
 3325 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3326 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3327 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3328 	#define GEM_MTU_MIN_SIZE	68
 3329 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3330 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3331 	#define MACB_HALT_TIMEOUT	1230
 3332 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3333 	#endif
 3334 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3335 	#endif
 3336 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3337 	#endif
 3338 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3339 	#endif
 3340 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3341 	#endif
 3342 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3343 	#endif
 3344 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3345 	#endif
 3346 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3347 	#endif
 3348 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3349 	#endif
 3350 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3351 	#endif
 3352 	/* LDV_COMMENT_END_PREP */
 3353 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs" */
 3354 	struct ethtool_regs * var_group2;
 3355 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs" */
 3356 	void * var_macb_get_regs_68_p2;
 3357 	/* LDV_COMMENT_BEGIN_PREP */
 3358 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3359 	#endif
 3360 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3361 	#endif
 3362 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3363 	#endif
 3364 	#if defined(CONFIG_OF)
 3365 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3366 	#define AT91ETHER_MAX_RX_DESCR	9
 3367 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3368 	#endif
 3369 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3370 	#endif
 3371 	#endif 
 3372 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3373 	#endif
 3374 	/* LDV_COMMENT_END_PREP */
 3375 	/* content: static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 3376 	/* LDV_COMMENT_BEGIN_PREP */
 3377 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3378 	#define MACB_RX_BUFFER_SIZE	128
 3379 	#define RX_BUFFER_MULTIPLE	64  
 3380 	#define RX_RING_SIZE		512 
 3381 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3382 	#define TX_RING_SIZE		128 
 3383 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3384 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3385 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3386 				 | MACB_BIT(ISR_ROVR))
 3387 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3388 					| MACB_BIT(ISR_RLE)		\
 3389 					| MACB_BIT(TXERR))
 3390 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3391 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3392 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3393 	#define GEM_MTU_MIN_SIZE	68
 3394 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3395 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3396 	#define MACB_HALT_TIMEOUT	1230
 3397 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3398 	#endif
 3399 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3400 	#endif
 3401 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3402 	#endif
 3403 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3404 	#endif
 3405 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3406 	#endif
 3407 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3408 	#endif
 3409 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3410 	#endif
 3411 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3412 	#endif
 3413 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3414 	#endif
 3415 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3416 	#endif
 3417 	/* LDV_COMMENT_END_PREP */
 3418 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_wol" */
 3419 	struct ethtool_wolinfo * var_group3;
 3420 	/* LDV_COMMENT_BEGIN_PREP */
 3421 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3422 	#endif
 3423 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3424 	#endif
 3425 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3426 	#endif
 3427 	#if defined(CONFIG_OF)
 3428 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3429 	#define AT91ETHER_MAX_RX_DESCR	9
 3430 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3431 	#endif
 3432 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3433 	#endif
 3434 	#endif 
 3435 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3436 	#endif
 3437 	/* LDV_COMMENT_END_PREP */
 3438 	/* content: static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 3439 	/* LDV_COMMENT_BEGIN_PREP */
 3440 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3441 	#define MACB_RX_BUFFER_SIZE	128
 3442 	#define RX_BUFFER_MULTIPLE	64  
 3443 	#define RX_RING_SIZE		512 
 3444 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3445 	#define TX_RING_SIZE		128 
 3446 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3447 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3448 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3449 				 | MACB_BIT(ISR_ROVR))
 3450 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3451 					| MACB_BIT(ISR_RLE)		\
 3452 					| MACB_BIT(TXERR))
 3453 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3454 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3455 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3456 	#define GEM_MTU_MIN_SIZE	68
 3457 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3458 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3459 	#define MACB_HALT_TIMEOUT	1230
 3460 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3461 	#endif
 3462 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3463 	#endif
 3464 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3465 	#endif
 3466 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3467 	#endif
 3468 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3469 	#endif
 3470 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3471 	#endif
 3472 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3473 	#endif
 3474 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3475 	#endif
 3476 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3477 	#endif
 3478 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3479 	#endif
 3480 	/* LDV_COMMENT_END_PREP */
 3481 	/* LDV_COMMENT_BEGIN_PREP */
 3482 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3483 	#endif
 3484 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3485 	#endif
 3486 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3487 	#endif
 3488 	#if defined(CONFIG_OF)
 3489 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3490 	#define AT91ETHER_MAX_RX_DESCR	9
 3491 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3492 	#endif
 3493 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3494 	#endif
 3495 	#endif 
 3496 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3497 	#endif
 3498 	/* LDV_COMMENT_END_PREP */
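/* A hedged sketch (not copied from this trace) of how a get_wol handler with the
 * signature declared above typically reports the MACB_WOL_HAS_MAGIC_PACKET and
 * MACB_WOL_ENABLED bits through struct ethtool_wolinfo. "struct macb" and its
 * "wol" bitmask field come from the driver-local macb.h and are assumptions here. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include "macb.h"		/* assumed: provides struct macb with a "wol" bitmask */

static void macb_get_wol_sketch(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;		/* magic-packet wake-up is available */
		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;	/* ...and currently armed */
	}
}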
 3499 
 3500 	/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 3501 	/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 3502 	/* LDV_COMMENT_BEGIN_PREP */
 3503 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3504 	#define MACB_RX_BUFFER_SIZE	128
 3505 	#define RX_BUFFER_MULTIPLE	64  
 3506 	#define RX_RING_SIZE		512 
 3507 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3508 	#define TX_RING_SIZE		128 
 3509 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3510 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3511 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3512 				 | MACB_BIT(ISR_ROVR))
 3513 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3514 					| MACB_BIT(ISR_RLE)		\
 3515 					| MACB_BIT(TXERR))
 3516 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3517 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3518 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3519 	#define GEM_MTU_MIN_SIZE	68
 3520 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3521 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3522 	#define MACB_HALT_TIMEOUT	1230
 3523 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3524 	#endif
 3525 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3526 	#endif
 3527 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3528 	#endif
 3529 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3530 	#endif
 3531 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3532 	#endif
 3533 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3534 	#endif
 3535 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3536 	#endif
 3537 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3538 	#endif
 3539 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3540 	#endif
 3541 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3542 	#endif
 3543 	/* LDV_COMMENT_END_PREP */
 3544 	/* LDV_COMMENT_BEGIN_PREP */
 3545 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3546 	#endif
 3547 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3548 	#endif
 3549 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3550 	#endif
 3551 	#if defined(CONFIG_OF)
 3552 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3553 	#define AT91ETHER_MAX_RX_DESCR	9
 3554 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3555 	#endif
 3556 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3557 	#endif
 3558 	#endif 
 3559 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3560 	#endif
 3561 	/* LDV_COMMENT_END_PREP */
 3562 	/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 3563 	/* LDV_COMMENT_BEGIN_PREP */
 3564 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3565 	#define MACB_RX_BUFFER_SIZE	128
 3566 	#define RX_BUFFER_MULTIPLE	64  
 3567 	#define RX_RING_SIZE		512 
 3568 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3569 	#define TX_RING_SIZE		128 
 3570 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3571 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3572 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3573 				 | MACB_BIT(ISR_ROVR))
 3574 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3575 					| MACB_BIT(ISR_RLE)		\
 3576 					| MACB_BIT(TXERR))
 3577 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3578 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3579 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3580 	#define GEM_MTU_MIN_SIZE	68
 3581 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3582 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3583 	#define MACB_HALT_TIMEOUT	1230
 3584 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3585 	#endif
 3586 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3587 	#endif
 3588 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3589 	#endif
 3590 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3591 	#endif
 3592 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3593 	#endif
 3594 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3595 	#endif
 3596 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3597 	#endif
 3598 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3599 	#endif
 3600 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3601 	#endif
 3602 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3603 	#endif
 3604 	/* LDV_COMMENT_END_PREP */
 3605 	/* LDV_COMMENT_BEGIN_PREP */
 3606 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3607 	#endif
 3608 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3609 	#endif
 3610 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3611 	#endif
 3612 	#if defined(CONFIG_OF)
 3613 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3614 	#define AT91ETHER_MAX_RX_DESCR	9
 3615 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3616 	#endif
 3617 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3618 	#endif
 3619 	#endif 
 3620 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3621 	#endif
 3622 	/* LDV_COMMENT_END_PREP */
 3623 	/* content: static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)*/
 3624 	/* LDV_COMMENT_BEGIN_PREP */
 3625 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3626 	#define MACB_RX_BUFFER_SIZE	128
 3627 	#define RX_BUFFER_MULTIPLE	64  
 3628 	#define RX_RING_SIZE		512 
 3629 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3630 	#define TX_RING_SIZE		128 
 3631 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3632 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3633 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3634 				 | MACB_BIT(ISR_ROVR))
 3635 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3636 					| MACB_BIT(ISR_RLE)		\
 3637 					| MACB_BIT(TXERR))
 3638 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3639 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3640 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3641 	#define GEM_MTU_MIN_SIZE	68
 3642 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3643 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3644 	#define MACB_HALT_TIMEOUT	1230
 3645 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3646 	#endif
 3647 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3648 	#endif
 3649 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3650 	#endif
 3651 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3652 	#endif
 3653 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3654 	#endif
 3655 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3656 	#endif
 3657 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3658 	#endif
 3659 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3660 	#endif
 3661 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3662 	#endif
 3663 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3664 	#endif
 3665 	/* LDV_COMMENT_END_PREP */
 3666 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_stats" */
 3667 	struct ethtool_stats * var_group4;
 3668 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_stats" */
 3669 	u64 * var_gem_get_ethtool_stats_63_p2;
 3670 	/* LDV_COMMENT_BEGIN_PREP */
 3671 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3672 	#endif
 3673 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3674 	#endif
 3675 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3676 	#endif
 3677 	#if defined(CONFIG_OF)
 3678 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3679 	#define AT91ETHER_MAX_RX_DESCR	9
 3680 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3681 	#endif
 3682 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3683 	#endif
 3684 	#endif 
 3685 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3686 	#endif
 3687 	/* LDV_COMMENT_END_PREP */
 3688 	/* content: static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)*/
 3689 	/* LDV_COMMENT_BEGIN_PREP */
 3690 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3691 	#define MACB_RX_BUFFER_SIZE	128
 3692 	#define RX_BUFFER_MULTIPLE	64  
 3693 	#define RX_RING_SIZE		512 
 3694 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3695 	#define TX_RING_SIZE		128 
 3696 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3697 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3698 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3699 				 | MACB_BIT(ISR_ROVR))
 3700 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3701 					| MACB_BIT(ISR_RLE)		\
 3702 					| MACB_BIT(TXERR))
 3703 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3704 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3705 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3706 	#define GEM_MTU_MIN_SIZE	68
 3707 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3708 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3709 	#define MACB_HALT_TIMEOUT	1230
 3710 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3711 	#endif
 3712 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3713 	#endif
 3714 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3715 	#endif
 3716 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3717 	#endif
 3718 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3719 	#endif
 3720 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3721 	#endif
 3722 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3723 	#endif
 3724 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3725 	#endif
 3726 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3727 	#endif
 3728 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3729 	#endif
 3730 	/* LDV_COMMENT_END_PREP */
 3731 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_strings" */
 3732 	u32  var_gem_get_ethtool_strings_65_p1;
 3733 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_strings" */
 3734 	u8 * var_gem_get_ethtool_strings_65_p2;
 3735 	/* LDV_COMMENT_BEGIN_PREP */
 3736 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3737 	#endif
 3738 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3739 	#endif
 3740 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3741 	#endif
 3742 	#if defined(CONFIG_OF)
 3743 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3744 	#define AT91ETHER_MAX_RX_DESCR	9
 3745 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3746 	#endif
 3747 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3748 	#endif
 3749 	#endif 
 3750 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3751 	#endif
 3752 	/* LDV_COMMENT_END_PREP */
 3753 	/* content: static int gem_get_sset_count(struct net_device *dev, int sset)*/
 3754 	/* LDV_COMMENT_BEGIN_PREP */
 3755 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3756 	#define MACB_RX_BUFFER_SIZE	128
 3757 	#define RX_BUFFER_MULTIPLE	64  
 3758 	#define RX_RING_SIZE		512 
 3759 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3760 	#define TX_RING_SIZE		128 
 3761 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3762 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3763 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3764 				 | MACB_BIT(ISR_ROVR))
 3765 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3766 					| MACB_BIT(ISR_RLE)		\
 3767 					| MACB_BIT(TXERR))
 3768 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3769 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3770 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3771 	#define GEM_MTU_MIN_SIZE	68
 3772 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3773 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3774 	#define MACB_HALT_TIMEOUT	1230
 3775 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3776 	#endif
 3777 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3778 	#endif
 3779 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3780 	#endif
 3781 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3782 	#endif
 3783 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3784 	#endif
 3785 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3786 	#endif
 3787 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3788 	#endif
 3789 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3790 	#endif
 3791 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3792 	#endif
 3793 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3794 	#endif
 3795 	/* LDV_COMMENT_END_PREP */
 3796 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_sset_count" */
 3797 	int  var_gem_get_sset_count_64_p1;
 3798 	/* LDV_COMMENT_BEGIN_PREP */
 3799 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3800 	#endif
 3801 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3802 	#endif
 3803 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3804 	#endif
 3805 	#if defined(CONFIG_OF)
 3806 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3807 	#define AT91ETHER_MAX_RX_DESCR	9
 3808 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3809 	#endif
 3810 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3811 	#endif
 3812 	#endif 
 3813 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3814 	#endif
 3815 	/* LDV_COMMENT_END_PREP */
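/* A hedged sketch of how the five callbacks declared in this gem_ethtool_ops
 * section normally fit together: get_sset_count() reports how many GEM statistics
 * exist, get_strings() names them, and get_ethtool_stats() fills the matching u64
 * values. Field names follow the upstream struct ethtool_ops; the prototypes
 * repeat the "content:" comments above and assume the static definitions that
 * appear elsewhere in this file. */
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int macb_get_regs_len(struct net_device *netdev);
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p);
static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data);
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p);
static int gem_get_sset_count(struct net_device *dev, int sset);

static const struct ethtool_ops gem_ethtool_ops_sketch = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
};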
 3816 
 3817 	/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 3818 	/* content: static int macb_open(struct net_device *dev)*/
 3819 	/* LDV_COMMENT_BEGIN_PREP */
 3820 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3821 	#define MACB_RX_BUFFER_SIZE	128
 3822 	#define RX_BUFFER_MULTIPLE	64  
 3823 	#define RX_RING_SIZE		512 
 3824 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3825 	#define TX_RING_SIZE		128 
 3826 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3827 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3828 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3829 				 | MACB_BIT(ISR_ROVR))
 3830 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3831 					| MACB_BIT(ISR_RLE)		\
 3832 					| MACB_BIT(TXERR))
 3833 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3834 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3835 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3836 	#define GEM_MTU_MIN_SIZE	68
 3837 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3838 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3839 	#define MACB_HALT_TIMEOUT	1230
 3840 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3841 	#endif
 3842 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3843 	#endif
 3844 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3845 	#endif
 3846 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3847 	#endif
 3848 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3849 	#endif
 3850 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3851 	#endif
 3852 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3853 	#endif
 3854 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3855 	#endif
 3856 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3857 	#endif
 3858 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3859 	#endif
 3860 	/* LDV_COMMENT_END_PREP */
 3861 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_open" */
 3862 	static int res_macb_open_58;
 3863 	/* LDV_COMMENT_BEGIN_PREP */
 3864 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3865 	#endif
 3866 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3867 	#endif
 3868 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3869 	#endif
 3870 	#if defined(CONFIG_OF)
 3871 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3872 	#define AT91ETHER_MAX_RX_DESCR	9
 3873 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3874 	#endif
 3875 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3876 	#endif
 3877 	#endif 
 3878 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3879 	#endif
 3880 	/* LDV_COMMENT_END_PREP */
 3881 	/* content: static int macb_close(struct net_device *dev)*/
 3882 	/* LDV_COMMENT_BEGIN_PREP */
 3883 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3884 	#define MACB_RX_BUFFER_SIZE	128
 3885 	#define RX_BUFFER_MULTIPLE	64  
 3886 	#define RX_RING_SIZE		512 
 3887 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3888 	#define TX_RING_SIZE		128 
 3889 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3890 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3891 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3892 				 | MACB_BIT(ISR_ROVR))
 3893 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3894 					| MACB_BIT(ISR_RLE)		\
 3895 					| MACB_BIT(TXERR))
 3896 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3897 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3898 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3899 	#define GEM_MTU_MIN_SIZE	68
 3900 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3901 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3902 	#define MACB_HALT_TIMEOUT	1230
 3903 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3904 	#endif
 3905 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3906 	#endif
 3907 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3908 	#endif
 3909 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3910 	#endif
 3911 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3912 	#endif
 3913 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3914 	#endif
 3915 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3916 	#endif
 3917 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3918 	#endif
 3919 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3920 	#endif
 3921 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3922 	#endif
 3923 	/* LDV_COMMENT_END_PREP */
 3924 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_close" */
 3925 	static int res_macb_close_59;
 3926 	/* LDV_COMMENT_BEGIN_PREP */
 3927 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3928 	#endif
 3929 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3930 	#endif
 3931 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3932 	#endif
 3933 	#if defined(CONFIG_OF)
 3934 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3935 	#define AT91ETHER_MAX_RX_DESCR	9
 3936 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3937 	#endif
 3938 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3939 	#endif
 3940 	#endif 
 3941 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3942 	#endif
 3943 	/* LDV_COMMENT_END_PREP */
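/* A hedged illustration (not the generated main itself) of how the harness uses
 * the res_* variables declared above: the ->ndo_open result is recorded, and the
 * paired ->ndo_stop handler is only reachable after a successful open. The real
 * LDV-generated main dispatches these calls nondeterministically; this hypothetical
 * helper only shows the ordering constraint. */
static int macb_open(struct net_device *dev);
static int macb_close(struct net_device *dev);

static void ldv_open_close_order_sketch(struct net_device *dev)
{
	res_macb_open_58 = macb_open(dev);
	if (res_macb_open_58 != 0)
		return;				/* open failed: close is not called */

	res_macb_close_59 = macb_close(dev);
}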
 3944 	/* content: static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
 3945 	/* LDV_COMMENT_BEGIN_PREP */
 3946 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3947 	#define MACB_RX_BUFFER_SIZE	128
 3948 	#define RX_BUFFER_MULTIPLE	64  
 3949 	#define RX_RING_SIZE		512 
 3950 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3951 	#define TX_RING_SIZE		128 
 3952 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3953 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3954 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3955 				 | MACB_BIT(ISR_ROVR))
 3956 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3957 					| MACB_BIT(ISR_RLE)		\
 3958 					| MACB_BIT(TXERR))
 3959 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3960 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3961 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3962 	#define GEM_MTU_MIN_SIZE	68
 3963 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3964 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3965 	#define MACB_HALT_TIMEOUT	1230
 3966 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3967 	#endif
 3968 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3969 	#endif
 3970 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3971 	#endif
 3972 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3973 	#endif
 3974 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3975 	#endif
 3976 	/* LDV_COMMENT_END_PREP */
 3977 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_start_xmit" */
 3978 	struct sk_buff * var_group5;
 3979 	/* LDV_COMMENT_BEGIN_PREP */
 3980 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3981 	#endif
 3982 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3983 	#endif
 3984 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3985 	#endif
 3986 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3987 	#endif
 3988 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3989 	#endif
 3990 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3991 	#endif
 3992 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3993 	#endif
 3994 	#if defined(CONFIG_OF)
 3995 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3996 	#define AT91ETHER_MAX_RX_DESCR	9
 3997 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3998 	#endif
 3999 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4000 	#endif
 4001 	#endif 
 4002 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4003 	#endif
 4004 	/* LDV_COMMENT_END_PREP */
 4005 	/* content: static void macb_set_rx_mode(struct net_device *dev)*/
 4006 	/* LDV_COMMENT_BEGIN_PREP */
 4007 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4008 	#define MACB_RX_BUFFER_SIZE	128
 4009 	#define RX_BUFFER_MULTIPLE	64  
 4010 	#define RX_RING_SIZE		512 
 4011 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4012 	#define TX_RING_SIZE		128 
 4013 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4014 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4015 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4016 				 | MACB_BIT(ISR_ROVR))
 4017 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4018 					| MACB_BIT(ISR_RLE)		\
 4019 					| MACB_BIT(TXERR))
 4020 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4021 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4022 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4023 	#define GEM_MTU_MIN_SIZE	68
 4024 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4025 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4026 	#define MACB_HALT_TIMEOUT	1230
 4027 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4028 	#endif
 4029 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4030 	#endif
 4031 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4032 	#endif
 4033 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4034 	#endif
 4035 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4036 	#endif
 4037 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4038 	#endif
 4039 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4040 	#endif
 4041 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4042 	#endif
 4043 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4044 	#endif
 4045 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4046 	#endif
 4047 	/* LDV_COMMENT_END_PREP */
 4048 	/* LDV_COMMENT_BEGIN_PREP */
 4049 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4050 	#endif
 4051 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4052 	#endif
 4053 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4054 	#endif
 4055 	#if defined(CONFIG_OF)
 4056 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4057 	#define AT91ETHER_MAX_RX_DESCR	9
 4058 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4059 	#endif
 4060 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4061 	#endif
 4062 	#endif 
 4063 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4064 	#endif
 4065 	/* LDV_COMMENT_END_PREP */
 4066 	/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
 4067 	/* LDV_COMMENT_BEGIN_PREP */
 4068 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4069 	#define MACB_RX_BUFFER_SIZE	128
 4070 	#define RX_BUFFER_MULTIPLE	64  
 4071 	#define RX_RING_SIZE		512 
 4072 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4073 	#define TX_RING_SIZE		128 
 4074 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4075 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4076 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4077 				 | MACB_BIT(ISR_ROVR))
 4078 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4079 					| MACB_BIT(ISR_RLE)		\
 4080 					| MACB_BIT(TXERR))
 4081 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4082 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4083 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4084 	#define GEM_MTU_MIN_SIZE	68
 4085 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4086 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4087 	#define MACB_HALT_TIMEOUT	1230
 4088 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4089 	#endif
 4090 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4091 	#endif
 4092 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4093 	#endif
 4094 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4095 	#endif
 4096 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4097 	#endif
 4098 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4099 	#endif
 4100 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4101 	#endif
 4102 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4103 	#endif
 4104 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4105 	#endif
 4106 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4107 	#endif
 4108 	/* LDV_COMMENT_END_PREP */
 4109 	/* LDV_COMMENT_BEGIN_PREP */
 4110 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4111 	#endif
 4112 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4113 	#endif
 4114 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4115 	#endif
 4116 	#if defined(CONFIG_OF)
 4117 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4118 	#define AT91ETHER_MAX_RX_DESCR	9
 4119 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4120 	#endif
 4121 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4122 	#endif
 4123 	#endif 
 4124 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4125 	#endif
 4126 	/* LDV_COMMENT_END_PREP */
 4127 	/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
 4128 	/* LDV_COMMENT_BEGIN_PREP */
 4129 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4130 	#define MACB_RX_BUFFER_SIZE	128
 4131 	#define RX_BUFFER_MULTIPLE	64  
 4132 	#define RX_RING_SIZE		512 
 4133 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4134 	#define TX_RING_SIZE		128 
 4135 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4136 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4137 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4138 				 | MACB_BIT(ISR_ROVR))
 4139 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4140 					| MACB_BIT(ISR_RLE)		\
 4141 					| MACB_BIT(TXERR))
 4142 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4143 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4144 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4145 	#define GEM_MTU_MIN_SIZE	68
 4146 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4147 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4148 	#define MACB_HALT_TIMEOUT	1230
 4149 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4150 	#endif
 4151 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4152 	#endif
 4153 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4154 	#endif
 4155 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4156 	#endif
 4157 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4158 	#endif
 4159 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4160 	#endif
 4161 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4162 	#endif
 4163 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4164 	#endif
 4165 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4166 	#endif
 4167 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4168 	#endif
 4169 	/* LDV_COMMENT_END_PREP */
 4170 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_ioctl" */
 4171 	struct ifreq * var_group6;
 4172 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_ioctl" */
 4173 	int  var_macb_ioctl_71_p2;
 4174 	/* LDV_COMMENT_BEGIN_PREP */
 4175 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4176 	#endif
 4177 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4178 	#endif
 4179 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4180 	#endif
 4181 	#if defined(CONFIG_OF)
 4182 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4183 	#define AT91ETHER_MAX_RX_DESCR	9
 4184 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4185 	#endif
 4186 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4187 	#endif
 4188 	#endif 
 4189 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4190 	#endif
 4191 	/* LDV_COMMENT_END_PREP */
 4192 	/* content: static int macb_change_mtu(struct net_device *dev, int new_mtu)*/
 4193 	/* LDV_COMMENT_BEGIN_PREP */
 4194 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4195 	#define MACB_RX_BUFFER_SIZE	128
 4196 	#define RX_BUFFER_MULTIPLE	64  
 4197 	#define RX_RING_SIZE		512 
 4198 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4199 	#define TX_RING_SIZE		128 
 4200 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4201 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4202 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4203 				 | MACB_BIT(ISR_ROVR))
 4204 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4205 					| MACB_BIT(ISR_RLE)		\
 4206 					| MACB_BIT(TXERR))
 4207 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4208 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4209 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4210 	#define GEM_MTU_MIN_SIZE	68
 4211 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4212 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4213 	#define MACB_HALT_TIMEOUT	1230
 4214 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4215 	#endif
 4216 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4217 	#endif
 4218 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4219 	#endif
 4220 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4221 	#endif
 4222 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4223 	#endif
 4224 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4225 	#endif
 4226 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4227 	#endif
 4228 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4229 	#endif
 4230 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4231 	#endif
 4232 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4233 	#endif
 4234 	/* LDV_COMMENT_END_PREP */
 4235 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_change_mtu" */
 4236 	int  var_macb_change_mtu_60_p1;
 4237 	/* LDV_COMMENT_BEGIN_PREP */
 4238 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4239 	#endif
 4240 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4241 	#endif
 4242 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4243 	#endif
 4244 	#if defined(CONFIG_OF)
 4245 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4246 	#define AT91ETHER_MAX_RX_DESCR	9
 4247 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4248 	#endif
 4249 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4250 	#endif
 4251 	#endif 
 4252 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4253 	#endif
 4254 	/* LDV_COMMENT_END_PREP */
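/* A hedged sketch of the bounds check implied by GEM_MTU_MIN_SIZE above. The
 * upper bound is device dependent (jumbo-capable GEM revisions allow more than
 * the classic 1500 bytes), so the value used here is an assumption for
 * illustration only. */
#include <linux/errno.h>
#include <linux/netdevice.h>

static int macb_change_mtu_sketch(struct net_device *dev, int new_mtu)
{
	int max_mtu = 1500;			/* assumed non-jumbo upper bound */

	if (new_mtu < GEM_MTU_MIN_SIZE || new_mtu > max_mtu)
		return -EINVAL;			/* reject out-of-range requests */

	dev->mtu = new_mtu;
	return 0;
}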
 4255 	/* content: static void macb_poll_controller(struct net_device *dev)*/
 4256 	/* LDV_COMMENT_BEGIN_PREP */
 4257 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4258 	#define MACB_RX_BUFFER_SIZE	128
 4259 	#define RX_BUFFER_MULTIPLE	64  
 4260 	#define RX_RING_SIZE		512 
 4261 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4262 	#define TX_RING_SIZE		128 
 4263 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4264 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4265 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4266 				 | MACB_BIT(ISR_ROVR))
 4267 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4268 					| MACB_BIT(ISR_RLE)		\
 4269 					| MACB_BIT(TXERR))
 4270 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4271 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4272 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4273 	#define GEM_MTU_MIN_SIZE	68
 4274 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4275 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4276 	#define MACB_HALT_TIMEOUT	1230
 4277 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4278 	#endif
 4279 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4280 	#endif
 4281 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4282 	#endif
 4283 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4284 	#endif
 4285 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4286 	/* LDV_COMMENT_END_PREP */
 4287 	/* LDV_COMMENT_BEGIN_PREP */
 4288 	#endif
 4289 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4290 	#endif
 4291 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4292 	#endif
 4293 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4294 	#endif
 4295 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4296 	#endif
 4297 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4298 	#endif
 4299 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4300 	#endif
 4301 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4302 	#endif
 4303 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4304 	#endif
 4305 	#if defined(CONFIG_OF)
 4306 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4307 	#define AT91ETHER_MAX_RX_DESCR	9
 4308 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4309 	#endif
 4310 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4311 	#endif
 4312 	#endif 
 4313 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4314 	#endif
 4315 	/* LDV_COMMENT_END_PREP */
 4316 	/* content: static int macb_set_features(struct net_device *netdev, netdev_features_t features)*/
 4317 	/* LDV_COMMENT_BEGIN_PREP */
 4318 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4319 	#define MACB_RX_BUFFER_SIZE	128
 4320 	#define RX_BUFFER_MULTIPLE	64  
 4321 	#define RX_RING_SIZE		512 
 4322 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4323 	#define TX_RING_SIZE		128 
 4324 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4325 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4326 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4327 				 | MACB_BIT(ISR_ROVR))
 4328 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4329 					| MACB_BIT(ISR_RLE)		\
 4330 					| MACB_BIT(TXERR))
 4331 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4332 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4333 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4334 	#define GEM_MTU_MIN_SIZE	68
 4335 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4336 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4337 	#define MACB_HALT_TIMEOUT	1230
 4338 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4339 	#endif
 4340 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4341 	#endif
 4342 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4343 	#endif
 4344 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4345 	#endif
 4346 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4347 	#endif
 4348 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4349 	#endif
 4350 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4351 	#endif
 4352 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4353 	#endif
 4354 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4355 	#endif
 4356 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4357 	#endif
 4358 	/* LDV_COMMENT_END_PREP */
 4359 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_set_features" */
 4360 	netdev_features_t  var_macb_set_features_72_p1;
 4361 	/* LDV_COMMENT_BEGIN_PREP */
 4362 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4363 	#endif
 4364 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4365 	#endif
 4366 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4367 	#endif
 4368 	#if defined(CONFIG_OF)
 4369 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4370 	#define AT91ETHER_MAX_RX_DESCR	9
 4371 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4372 	#endif
 4373 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4374 	#endif
 4375 	#endif 
 4376 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4377 	#endif
 4378 	/* LDV_COMMENT_END_PREP */
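/* A hedged sketch of the net_device_ops wiring implied by the handlers listed in
 * this macb_netdev_ops section. Field names follow the upstream struct
 * net_device_ops of this kernel generation; the initializer assumes the static
 * handler definitions that appear elsewhere in this file. */
#include <linux/netdevice.h>

static const struct net_device_ops macb_netdev_ops_sketch = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_features	= macb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
};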
 4379 
 4380 	/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 4381 	/* content: static int at91ether_open(struct net_device *dev)*/
 4382 	/* LDV_COMMENT_BEGIN_PREP */
 4383 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4384 	#define MACB_RX_BUFFER_SIZE	128
 4385 	#define RX_BUFFER_MULTIPLE	64  
 4386 	#define RX_RING_SIZE		512 
 4387 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4388 	#define TX_RING_SIZE		128 
 4389 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4390 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4391 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4392 				 | MACB_BIT(ISR_ROVR))
 4393 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4394 					| MACB_BIT(ISR_RLE)		\
 4395 					| MACB_BIT(TXERR))
 4396 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4397 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4398 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4399 	#define GEM_MTU_MIN_SIZE	68
 4400 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4401 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4402 	#define MACB_HALT_TIMEOUT	1230
 4403 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4404 	#endif
 4405 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4406 	#endif
 4407 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4408 	#endif
 4409 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4410 	#endif
 4411 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4412 	#endif
 4413 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4414 	#endif
 4415 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4416 	#endif
 4417 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4418 	#endif
 4419 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4420 	#endif
 4421 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4422 	#endif
 4423 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4424 	#endif
 4425 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4426 	#endif
 4427 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4428 	#endif
 4429 	#if defined(CONFIG_OF)
 4430 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4431 	#define AT91ETHER_MAX_RX_DESCR	9
 4432 	/* LDV_COMMENT_END_PREP */
 4433 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "at91ether_open" */
 4434 	static int res_at91ether_open_78;
 4435 	/* LDV_COMMENT_BEGIN_PREP */
 4436 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4437 	#endif
 4438 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4439 	#endif
 4440 	#endif 
 4441 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4442 	#endif
 4443 	/* LDV_COMMENT_END_PREP */
 4444 	/* content: static int at91ether_close(struct net_device *dev)*/
 4445 	/* LDV_COMMENT_BEGIN_PREP */
 4446 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4447 	#define MACB_RX_BUFFER_SIZE	128
 4448 	#define RX_BUFFER_MULTIPLE	64  
 4449 	#define RX_RING_SIZE		512 
 4450 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4451 	#define TX_RING_SIZE		128 
 4452 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4453 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4454 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4455 				 | MACB_BIT(ISR_ROVR))
 4456 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4457 					| MACB_BIT(ISR_RLE)		\
 4458 					| MACB_BIT(TXERR))
 4459 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4460 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4461 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4462 	#define GEM_MTU_MIN_SIZE	68
 4463 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4464 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4465 	#define MACB_HALT_TIMEOUT	1230
 4466 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4467 	#endif
 4468 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4469 	#endif
 4470 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4471 	#endif
 4472 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4473 	#endif
 4474 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4475 	#endif
 4476 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4477 	#endif
 4478 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4479 	#endif
 4480 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4481 	#endif
 4482 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4483 	#endif
 4484 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4485 	#endif
 4486 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4487 	#endif
 4488 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4489 	#endif
 4490 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4491 	#endif
 4492 	#if defined(CONFIG_OF)
 4493 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4494 	#define AT91ETHER_MAX_RX_DESCR	9
 4495 	/* LDV_COMMENT_END_PREP */
 4496 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "at91ether_close" */
 4497 	static int res_at91ether_close_79;
 4498 	/* LDV_COMMENT_BEGIN_PREP */
 4499 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4500 	#endif
 4501 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4502 	#endif
 4503 	#endif 
 4504 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4505 	#endif
 4506 	/* LDV_COMMENT_END_PREP */
 4507 	/* content: static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
 4508 	/* LDV_COMMENT_BEGIN_PREP */
 4509 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4510 	#define MACB_RX_BUFFER_SIZE	128
 4511 	#define RX_BUFFER_MULTIPLE	64  
 4512 	#define RX_RING_SIZE		512 
 4513 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4514 	#define TX_RING_SIZE		128 
 4515 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4516 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4517 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4518 				 | MACB_BIT(ISR_ROVR))
 4519 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4520 					| MACB_BIT(ISR_RLE)		\
 4521 					| MACB_BIT(TXERR))
 4522 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4523 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4524 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4525 	#define GEM_MTU_MIN_SIZE	68
 4526 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4527 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4528 	#define MACB_HALT_TIMEOUT	1230
 4529 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4530 	#endif
 4531 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4532 	#endif
 4533 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4534 	#endif
 4535 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4536 	#endif
 4537 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4538 	#endif
 4539 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4540 	#endif
 4541 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4542 	#endif
 4543 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4544 	#endif
 4545 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4546 	#endif
 4547 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4548 	#endif
 4549 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4550 	#endif
 4551 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4552 	#endif
 4553 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4554 	#endif
 4555 	#if defined(CONFIG_OF)
 4556 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4557 	#define AT91ETHER_MAX_RX_DESCR	9
 4558 	/* LDV_COMMENT_END_PREP */
 4559 	/* LDV_COMMENT_BEGIN_PREP */
 4560 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4561 	#endif
 4562 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4563 	#endif
 4564 	#endif 
 4565 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4566 	#endif
 4567 	/* LDV_COMMENT_END_PREP */
 4568 	/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
 4569 	/* LDV_COMMENT_BEGIN_PREP */
 4570 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4571 	#define MACB_RX_BUFFER_SIZE	128
 4572 	#define RX_BUFFER_MULTIPLE	64  
 4573 	#define RX_RING_SIZE		512 
 4574 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4575 	#define TX_RING_SIZE		128 
 4576 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4577 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4578 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4579 				 | MACB_BIT(ISR_ROVR))
 4580 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4581 					| MACB_BIT(ISR_RLE)		\
 4582 					| MACB_BIT(TXERR))
 4583 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4584 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4585 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4586 	#define GEM_MTU_MIN_SIZE	68
 4587 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4588 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4589 	#define MACB_HALT_TIMEOUT	1230
 4590 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4591 	#endif
 4592 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4593 	#endif
 4594 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4595 	#endif
 4596 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4597 	#endif
 4598 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4599 	#endif
 4600 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4601 	#endif
 4602 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4603 	#endif
 4604 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4605 	#endif
 4606 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4607 	#endif
 4608 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4609 	#endif
 4610 	/* LDV_COMMENT_END_PREP */
 4611 	/* LDV_COMMENT_BEGIN_PREP */
 4612 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4613 	#endif
 4614 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4615 	#endif
 4616 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4617 	#endif
 4618 	#if defined(CONFIG_OF)
 4619 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4620 	#define AT91ETHER_MAX_RX_DESCR	9
 4621 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4622 	#endif
 4623 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4624 	#endif
 4625 	#endif 
 4626 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4627 	#endif
 4628 	/* LDV_COMMENT_END_PREP */
 4629 	/* content: static void macb_set_rx_mode(struct net_device *dev)*/
 4630 	/* LDV_COMMENT_BEGIN_PREP */
 4631 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4632 	#define MACB_RX_BUFFER_SIZE	128
 4633 	#define RX_BUFFER_MULTIPLE	64  
 4634 	#define RX_RING_SIZE		512 
 4635 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4636 	#define TX_RING_SIZE		128 
 4637 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4638 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4639 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4640 				 | MACB_BIT(ISR_ROVR))
 4641 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4642 					| MACB_BIT(ISR_RLE)		\
 4643 					| MACB_BIT(TXERR))
 4644 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4645 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4646 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4647 	#define GEM_MTU_MIN_SIZE	68
 4648 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4649 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4650 	#define MACB_HALT_TIMEOUT	1230
 4651 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4652 	#endif
 4653 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4654 	#endif
 4655 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4656 	#endif
 4657 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4658 	#endif
 4659 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4660 	#endif
 4661 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4662 	#endif
 4663 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4664 	#endif
 4665 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4666 	#endif
 4667 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4668 	#endif
 4669 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4670 	#endif
 4671 	/* LDV_COMMENT_END_PREP */
 4672 	/* LDV_COMMENT_BEGIN_PREP */
 4673 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4674 	#endif
 4675 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4676 	#endif
 4677 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4678 	#endif
 4679 	#if defined(CONFIG_OF)
 4680 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4681 	#define AT91ETHER_MAX_RX_DESCR	9
 4682 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4683 	#endif
 4684 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4685 	#endif
 4686 	#endif 
 4687 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4688 	#endif
 4689 	/* LDV_COMMENT_END_PREP */
 4690 	/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
 4691 	/* LDV_COMMENT_BEGIN_PREP */
 4692 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4693 	#define MACB_RX_BUFFER_SIZE	128
 4694 	#define RX_BUFFER_MULTIPLE	64  
 4695 	#define RX_RING_SIZE		512 
 4696 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4697 	#define TX_RING_SIZE		128 
 4698 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4699 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4700 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4701 				 | MACB_BIT(ISR_ROVR))
 4702 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4703 					| MACB_BIT(ISR_RLE)		\
 4704 					| MACB_BIT(TXERR))
 4705 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4706 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4707 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4708 	#define GEM_MTU_MIN_SIZE	68
 4709 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4710 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4711 	#define MACB_HALT_TIMEOUT	1230
 4712 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4713 	#endif
 4714 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4715 	#endif
 4716 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4717 	#endif
 4718 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4719 	#endif
 4720 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4721 	#endif
 4722 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4723 	#endif
 4724 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4725 	#endif
 4726 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4727 	#endif
 4728 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4729 	#endif
 4730 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4731 	#endif
 4732 	/* LDV_COMMENT_END_PREP */
 4733 	/* LDV_COMMENT_BEGIN_PREP */
 4734 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4735 	#endif
 4736 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4737 	#endif
 4738 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4739 	#endif
 4740 	#if defined(CONFIG_OF)
 4741 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4742 	#define AT91ETHER_MAX_RX_DESCR	9
 4743 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4744 	#endif
 4745 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4746 	#endif
 4747 	#endif 
 4748 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4749 	#endif
 4750 	/* LDV_COMMENT_END_PREP */
 4751 	/* content: static void at91ether_poll_controller(struct net_device *dev)*/
 4752 	/* LDV_COMMENT_BEGIN_PREP */
 4753 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4754 	#define MACB_RX_BUFFER_SIZE	128
 4755 	#define RX_BUFFER_MULTIPLE	64  
 4756 	#define RX_RING_SIZE		512 
 4757 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4758 	#define TX_RING_SIZE		128 
 4759 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4760 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4761 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4762 				 | MACB_BIT(ISR_ROVR))
 4763 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4764 					| MACB_BIT(ISR_RLE)		\
 4765 					| MACB_BIT(TXERR))
 4766 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4767 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4768 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4769 	#define GEM_MTU_MIN_SIZE	68
 4770 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4771 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4772 	#define MACB_HALT_TIMEOUT	1230
 4773 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4774 	#endif
 4775 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4776 	#endif
 4777 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4778 	#endif
 4779 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4780 	#endif
 4781 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4782 	#endif
 4783 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4784 	#endif
 4785 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4786 	#endif
 4787 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4788 	#endif
 4789 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4790 	#endif
 4791 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4792 	#endif
 4793 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4794 	#endif
 4795 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4796 	#endif
 4797 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4798 	#endif
 4799 	#if defined(CONFIG_OF)
 4800 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4801 	#define AT91ETHER_MAX_RX_DESCR	9
 4802 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4803 	/* LDV_COMMENT_END_PREP */
 4804 	/* LDV_COMMENT_BEGIN_PREP */
 4805 	#endif
 4806 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4807 	#endif
 4808 	#endif 
 4809 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4810 	#endif
 4811 	/* LDV_COMMENT_END_PREP */
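/* A hedged sketch of the corresponding at91ether_netdev_ops wiring: the older
 * AT91 EMAC path keeps the shared macb handlers for stats, RX mode and ioctl,
 * and substitutes its own open/close/xmit (plus poll_controller under
 * CONFIG_NET_POLL_CONTROLLER), matching the handlers listed in this section. */
#include <linux/netdevice.h>

static const struct net_device_ops at91ether_netdev_ops_sketch = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_do_ioctl		= macb_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};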
 4812 
 4813 	/** STRUCT: struct type: macb_config, struct name: at91sam9260_config **/
 4814 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 4815 	/* LDV_COMMENT_BEGIN_PREP */
 4816 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4817 	#define MACB_RX_BUFFER_SIZE	128
 4818 	#define RX_BUFFER_MULTIPLE	64  
 4819 	#define RX_RING_SIZE		512 
 4820 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4821 	#define TX_RING_SIZE		128 
 4822 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4823 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4824 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4825 				 | MACB_BIT(ISR_ROVR))
 4826 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4827 					| MACB_BIT(ISR_RLE)		\
 4828 					| MACB_BIT(TXERR))
 4829 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4830 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4831 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4832 	#define GEM_MTU_MIN_SIZE	68
 4833 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4834 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4835 	#define MACB_HALT_TIMEOUT	1230
 4836 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4837 	#endif
 4838 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4839 	#endif
 4840 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4841 	#endif
 4842 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4843 	#endif
 4844 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4845 	#endif
 4846 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4847 	#endif
 4848 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4849 	#endif
 4850 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4851 	#endif
 4852 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4853 	#endif
 4854 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4855 	#endif
 4856 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4857 	#endif
 4858 	/* LDV_COMMENT_END_PREP */
 4859 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4860 	struct platform_device * var_group7;
 4861 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4862 	struct clk ** var_group8;
 4863 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4864 	struct clk ** var_macb_clk_init_75_p2;
 4865 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4866 	struct clk ** var_macb_clk_init_75_p3;
 4867 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4868 	struct clk ** var_macb_clk_init_75_p4;
 4869 	/* LDV_COMMENT_BEGIN_PREP */
 4870 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4871 	#endif
 4872 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4873 	#endif
 4874 	#if defined(CONFIG_OF)
 4875 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4876 	#define AT91ETHER_MAX_RX_DESCR	9
 4877 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4878 	#endif
 4879 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4880 	#endif
 4881 	#endif 
 4882 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4883 	#endif
 4884 	/* LDV_COMMENT_END_PREP */
 4885 	/* content: static int macb_init(struct platform_device *pdev)*/
 4886 	/* LDV_COMMENT_BEGIN_PREP */
 4887 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4888 	#define MACB_RX_BUFFER_SIZE	128
 4889 	#define RX_BUFFER_MULTIPLE	64  
 4890 	#define RX_RING_SIZE		512 
 4891 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4892 	#define TX_RING_SIZE		128 
 4893 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4894 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4895 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4896 				 | MACB_BIT(ISR_ROVR))
 4897 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4898 					| MACB_BIT(ISR_RLE)		\
 4899 					| MACB_BIT(TXERR))
 4900 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4901 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4902 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4903 	#define GEM_MTU_MIN_SIZE	68
 4904 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4905 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4906 	#define MACB_HALT_TIMEOUT	1230
 4907 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4908 	#endif
 4909 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4910 	#endif
 4911 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4912 	#endif
 4913 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4914 	#endif
 4915 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4916 	#endif
 4917 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4918 	#endif
 4919 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4920 	#endif
 4921 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4922 	#endif
 4923 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4924 	#endif
 4925 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4926 	#endif
 4927 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4928 	#endif
 4929 	/* LDV_COMMENT_END_PREP */
 4930 	/* LDV_COMMENT_BEGIN_PREP */
 4931 	#if defined(CONFIG_OF)
 4932 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4933 	#define AT91ETHER_MAX_RX_DESCR	9
 4934 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4935 	#endif
 4936 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4937 	#endif
 4938 	#endif 
 4939 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4940 	#endif
 4941 	/* LDV_COMMENT_END_PREP */
 4942 
 4943 	/** STRUCT: struct type: macb_config, struct name: pc302gem_config **/
 4944 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 4945 	/* LDV_COMMENT_BEGIN_PREP */
 4946 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4947 	#define MACB_RX_BUFFER_SIZE	128
 4948 	#define RX_BUFFER_MULTIPLE	64  
 4949 	#define RX_RING_SIZE		512 
 4950 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4951 	#define TX_RING_SIZE		128 
 4952 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4953 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4954 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4955 				 | MACB_BIT(ISR_ROVR))
 4956 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4957 					| MACB_BIT(ISR_RLE)		\
 4958 					| MACB_BIT(TXERR))
 4959 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4960 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4961 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4962 	#define GEM_MTU_MIN_SIZE	68
 4963 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4964 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4965 	#define MACB_HALT_TIMEOUT	1230
 4966 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4967 	#endif
 4968 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4969 	#endif
 4970 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4971 	#endif
 4972 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4973 	#endif
 4974 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4975 	#endif
 4976 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4977 	#endif
 4978 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4979 	#endif
 4980 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4981 	#endif
 4982 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4983 	#endif
 4984 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4985 	#endif
 4986 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4987 	#endif
 4988 	/* LDV_COMMENT_END_PREP */
 4989 	/* LDV_COMMENT_BEGIN_PREP */
 4990 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4991 	#endif
 4992 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4993 	#endif
 4994 	#if defined(CONFIG_OF)
 4995 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4996 	#define AT91ETHER_MAX_RX_DESCR	9
 4997 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4998 	#endif
 4999 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5000 	#endif
 5001 	#endif 
 5002 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5003 	#endif
 5004 	/* LDV_COMMENT_END_PREP */
 5005 	/* content: static int macb_init(struct platform_device *pdev)*/
 5006 	/* LDV_COMMENT_BEGIN_PREP */
 5007 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5008 	#define MACB_RX_BUFFER_SIZE	128
 5009 	#define RX_BUFFER_MULTIPLE	64  
 5010 	#define RX_RING_SIZE		512 
 5011 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5012 	#define TX_RING_SIZE		128 
 5013 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5014 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5015 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5016 				 | MACB_BIT(ISR_ROVR))
 5017 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5018 					| MACB_BIT(ISR_RLE)		\
 5019 					| MACB_BIT(TXERR))
 5020 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5021 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5022 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5023 	#define GEM_MTU_MIN_SIZE	68
 5024 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5025 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5026 	#define MACB_HALT_TIMEOUT	1230
 5027 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5028 	#endif
 5029 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5030 	#endif
 5031 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5032 	#endif
 5033 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5034 	#endif
 5035 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5036 	#endif
 5037 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5038 	#endif
 5039 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5040 	#endif
 5041 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5042 	#endif
 5043 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5044 	#endif
 5045 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5046 	#endif
 5047 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5048 	#endif
 5049 	/* LDV_COMMENT_END_PREP */
 5050 	/* LDV_COMMENT_BEGIN_PREP */
 5051 	#if defined(CONFIG_OF)
 5052 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5053 	#define AT91ETHER_MAX_RX_DESCR	9
 5054 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5055 	#endif
 5056 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5057 	#endif
 5058 	#endif 
 5059 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5060 	#endif
 5061 	/* LDV_COMMENT_END_PREP */
 5062 
 5063 	/** STRUCT: struct type: macb_config, struct name: sama5d2_config **/
 5064 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 5065 	/* LDV_COMMENT_BEGIN_PREP */
 5066 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5067 	#define MACB_RX_BUFFER_SIZE	128
 5068 	#define RX_BUFFER_MULTIPLE	64  
 5069 	#define RX_RING_SIZE		512 
 5070 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5071 	#define TX_RING_SIZE		128 
 5072 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5073 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5074 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5075 				 | MACB_BIT(ISR_ROVR))
 5076 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5077 					| MACB_BIT(ISR_RLE)		\
 5078 					| MACB_BIT(TXERR))
 5079 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5080 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5081 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5082 	#define GEM_MTU_MIN_SIZE	68
 5083 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5084 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5085 	#define MACB_HALT_TIMEOUT	1230
 5086 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5087 	#endif
 5088 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5089 	#endif
 5090 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5091 	#endif
 5092 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5093 	#endif
 5094 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5095 	#endif
 5096 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5097 	#endif
 5098 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5099 	#endif
 5100 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5101 	#endif
 5102 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5103 	#endif
 5104 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5105 	#endif
 5106 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5107 	#endif
 5108 	/* LDV_COMMENT_END_PREP */
 5109 	/* LDV_COMMENT_BEGIN_PREP */
 5110 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5111 	#endif
 5112 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5113 	#endif
 5114 	#if defined(CONFIG_OF)
 5115 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5116 	#define AT91ETHER_MAX_RX_DESCR	9
 5117 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5118 	#endif
 5119 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5120 	#endif
 5121 	#endif 
 5122 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5123 	#endif
 5124 	/* LDV_COMMENT_END_PREP */
 5125 	/* content: static int macb_init(struct platform_device *pdev)*/
 5126 	/* LDV_COMMENT_BEGIN_PREP */
 5127 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5128 	#define MACB_RX_BUFFER_SIZE	128
 5129 	#define RX_BUFFER_MULTIPLE	64  
 5130 	#define RX_RING_SIZE		512 
 5131 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5132 	#define TX_RING_SIZE		128 
 5133 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5134 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5135 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5136 				 | MACB_BIT(ISR_ROVR))
 5137 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5138 					| MACB_BIT(ISR_RLE)		\
 5139 					| MACB_BIT(TXERR))
 5140 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5141 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5142 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5143 	#define GEM_MTU_MIN_SIZE	68
 5144 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5145 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5146 	#define MACB_HALT_TIMEOUT	1230
 5147 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5148 	#endif
 5149 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5150 	#endif
 5151 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5152 	#endif
 5153 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5154 	#endif
 5155 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5156 	#endif
 5157 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5158 	#endif
 5159 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5160 	#endif
 5161 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5162 	#endif
 5163 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5164 	#endif
 5165 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5166 	#endif
 5167 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5168 	#endif
 5169 	/* LDV_COMMENT_END_PREP */
 5170 	/* LDV_COMMENT_BEGIN_PREP */
 5171 	#if defined(CONFIG_OF)
 5172 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5173 	#define AT91ETHER_MAX_RX_DESCR	9
 5174 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5175 	#endif
 5176 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5177 	#endif
 5178 	#endif 
 5179 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5180 	#endif
 5181 	/* LDV_COMMENT_END_PREP */
 5182 
 5183 	/** STRUCT: struct type: macb_config, struct name: sama5d3_config **/
 5184 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 5185 	/* LDV_COMMENT_BEGIN_PREP */
 5186 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5187 	#define MACB_RX_BUFFER_SIZE	128
 5188 	#define RX_BUFFER_MULTIPLE	64  
 5189 	#define RX_RING_SIZE		512 
 5190 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5191 	#define TX_RING_SIZE		128 
 5192 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5193 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5194 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5195 				 | MACB_BIT(ISR_ROVR))
 5196 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5197 					| MACB_BIT(ISR_RLE)		\
 5198 					| MACB_BIT(TXERR))
 5199 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5200 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5201 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5202 	#define GEM_MTU_MIN_SIZE	68
 5203 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5204 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5205 	#define MACB_HALT_TIMEOUT	1230
 5206 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5207 	#endif
 5208 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5209 	#endif
 5210 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5211 	#endif
 5212 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5213 	#endif
 5214 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5215 	#endif
 5216 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5217 	#endif
 5218 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5219 	#endif
 5220 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5221 	#endif
 5222 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5223 	#endif
 5224 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5225 	#endif
 5226 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5227 	#endif
 5228 	/* LDV_COMMENT_END_PREP */
 5229 	/* LDV_COMMENT_BEGIN_PREP */
 5230 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5231 	#endif
 5232 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5233 	#endif
 5234 	#if defined(CONFIG_OF)
 5235 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5236 	#define AT91ETHER_MAX_RX_DESCR	9
 5237 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5238 	#endif
 5239 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5240 	#endif
 5241 	#endif 
 5242 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5243 	#endif
 5244 	/* LDV_COMMENT_END_PREP */
 5245 	/* content: static int macb_init(struct platform_device *pdev)*/
 5246 	/* LDV_COMMENT_BEGIN_PREP */
 5247 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5248 	#define MACB_RX_BUFFER_SIZE	128
 5249 	#define RX_BUFFER_MULTIPLE	64  
 5250 	#define RX_RING_SIZE		512 
 5251 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5252 	#define TX_RING_SIZE		128 
 5253 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5254 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5255 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5256 				 | MACB_BIT(ISR_ROVR))
 5257 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5258 					| MACB_BIT(ISR_RLE)		\
 5259 					| MACB_BIT(TXERR))
 5260 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5261 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5262 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5263 	#define GEM_MTU_MIN_SIZE	68
 5264 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5265 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5266 	#define MACB_HALT_TIMEOUT	1230
 5267 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5268 	#endif
 5269 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5270 	#endif
 5271 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5272 	#endif
 5273 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5274 	#endif
 5275 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5276 	#endif
 5277 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5278 	#endif
 5279 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5280 	#endif
 5281 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5282 	#endif
 5283 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5284 	#endif
 5285 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5286 	#endif
 5287 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5288 	#endif
 5289 	/* LDV_COMMENT_END_PREP */
 5290 	/* LDV_COMMENT_BEGIN_PREP */
 5291 	#if defined(CONFIG_OF)
 5292 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5293 	#define AT91ETHER_MAX_RX_DESCR	9
 5294 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5295 	#endif
 5296 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5297 	#endif
 5298 	#endif 
 5299 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5300 	#endif
 5301 	/* LDV_COMMENT_END_PREP */
 5302 
 5303 	/** STRUCT: struct type: macb_config, struct name: sama5d4_config **/
 5304 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 5305 	/* LDV_COMMENT_BEGIN_PREP */
 5306 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5307 	#define MACB_RX_BUFFER_SIZE	128
 5308 	#define RX_BUFFER_MULTIPLE	64  
 5309 	#define RX_RING_SIZE		512 
 5310 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5311 	#define TX_RING_SIZE		128 
 5312 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5313 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5314 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5315 				 | MACB_BIT(ISR_ROVR))
 5316 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5317 					| MACB_BIT(ISR_RLE)		\
 5318 					| MACB_BIT(TXERR))
 5319 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5320 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5321 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5322 	#define GEM_MTU_MIN_SIZE	68
 5323 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5324 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5325 	#define MACB_HALT_TIMEOUT	1230
 5326 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5327 	#endif
 5328 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5329 	#endif
 5330 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5331 	#endif
 5332 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5333 	#endif
 5334 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5335 	#endif
 5336 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5337 	#endif
 5338 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5339 	#endif
 5340 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5341 	#endif
 5342 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5343 	#endif
 5344 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5345 	#endif
 5346 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5347 	#endif
 5348 	/* LDV_COMMENT_END_PREP */
 5349 	/* LDV_COMMENT_BEGIN_PREP */
 5350 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5351 	#endif
 5352 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5353 	#endif
 5354 	#if defined(CONFIG_OF)
 5355 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5356 	#define AT91ETHER_MAX_RX_DESCR	9
 5357 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5358 	#endif
 5359 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5360 	#endif
 5361 	#endif 
 5362 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5363 	#endif
 5364 	/* LDV_COMMENT_END_PREP */
 5365 	/* content: static int macb_init(struct platform_device *pdev)*/
 5366 	/* LDV_COMMENT_BEGIN_PREP */
 5367 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5368 	#define MACB_RX_BUFFER_SIZE	128
 5369 	#define RX_BUFFER_MULTIPLE	64  
 5370 	#define RX_RING_SIZE		512 
 5371 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5372 	#define TX_RING_SIZE		128 
 5373 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5374 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5375 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5376 				 | MACB_BIT(ISR_ROVR))
 5377 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5378 					| MACB_BIT(ISR_RLE)		\
 5379 					| MACB_BIT(TXERR))
 5380 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5381 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5382 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5383 	#define GEM_MTU_MIN_SIZE	68
 5384 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5385 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5386 	#define MACB_HALT_TIMEOUT	1230
 5387 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5388 	#endif
 5389 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5390 	#endif
 5391 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5392 	#endif
 5393 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5394 	#endif
 5395 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5396 	#endif
 5397 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5398 	#endif
 5399 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5400 	#endif
 5401 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5402 	#endif
 5403 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5404 	#endif
 5405 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5406 	#endif
 5407 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5408 	#endif
 5409 	/* LDV_COMMENT_END_PREP */
 5410 	/* LDV_COMMENT_BEGIN_PREP */
 5411 	#if defined(CONFIG_OF)
 5412 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5413 	#define AT91ETHER_MAX_RX_DESCR	9
 5414 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5415 	#endif
 5416 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5417 	#endif
 5418 	#endif 
 5419 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5420 	#endif
 5421 	/* LDV_COMMENT_END_PREP */
 5422 
 5423 	/** STRUCT: struct type: macb_config, struct name: emac_config **/
 5424 	/* content: static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 5425 	/* LDV_COMMENT_BEGIN_PREP */
 5426 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5427 	#define MACB_RX_BUFFER_SIZE	128
 5428 	#define RX_BUFFER_MULTIPLE	64  
 5429 	#define RX_RING_SIZE		512 
 5430 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5431 	#define TX_RING_SIZE		128 
 5432 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5433 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5434 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5435 				 | MACB_BIT(ISR_ROVR))
 5436 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5437 					| MACB_BIT(ISR_RLE)		\
 5438 					| MACB_BIT(TXERR))
 5439 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5440 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5441 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5442 	#define GEM_MTU_MIN_SIZE	68
 5443 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5444 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5445 	#define MACB_HALT_TIMEOUT	1230
 5446 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5447 	#endif
 5448 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5449 	#endif
 5450 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5451 	#endif
 5452 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5453 	#endif
 5454 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5455 	#endif
 5456 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5457 	#endif
 5458 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5459 	#endif
 5460 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5461 	#endif
 5462 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5463 	#endif
 5464 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5465 	#endif
 5466 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5467 	#endif
 5468 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5469 	#endif
 5470 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5471 	#endif
 5472 	#if defined(CONFIG_OF)
 5473 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5474 	#define AT91ETHER_MAX_RX_DESCR	9
 5475 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5476 	#endif
 5477 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5478 	#endif
 5479 	/* LDV_COMMENT_END_PREP */
 5480 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_clk_init" */
 5481 	struct clk ** var_at91ether_clk_init_84_p2;
 5482 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_clk_init" */
 5483 	struct clk ** var_at91ether_clk_init_84_p3;
 5484 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_clk_init" */
 5485 	struct clk ** var_at91ether_clk_init_84_p4;
 5486 	/* LDV_COMMENT_BEGIN_PREP */
 5487 	#endif 
 5488 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5489 	#endif
 5490 	/* LDV_COMMENT_END_PREP */
 5491 	/* content: static int at91ether_init(struct platform_device *pdev)*/
 5492 	/* LDV_COMMENT_BEGIN_PREP */
 5493 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5494 	#define MACB_RX_BUFFER_SIZE	128
 5495 	#define RX_BUFFER_MULTIPLE	64  
 5496 	#define RX_RING_SIZE		512 
 5497 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5498 	#define TX_RING_SIZE		128 
 5499 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5500 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5501 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5502 				 | MACB_BIT(ISR_ROVR))
 5503 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5504 					| MACB_BIT(ISR_RLE)		\
 5505 					| MACB_BIT(TXERR))
 5506 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5507 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5508 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5509 	#define GEM_MTU_MIN_SIZE	68
 5510 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5511 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5512 	#define MACB_HALT_TIMEOUT	1230
 5513 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5514 	#endif
 5515 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5516 	#endif
 5517 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5518 	#endif
 5519 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5520 	#endif
 5521 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5522 	#endif
 5523 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5524 	#endif
 5525 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5526 	#endif
 5527 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5528 	#endif
 5529 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5530 	#endif
 5531 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5532 	#endif
 5533 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5534 	#endif
 5535 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5536 	#endif
 5537 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5538 	#endif
 5539 	#if defined(CONFIG_OF)
 5540 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5541 	#define AT91ETHER_MAX_RX_DESCR	9
 5542 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5543 	#endif
 5544 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5545 	#endif
 5546 	/* LDV_COMMENT_END_PREP */
 5547 	/* LDV_COMMENT_BEGIN_PREP */
 5548 	#endif 
 5549 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5550 	#endif
 5551 	/* LDV_COMMENT_END_PREP */
 5552 
 5553 	/** STRUCT: struct type: macb_config, struct name: np4_config **/
 5554 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 5555 	/* LDV_COMMENT_BEGIN_PREP */
 5556 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5557 	#define MACB_RX_BUFFER_SIZE	128
 5558 	#define RX_BUFFER_MULTIPLE	64  
 5559 	#define RX_RING_SIZE		512 
 5560 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5561 	#define TX_RING_SIZE		128 
 5562 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5563 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5564 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5565 				 | MACB_BIT(ISR_ROVR))
 5566 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5567 					| MACB_BIT(ISR_RLE)		\
 5568 					| MACB_BIT(TXERR))
 5569 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5570 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5571 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5572 	#define GEM_MTU_MIN_SIZE	68
 5573 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5574 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5575 	#define MACB_HALT_TIMEOUT	1230
 5576 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5577 	#endif
 5578 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5579 	#endif
 5580 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5581 	#endif
 5582 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5583 	#endif
 5584 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5585 	#endif
 5586 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5587 	#endif
 5588 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5589 	#endif
 5590 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5591 	#endif
 5592 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5593 	#endif
 5594 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5595 	#endif
 5596 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5597 	#endif
 5598 	/* LDV_COMMENT_END_PREP */
 5599 	/* LDV_COMMENT_BEGIN_PREP */
 5600 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5601 	#endif
 5602 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5603 	#endif
 5604 	#if defined(CONFIG_OF)
 5605 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5606 	#define AT91ETHER_MAX_RX_DESCR	9
 5607 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5608 	#endif
 5609 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5610 	#endif
 5611 	#endif 
 5612 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5613 	#endif
 5614 	/* LDV_COMMENT_END_PREP */
 5615 	/* content: static int macb_init(struct platform_device *pdev)*/
 5616 	/* LDV_COMMENT_BEGIN_PREP */
 5617 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5618 	#define MACB_RX_BUFFER_SIZE	128
 5619 	#define RX_BUFFER_MULTIPLE	64  
 5620 	#define RX_RING_SIZE		512 
 5621 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5622 	#define TX_RING_SIZE		128 
 5623 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5624 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5625 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5626 				 | MACB_BIT(ISR_ROVR))
 5627 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5628 					| MACB_BIT(ISR_RLE)		\
 5629 					| MACB_BIT(TXERR))
 5630 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5631 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5632 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5633 	#define GEM_MTU_MIN_SIZE	68
 5634 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5635 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5636 	#define MACB_HALT_TIMEOUT	1230
 5637 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5638 	#endif
 5639 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5640 	#endif
 5641 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5642 	#endif
 5643 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5644 	#endif
 5645 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5646 	#endif
 5647 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5648 	#endif
 5649 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5650 	#endif
 5651 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5652 	#endif
 5653 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5654 	#endif
 5655 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5656 	#endif
 5657 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5658 	#endif
 5659 	/* LDV_COMMENT_END_PREP */
 5660 	/* LDV_COMMENT_BEGIN_PREP */
 5661 	#if defined(CONFIG_OF)
 5662 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5663 	#define AT91ETHER_MAX_RX_DESCR	9
 5664 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5665 	#endif
 5666 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5667 	#endif
 5668 	#endif 
 5669 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5670 	#endif
 5671 	/* LDV_COMMENT_END_PREP */
 5672 
 5673 	/** STRUCT: struct type: macb_config, struct name: zynqmp_config **/
 5674 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 5675 	/* LDV_COMMENT_BEGIN_PREP */
 5676 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5677 	#define MACB_RX_BUFFER_SIZE	128
 5678 	#define RX_BUFFER_MULTIPLE	64  
 5679 	#define RX_RING_SIZE		512 
 5680 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5681 	#define TX_RING_SIZE		128 
 5682 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5683 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5684 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5685 				 | MACB_BIT(ISR_ROVR))
 5686 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5687 					| MACB_BIT(ISR_RLE)		\
 5688 					| MACB_BIT(TXERR))
 5689 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5690 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5691 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5692 	#define GEM_MTU_MIN_SIZE	68
 5693 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5694 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5695 	#define MACB_HALT_TIMEOUT	1230
 5696 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5697 	#endif
 5698 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5699 	#endif
 5700 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5701 	#endif
 5702 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5703 	#endif
 5704 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5705 	#endif
 5706 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5707 	#endif
 5708 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5709 	#endif
 5710 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5711 	#endif
 5712 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5713 	#endif
 5714 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5715 	#endif
 5716 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5717 	#endif
 5718 	/* LDV_COMMENT_END_PREP */
 5719 	/* LDV_COMMENT_BEGIN_PREP */
 5720 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5721 	#endif
 5722 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5723 	#endif
 5724 	#if defined(CONFIG_OF)
 5725 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5726 	#define AT91ETHER_MAX_RX_DESCR	9
 5727 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5728 	#endif
 5729 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5730 	#endif
 5731 	#endif 
 5732 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5733 	#endif
 5734 	/* LDV_COMMENT_END_PREP */
 5735 	/* content: static int macb_init(struct platform_device *pdev)*/
 5736 	/* LDV_COMMENT_BEGIN_PREP */
 5737 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5738 	#define MACB_RX_BUFFER_SIZE	128
 5739 	#define RX_BUFFER_MULTIPLE	64  
 5740 	#define RX_RING_SIZE		512 
 5741 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5742 	#define TX_RING_SIZE		128 
 5743 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5744 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5745 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5746 				 | MACB_BIT(ISR_ROVR))
 5747 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5748 					| MACB_BIT(ISR_RLE)		\
 5749 					| MACB_BIT(TXERR))
 5750 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5751 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5752 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5753 	#define GEM_MTU_MIN_SIZE	68
 5754 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5755 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5756 	#define MACB_HALT_TIMEOUT	1230
 5757 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5758 	#endif
 5759 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5760 	#endif
 5761 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5762 	#endif
 5763 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5764 	#endif
 5765 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5766 	#endif
 5767 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5768 	#endif
 5769 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5770 	#endif
 5771 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5772 	#endif
 5773 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5774 	#endif
 5775 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5776 	#endif
 5777 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5778 	#endif
 5779 	/* LDV_COMMENT_END_PREP */
 5780 	/* LDV_COMMENT_BEGIN_PREP */
 5781 	#if defined(CONFIG_OF)
 5782 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5783 	#define AT91ETHER_MAX_RX_DESCR	9
 5784 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5785 	#endif
 5786 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5787 	#endif
 5788 	#endif 
 5789 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5790 	#endif
 5791 	/* LDV_COMMENT_END_PREP */
 5792 
 5793 	/** STRUCT: struct type: macb_config, struct name: zynq_config **/
 5794 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 5795 	/* LDV_COMMENT_BEGIN_PREP */
 5796 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5797 	#define MACB_RX_BUFFER_SIZE	128
 5798 	#define RX_BUFFER_MULTIPLE	64  
 5799 	#define RX_RING_SIZE		512 
 5800 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5801 	#define TX_RING_SIZE		128 
 5802 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5803 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5804 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5805 				 | MACB_BIT(ISR_ROVR))
 5806 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5807 					| MACB_BIT(ISR_RLE)		\
 5808 					| MACB_BIT(TXERR))
 5809 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5810 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5811 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5812 	#define GEM_MTU_MIN_SIZE	68
 5813 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5814 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5815 	#define MACB_HALT_TIMEOUT	1230
 5816 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5817 	#endif
 5818 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5819 	#endif
 5820 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5821 	#endif
 5822 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5823 	#endif
 5824 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5825 	#endif
 5826 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5827 	#endif
 5828 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5829 	#endif
 5830 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5831 	#endif
 5832 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5833 	#endif
 5834 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5835 	#endif
 5836 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5837 	#endif
 5838 	/* LDV_COMMENT_END_PREP */
 5839 	/* LDV_COMMENT_BEGIN_PREP */
 5840 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5841 	#endif
 5842 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5843 	#endif
 5844 	#if defined(CONFIG_OF)
 5845 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5846 	#define AT91ETHER_MAX_RX_DESCR	9
 5847 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5848 	#endif
 5849 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5850 	#endif
 5851 	#endif 
 5852 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5853 	#endif
 5854 	/* LDV_COMMENT_END_PREP */
 5855 	/* content: static int macb_init(struct platform_device *pdev)*/
 5856 	/* LDV_COMMENT_BEGIN_PREP */
 5857 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5858 	#define MACB_RX_BUFFER_SIZE	128
 5859 	#define RX_BUFFER_MULTIPLE	64  
 5860 	#define RX_RING_SIZE		512 
 5861 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5862 	#define TX_RING_SIZE		128 
 5863 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5864 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5865 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5866 				 | MACB_BIT(ISR_ROVR))
 5867 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5868 					| MACB_BIT(ISR_RLE)		\
 5869 					| MACB_BIT(TXERR))
 5870 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5871 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5872 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5873 	#define GEM_MTU_MIN_SIZE	68
 5874 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5875 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5876 	#define MACB_HALT_TIMEOUT	1230
 5877 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5878 	#endif
 5879 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5880 	#endif
 5881 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5882 	#endif
 5883 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5884 	#endif
 5885 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5886 	#endif
 5887 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5888 	#endif
 5889 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5890 	#endif
 5891 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5892 	#endif
 5893 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5894 	#endif
 5895 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5896 	#endif
 5897 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5898 	#endif
 5899 	/* LDV_COMMENT_END_PREP */
 5900 	/* LDV_COMMENT_BEGIN_PREP */
 5901 	#if defined(CONFIG_OF)
 5902 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5903 	#define AT91ETHER_MAX_RX_DESCR	9
 5904 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5905 	#endif
 5906 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5907 	#endif
 5908 	#endif 
 5909 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5910 	#endif
 5911 	/* LDV_COMMENT_END_PREP */
 5912 
 5913 	/** STRUCT: struct type: platform_driver, struct name: macb_driver **/
 5914 	/* content: static int macb_probe(struct platform_device *pdev)*/
 5915 	/* LDV_COMMENT_BEGIN_PREP */
 5916 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5917 	#define MACB_RX_BUFFER_SIZE	128
 5918 	#define RX_BUFFER_MULTIPLE	64  
 5919 	#define RX_RING_SIZE		512 
 5920 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5921 	#define TX_RING_SIZE		128 
 5922 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5923 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5924 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5925 				 | MACB_BIT(ISR_ROVR))
 5926 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5927 					| MACB_BIT(ISR_RLE)		\
 5928 					| MACB_BIT(TXERR))
 5929 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5930 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5931 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5932 	#define GEM_MTU_MIN_SIZE	68
 5933 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5934 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5935 	#define MACB_HALT_TIMEOUT	1230
 5936 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5937 	#endif
 5938 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5939 	#endif
 5940 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5941 	#endif
 5942 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5943 	#endif
 5944 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5945 	#endif
 5946 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 5947 	#endif
 5948 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5949 	#endif
 5950 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5951 	#endif
 5952 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5953 	#endif
 5954 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5955 	#endif
 5956 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5957 	#endif
 5958 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5959 	#endif
 5960 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5961 	#endif
 5962 	#if defined(CONFIG_OF)
 5963 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5964 	#define AT91ETHER_MAX_RX_DESCR	9
 5965 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5966 	#endif
 5967 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5968 	#endif
 5969 	#endif 
 5970 	/* LDV_COMMENT_END_PREP */
 5971 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_probe" */
 5972 	static int res_macb_probe_86;
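	/* res_macb_probe_86 stores the return value of macb_probe(); the harness presumably
	 * checks it so that remove and other driver scenarios are exercised only after a
	 * successful probe. (Descriptive note; the check itself is outside this excerpt.) */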
 5973 	/* content: static int macb_remove(struct platform_device *pdev)*/
 5974 	/* LDV_COMMENT_BEGIN_PREP */
 5975 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5976 	#define MACB_RX_BUFFER_SIZE	128
 5977 	#define RX_BUFFER_MULTIPLE	64  
 5978 	#define RX_RING_SIZE		512 
 5979 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5980 	#define TX_RING_SIZE		128 
 5981 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5982 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5983 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5984 				 | MACB_BIT(ISR_ROVR))
 5985 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5986 					| MACB_BIT(ISR_RLE)		\
 5987 					| MACB_BIT(TXERR))
 5988 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5989 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 5990 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 5991 	#define GEM_MTU_MIN_SIZE	68
 5992 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 5993 	#define MACB_WOL_ENABLED		(0x1 << 1)
 5994 	#define MACB_HALT_TIMEOUT	1230
 5995 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5996 	#endif
 5997 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5998 	#endif
 5999 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6000 	#endif
 6001 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6002 	#endif
 6003 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6004 	#endif
 6005 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6006 	#endif
 6007 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6008 	#endif
 6009 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6010 	#endif
 6011 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6012 	#endif
 6013 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6014 	#endif
 6015 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6016 	#endif
 6017 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6018 	#endif
 6019 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6020 	#endif
 6021 	#if defined(CONFIG_OF)
 6022 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6023 	#define AT91ETHER_MAX_RX_DESCR	9
 6024 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6025 	#endif
 6026 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6027 	#endif
 6028 	#endif 
 6029 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6030 	#endif
 6031 	/* LDV_COMMENT_END_PREP */
 6032 
 6033 	/** CALLBACK SECTION request_irq **/
 6034 	/* content: static irqreturn_t at91ether_interrupt(int irq, void *dev_id)*/
 6035 	/* LDV_COMMENT_BEGIN_PREP */
 6036 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6037 	#define MACB_RX_BUFFER_SIZE	128
 6038 	#define RX_BUFFER_MULTIPLE	64  
 6039 	#define RX_RING_SIZE		512 
 6040 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6041 	#define TX_RING_SIZE		128 
 6042 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6043 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6044 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6045 				 | MACB_BIT(ISR_ROVR))
 6046 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6047 					| MACB_BIT(ISR_RLE)		\
 6048 					| MACB_BIT(TXERR))
 6049 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6050 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6051 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6052 	#define GEM_MTU_MIN_SIZE	68
 6053 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6054 	#define MACB_WOL_ENABLED		(0x1 << 1)
 6055 	#define MACB_HALT_TIMEOUT	1230
 6056 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6057 	#endif
 6058 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6059 	#endif
 6060 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6061 	#endif
 6062 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6063 	#endif
 6064 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6065 	#endif
 6066 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6067 	#endif
 6068 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6069 	#endif
 6070 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6071 	#endif
 6072 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6073 	#endif
 6074 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6075 	#endif
 6076 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6077 	#endif
 6078 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6079 	#endif
 6080 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6081 	#endif
 6082 	#if defined(CONFIG_OF)
 6083 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6084 	#define AT91ETHER_MAX_RX_DESCR	9
 6085 	/* LDV_COMMENT_END_PREP */
 6086 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_interrupt" */
 6087 	int  var_at91ether_interrupt_82_p0;
 6088 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_interrupt" */
 6089 	void * var_at91ether_interrupt_82_p1;
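	/* var_at91ether_interrupt_82_p0/p1 model the irq number and dev_id cookie that the
	 * environment passes when it fires the at91ether_interrupt() handler. */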
 6090 	/* LDV_COMMENT_BEGIN_PREP */
 6091 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6092 	#endif
 6093 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6094 	#endif
 6095 	#endif 
 6096 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6097 	#endif
 6098 	/* LDV_COMMENT_END_PREP */
 6099 	/* content: static irqreturn_t macb_interrupt(int irq, void *dev_id)*/
 6100 	/* LDV_COMMENT_BEGIN_PREP */
 6101 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6102 	#define MACB_RX_BUFFER_SIZE	128
 6103 	#define RX_BUFFER_MULTIPLE	64  
 6104 	#define RX_RING_SIZE		512 
 6105 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6106 	#define TX_RING_SIZE		128 
 6107 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6108 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6109 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6110 				 | MACB_BIT(ISR_ROVR))
 6111 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6112 					| MACB_BIT(ISR_RLE)		\
 6113 					| MACB_BIT(TXERR))
 6114 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6115 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6116 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6117 	#define GEM_MTU_MIN_SIZE	68
 6118 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6119 	#define MACB_WOL_ENABLED		(0x1 << 1)
 6120 	#define MACB_HALT_TIMEOUT	1230
 6121 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6122 	#endif
 6123 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6124 	#endif
 6125 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6126 	#endif
 6127 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6128 	#endif
 6129 	/* LDV_COMMENT_END_PREP */
 6130 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_interrupt" */
 6131 	int  var_macb_interrupt_34_p0;
 6132 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_interrupt" */
 6133 	void * var_macb_interrupt_34_p1;
 6134 	/* LDV_COMMENT_BEGIN_PREP */
 6135 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6136 	#endif
 6137 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6138 	#endif
 6139 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6140 	#endif
 6141 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6142 	#endif
 6143 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6144 	#endif
 6145 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6146 	#endif
 6147 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6148 	#endif
 6149 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6150 	#endif
 6151 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6152 	#endif
 6153 	#if defined(CONFIG_OF)
 6154 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6155 	#define AT91ETHER_MAX_RX_DESCR	9
 6156 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6157 	#endif
 6158 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6159 	#endif
 6160 	#endif 
 6161 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6162 	#endif
 6163 	/* LDV_COMMENT_END_PREP */
 6164 
 6165 
 6166 
 6167 
 6168 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 6169 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 6170 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 6171 	LDV_IN_INTERRUPT=1;
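	/* LDV_IN_INTERRUPT presumably tracks the modelled execution context: 1 for process
	 * context, raised while one of the registered interrupt handlers (e.g. macb_interrupt)
	 * is being invoked by the harness. */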
 6172 
 6173 
 6174 
 6175 
 6176 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 6177 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 6178 	/*============================= FUNCTION CALL SECTION       =============================*/
 6179 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 6180 	ldv_initialize();
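	/* ldv_initialize() appears to reset the LDV rule model (lock and context counters)
	 * before the environment starts invoking the driver entry points below. */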
 6181 	
 6182 
 6183 	
 6184 
 6185 	int ldv_s_macb_netdev_ops_net_device_ops = 0;
 6188 	int ldv_s_at91ether_netdev_ops_net_device_ops = 0;
 6209 	int ldv_s_macb_driver_platform_driver = 0;
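	/* The ldv_s_* counters above seem to track multi-step callback scenarios (e.g. probe
	 * before remove, open before stop); the loop below keeps running until every scenario
	 * that was started has been driven to completion. */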
 6210 
 6211 	
 6212 
 6213 
 6214 	while(  nondet_int()
 6215 		|| !(ldv_s_macb_netdev_ops_net_device_ops == 0)
 6216 		|| !(ldv_s_at91ether_netdev_ops_net_device_ops == 0)
 6217 		|| !(ldv_s_macb_driver_platform_driver == 0)
 6218 	) {
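		/* Each iteration nondeterministically picks one driver callback to invoke via
		 * nondet_int(), so the verifier explores arbitrary interleavings of the ethtool,
		 * net_device, platform_driver and IRQ handlers declared above. */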
 6219 
 6220 		switch(nondet_int()) {
 6221 
 6222 			case 0: {
 6223 
 6224 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6225 				
 6226 
 6227 				/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 6228 				/* LDV_COMMENT_BEGIN_PREP */
 6229 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6230 				#define MACB_RX_BUFFER_SIZE	128
 6231 				#define RX_BUFFER_MULTIPLE	64  
 6232 				#define RX_RING_SIZE		512 
 6233 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6234 				#define TX_RING_SIZE		128 
 6235 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6236 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6237 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6238 				 | MACB_BIT(ISR_ROVR))
 6239 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6240 					| MACB_BIT(ISR_RLE)		\
 6241 					| MACB_BIT(TXERR))
 6242 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6243 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6244 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6245 				#define GEM_MTU_MIN_SIZE	68
 6246 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6247 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6248 				#define MACB_HALT_TIMEOUT	1230
 6249 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6250 				#endif
 6251 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6252 				#endif
 6253 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6254 				#endif
 6255 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6256 				#endif
 6257 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6258 				#endif
 6259 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6260 				#endif
 6261 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6262 				#endif
 6263 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6264 				#endif
 6265 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6266 				#endif
 6267 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6268 				#endif
 6269 				/* LDV_COMMENT_END_PREP */
 6270 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs_len" from driver structure with callbacks "macb_ethtool_ops" */
 6271 				ldv_handler_precall();
 6272 				macb_get_regs_len( var_group1);
 6273 				/* LDV_COMMENT_BEGIN_PREP */
 6274 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6275 				#endif
 6276 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6277 				#endif
 6278 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6279 				#endif
 6280 				#if defined(CONFIG_OF)
 6281 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6282 				#define AT91ETHER_MAX_RX_DESCR	9
 6283 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6284 				#endif
 6285 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6286 				#endif
 6287 				#endif 
 6288 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6289 				#endif
 6290 				/* LDV_COMMENT_END_PREP */
 6291 				
 6292 
 6293 				
 6294 
 6295 			}
 6296 
 6297 			break;
 6298 			case 1: {
 6299 
 6300 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6301 				
 6302 
 6303 				/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 6304 				/* LDV_COMMENT_BEGIN_PREP */
 6305 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6306 				#define MACB_RX_BUFFER_SIZE	128
 6307 				#define RX_BUFFER_MULTIPLE	64  
 6308 				#define RX_RING_SIZE		512 
 6309 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6310 				#define TX_RING_SIZE		128 
 6311 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6312 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6313 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6314 				 | MACB_BIT(ISR_ROVR))
 6315 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6316 					| MACB_BIT(ISR_RLE)		\
 6317 					| MACB_BIT(TXERR))
 6318 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6319 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6320 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6321 				#define GEM_MTU_MIN_SIZE	68
 6322 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6323 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6324 				#define MACB_HALT_TIMEOUT	1230
 6325 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6326 				#endif
 6327 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6328 				#endif
 6329 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6330 				#endif
 6331 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6332 				#endif
 6333 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6334 				#endif
 6335 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6336 				#endif
 6337 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6338 				#endif
 6339 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6340 				#endif
 6341 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6342 				#endif
 6343 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6344 				#endif
 6345 				/* LDV_COMMENT_END_PREP */
 6346 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs" from driver structure with callbacks "macb_ethtool_ops" */
 6347 				ldv_handler_precall();
 6348 				macb_get_regs( var_group1, var_group2, var_macb_get_regs_68_p2);
 6349 				/* LDV_COMMENT_BEGIN_PREP */
 6350 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6351 				#endif
 6352 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6353 				#endif
 6354 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6355 				#endif
 6356 				#if defined(CONFIG_OF)
 6357 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6358 				#define AT91ETHER_MAX_RX_DESCR	9
 6359 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6360 				#endif
 6361 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6362 				#endif
 6363 				#endif 
 6364 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6365 				#endif
 6366 				/* LDV_COMMENT_END_PREP */
 6367 				
 6368 
 6369 				
 6370 
 6371 			}
 6372 
 6373 			break;
 6374 			case 2: {
 6375 
 6376 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6377 				
 6378 
 6379 				/* content: static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 6380 				/* LDV_COMMENT_BEGIN_PREP */
 6381 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6382 				#define MACB_RX_BUFFER_SIZE	128
 6383 				#define RX_BUFFER_MULTIPLE	64  
 6384 				#define RX_RING_SIZE		512 
 6385 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6386 				#define TX_RING_SIZE		128 
 6387 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6388 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6389 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6390 				 | MACB_BIT(ISR_ROVR))
 6391 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6392 					| MACB_BIT(ISR_RLE)		\
 6393 					| MACB_BIT(TXERR))
 6394 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6395 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6396 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6397 				#define GEM_MTU_MIN_SIZE	68
 6398 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6399 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6400 				#define MACB_HALT_TIMEOUT	1230
 6401 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6402 				#endif
 6403 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6404 				#endif
 6405 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6406 				#endif
 6407 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6408 				#endif
 6409 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6410 				#endif
 6411 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6412 				#endif
 6413 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6414 				#endif
 6415 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6416 				#endif
 6417 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6418 				#endif
 6419 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6420 				#endif
 6421 				/* LDV_COMMENT_END_PREP */
 6422 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_wol" from driver structure with callbacks "macb_ethtool_ops" */
 6423 				ldv_handler_precall();
 6424 				macb_get_wol( var_group1, var_group3);
 6425 				/* LDV_COMMENT_BEGIN_PREP */
 6426 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6427 				#endif
 6428 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6429 				#endif
 6430 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6431 				#endif
 6432 				#if defined(CONFIG_OF)
 6433 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6434 				#define AT91ETHER_MAX_RX_DESCR	9
 6435 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6436 				#endif
 6437 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6438 				#endif
 6439 				#endif 
 6440 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6441 				#endif
 6442 				/* LDV_COMMENT_END_PREP */
 6443 				
 6444 
 6445 				
 6446 
 6447 			}
 6448 
 6449 			break;
 6450 			case 3: {
 6451 
 6452 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6453 				
 6454 
 6455 				/* content: static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 6456 				/* LDV_COMMENT_BEGIN_PREP */
 6457 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6458 				#define MACB_RX_BUFFER_SIZE	128
 6459 				#define RX_BUFFER_MULTIPLE	64  
 6460 				#define RX_RING_SIZE		512 
 6461 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6462 				#define TX_RING_SIZE		128 
 6463 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6464 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6465 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6466 				 | MACB_BIT(ISR_ROVR))
 6467 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6468 					| MACB_BIT(ISR_RLE)		\
 6469 					| MACB_BIT(TXERR))
 6470 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6471 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6472 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6473 				#define GEM_MTU_MIN_SIZE	68
 6474 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6475 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6476 				#define MACB_HALT_TIMEOUT	1230
 6477 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6478 				#endif
 6479 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6480 				#endif
 6481 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6482 				#endif
 6483 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6484 				#endif
 6485 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6486 				#endif
 6487 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6488 				#endif
 6489 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6490 				#endif
 6491 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6492 				#endif
 6493 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6494 				#endif
 6495 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6496 				#endif
 6497 				/* LDV_COMMENT_END_PREP */
 6498 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_wol" from driver structure with callbacks "macb_ethtool_ops" */
 6499 				ldv_handler_precall();
 6500 				macb_set_wol( var_group1, var_group3);
 6501 				/* LDV_COMMENT_BEGIN_PREP */
 6502 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6503 				#endif
 6504 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6505 				#endif
 6506 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6507 				#endif
 6508 				#if defined(CONFIG_OF)
 6509 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6510 				#define AT91ETHER_MAX_RX_DESCR	9
 6511 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6512 				#endif
 6513 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6514 				#endif
 6515 				#endif 
 6516 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6517 				#endif
 6518 				/* LDV_COMMENT_END_PREP */
 6519 				
 6520 
 6521 				
 6522 
 6523 			}
 6524 
 6525 			break;
 6526 			case 4: {
 6527 
 6528 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6529 				
 6530 
 6531 				/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 6532 				/* LDV_COMMENT_BEGIN_PREP */
 6533 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6534 				#define MACB_RX_BUFFER_SIZE	128
 6535 				#define RX_BUFFER_MULTIPLE	64  
 6536 				#define RX_RING_SIZE		512 
 6537 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6538 				#define TX_RING_SIZE		128 
 6539 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6540 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6541 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6542 				 | MACB_BIT(ISR_ROVR))
 6543 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6544 					| MACB_BIT(ISR_RLE)		\
 6545 					| MACB_BIT(TXERR))
 6546 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6547 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6548 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6549 				#define GEM_MTU_MIN_SIZE	68
 6550 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6551 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6552 				#define MACB_HALT_TIMEOUT	1230
 6553 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6554 				#endif
 6555 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6556 				#endif
 6557 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6558 				#endif
 6559 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6560 				#endif
 6561 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6562 				#endif
 6563 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6564 				#endif
 6565 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6566 				#endif
 6567 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6568 				#endif
 6569 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6570 				#endif
 6571 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6572 				#endif
 6573 				/* LDV_COMMENT_END_PREP */
 6574 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs_len" from driver structure with callbacks "gem_ethtool_ops" */
 6575 				ldv_handler_precall();
 6576 				macb_get_regs_len( var_group1);
 6577 				/* LDV_COMMENT_BEGIN_PREP */
 6578 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6579 				#endif
 6580 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6581 				#endif
 6582 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6583 				#endif
 6584 				#if defined(CONFIG_OF)
 6585 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6586 				#define AT91ETHER_MAX_RX_DESCR	9
 6587 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6588 				#endif
 6589 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6590 				#endif
 6591 				#endif 
 6592 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6593 				#endif
 6594 				/* LDV_COMMENT_END_PREP */
 6595 				
 6596 
 6597 				
 6598 
 6599 			}
 6600 
 6601 			break;
 6602 			case 5: {
 6603 
 6604 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6605 				
 6606 
 6607 				/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 6608 				/* LDV_COMMENT_BEGIN_PREP */
 6609 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6610 				#define MACB_RX_BUFFER_SIZE	128
 6611 				#define RX_BUFFER_MULTIPLE	64  
 6612 				#define RX_RING_SIZE		512 
 6613 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6614 				#define TX_RING_SIZE		128 
 6615 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6616 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6617 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6618 				 | MACB_BIT(ISR_ROVR))
 6619 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6620 					| MACB_BIT(ISR_RLE)		\
 6621 					| MACB_BIT(TXERR))
 6622 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6623 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6624 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6625 				#define GEM_MTU_MIN_SIZE	68
 6626 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6627 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6628 				#define MACB_HALT_TIMEOUT	1230
 6629 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6630 				#endif
 6631 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6632 				#endif
 6633 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6634 				#endif
 6635 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6636 				#endif
 6637 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6638 				#endif
 6639 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6640 				#endif
 6641 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6642 				#endif
 6643 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6644 				#endif
 6645 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6646 				#endif
 6647 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6648 				#endif
 6649 				/* LDV_COMMENT_END_PREP */
 6650 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs" from driver structure with callbacks "gem_ethtool_ops" */
 6651 				ldv_handler_precall();
 6652 				macb_get_regs( var_group1, var_group2, var_macb_get_regs_68_p2);
 6653 				/* LDV_COMMENT_BEGIN_PREP */
 6654 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6655 				#endif
 6656 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6657 				#endif
 6658 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6659 				#endif
 6660 				#if defined(CONFIG_OF)
 6661 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6662 				#define AT91ETHER_MAX_RX_DESCR	9
 6663 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6664 				#endif
 6665 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6666 				#endif
 6667 				#endif 
 6668 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6669 				#endif
 6670 				/* LDV_COMMENT_END_PREP */
 6671 				
 6672 
 6673 				
 6674 
 6675 			}
 6676 
 6677 			break;
 6678 			case 6: {
 6679 
 6680 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6681 				
 6682 
 6683 				/* content: static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)*/
 6684 				/* LDV_COMMENT_BEGIN_PREP */
 6685 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6686 				#define MACB_RX_BUFFER_SIZE	128
 6687 				#define RX_BUFFER_MULTIPLE	64  
 6688 				#define RX_RING_SIZE		512 
 6689 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6690 				#define TX_RING_SIZE		128 
 6691 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6692 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6693 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6694 				 | MACB_BIT(ISR_ROVR))
 6695 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6696 					| MACB_BIT(ISR_RLE)		\
 6697 					| MACB_BIT(TXERR))
 6698 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6699 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6700 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6701 				#define GEM_MTU_MIN_SIZE	68
 6702 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6703 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6704 				#define MACB_HALT_TIMEOUT	1230
 6705 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6706 				#endif
 6707 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6708 				#endif
 6709 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6710 				#endif
 6711 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6712 				#endif
 6713 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6714 				#endif
 6715 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6716 				#endif
 6717 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6718 				#endif
 6719 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6720 				#endif
 6721 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6722 				#endif
 6723 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6724 				#endif
 6725 				/* LDV_COMMENT_END_PREP */
 6726 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_ethtool_stats" from driver structure with callbacks "gem_ethtool_ops" */
 6727 				ldv_handler_precall();
 6728 				gem_get_ethtool_stats( var_group1, var_group4, var_gem_get_ethtool_stats_63_p2);
 6729 				/* LDV_COMMENT_BEGIN_PREP */
 6730 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6731 				#endif
 6732 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6733 				#endif
 6734 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6735 				#endif
 6736 				#if defined(CONFIG_OF)
 6737 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6738 				#define AT91ETHER_MAX_RX_DESCR	9
 6739 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6740 				#endif
 6741 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6742 				#endif
 6743 				#endif 
 6744 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6745 				#endif
 6746 				/* LDV_COMMENT_END_PREP */
 6747 				
 6748 
 6749 				
 6750 
 6751 			}
 6752 
 6753 			break;
 6754 			case 7: {
 6755 
 6756 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6757 				
 6758 
 6759 				/* content: static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)*/
 6760 				/* LDV_COMMENT_BEGIN_PREP */
 6761 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6762 				#define MACB_RX_BUFFER_SIZE	128
 6763 				#define RX_BUFFER_MULTIPLE	64  
 6764 				#define RX_RING_SIZE		512 
 6765 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6766 				#define TX_RING_SIZE		128 
 6767 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6768 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6769 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6770 				 | MACB_BIT(ISR_ROVR))
 6771 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6772 					| MACB_BIT(ISR_RLE)		\
 6773 					| MACB_BIT(TXERR))
 6774 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6775 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6776 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6777 				#define GEM_MTU_MIN_SIZE	68
 6778 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6779 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6780 				#define MACB_HALT_TIMEOUT	1230
 6781 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6782 				#endif
 6783 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6784 				#endif
 6785 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6786 				#endif
 6787 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6788 				#endif
 6789 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6790 				#endif
 6791 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6792 				#endif
 6793 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6794 				#endif
 6795 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6796 				#endif
 6797 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6798 				#endif
 6799 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6800 				#endif
 6801 				/* LDV_COMMENT_END_PREP */
 6802 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_strings" from driver structure with callbacks "gem_ethtool_ops" */
 6803 				ldv_handler_precall();
 6804 				gem_get_ethtool_strings( var_group1, var_gem_get_ethtool_strings_65_p1, var_gem_get_ethtool_strings_65_p2);
 6805 				/* LDV_COMMENT_BEGIN_PREP */
 6806 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6807 				#endif
 6808 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6809 				#endif
 6810 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6811 				#endif
 6812 				#if defined(CONFIG_OF)
 6813 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6814 				#define AT91ETHER_MAX_RX_DESCR	9
 6815 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6816 				#endif
 6817 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6818 				#endif
 6819 				#endif 
 6820 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6821 				#endif
 6822 				/* LDV_COMMENT_END_PREP */
 6823 				
 6824 
 6825 				
 6826 
 6827 			}
 6828 
 6829 			break;
 6830 			case 8: {
 6831 
 6832 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6833 				
 6834 
 6835 				/* content: static int gem_get_sset_count(struct net_device *dev, int sset)*/
 6836 				/* LDV_COMMENT_BEGIN_PREP */
 6837 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6838 				#define MACB_RX_BUFFER_SIZE	128
 6839 				#define RX_BUFFER_MULTIPLE	64  
 6840 				#define RX_RING_SIZE		512 
 6841 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6842 				#define TX_RING_SIZE		128 
 6843 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6844 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6845 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6846 				 | MACB_BIT(ISR_ROVR))
 6847 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6848 					| MACB_BIT(ISR_RLE)		\
 6849 					| MACB_BIT(TXERR))
 6850 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6851 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6852 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6853 				#define GEM_MTU_MIN_SIZE	68
 6854 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6855 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6856 				#define MACB_HALT_TIMEOUT	1230
 6857 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6858 				#endif
 6859 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6860 				#endif
 6861 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6862 				#endif
 6863 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6864 				#endif
 6865 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6866 				#endif
 6867 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6868 				#endif
 6869 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6870 				#endif
 6871 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6872 				#endif
 6873 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6874 				#endif
 6875 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6876 				#endif
 6877 				/* LDV_COMMENT_END_PREP */
 6878 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_sset_count" from driver structure with callbacks "gem_ethtool_ops" */
 6879 				ldv_handler_precall();
 6880 				gem_get_sset_count( var_group1, var_gem_get_sset_count_64_p1);
 6881 				/* LDV_COMMENT_BEGIN_PREP */
 6882 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6883 				#endif
 6884 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6885 				#endif
 6886 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6887 				#endif
 6888 				#if defined(CONFIG_OF)
 6889 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6890 				#define AT91ETHER_MAX_RX_DESCR	9
 6891 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6892 				#endif
 6893 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6894 				#endif
 6895 				#endif 
 6896 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6897 				#endif
 6898 				/* LDV_COMMENT_END_PREP */
 6899 				
 6900 
 6901 				
 6902 
 6903 			}
 6904 
 6905 			break;
 6906 			case 9: {
 6907 
 6908 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 6909 				if(ldv_s_macb_netdev_ops_net_device_ops==0) {
 6910 
 6911 				/* content: static int macb_open(struct net_device *dev)*/
 6912 				/* LDV_COMMENT_BEGIN_PREP */
 6913 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6914 				#define MACB_RX_BUFFER_SIZE	128
 6915 				#define RX_BUFFER_MULTIPLE	64  
 6916 				#define RX_RING_SIZE		512 
 6917 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6918 				#define TX_RING_SIZE		128 
 6919 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6920 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6921 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6922 				 | MACB_BIT(ISR_ROVR))
 6923 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6924 					| MACB_BIT(ISR_RLE)		\
 6925 					| MACB_BIT(TXERR))
 6926 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6927 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6928 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6929 				#define GEM_MTU_MIN_SIZE	68
 6930 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6931 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6932 				#define MACB_HALT_TIMEOUT	1230
 6933 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6934 				#endif
 6935 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6936 				#endif
 6937 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6938 				#endif
 6939 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6940 				#endif
 6941 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6942 				#endif
 6943 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6944 				#endif
 6945 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6946 				#endif
 6947 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6948 				#endif
 6949 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6950 				#endif
 6951 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6952 				#endif
 6953 				/* LDV_COMMENT_END_PREP */
 6954 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "macb_netdev_ops". Standard function test for correct return result. */
 6955 				ldv_handler_precall();
 6956 				res_macb_open_58 = macb_open( var_group1);
 6957 				 ldv_check_return_value(res_macb_open_58);
 6958 				 if(res_macb_open_58 < 0) 
 6959 					goto ldv_module_exit;
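				/* A negative return from ndo_open aborts the scenario via ldv_module_exit,
				   so the model never reaches ndo_stop for a device whose open failed. */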
 6960 				/* LDV_COMMENT_BEGIN_PREP */
 6961 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6962 				#endif
 6963 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6964 				#endif
 6965 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6966 				#endif
 6967 				#if defined(CONFIG_OF)
 6968 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6969 				#define AT91ETHER_MAX_RX_DESCR	9
 6970 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6971 				#endif
 6972 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6973 				#endif
 6974 				#endif 
 6975 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6976 				#endif
 6977 				/* LDV_COMMENT_END_PREP */
 6978 				ldv_s_macb_netdev_ops_net_device_ops++;
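				/* The sequence counter is incremented only after a successful ndo_open,
				   which makes the ndo_stop branch (case 10) reachable on a later iteration. */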
 6979 
 6980 				}
 6981 
 6982 			}
 6983 
 6984 			break;
 6985 			case 10: {
 6986 
 6987 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 6988 				if(ldv_s_macb_netdev_ops_net_device_ops==1) {
 6989 
 6990 				/* content: static int macb_close(struct net_device *dev)*/
 6991 				/* LDV_COMMENT_BEGIN_PREP */
 6992 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6993 				#define MACB_RX_BUFFER_SIZE	128
 6994 				#define RX_BUFFER_MULTIPLE	64  
 6995 				#define RX_RING_SIZE		512 
 6996 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6997 				#define TX_RING_SIZE		128 
 6998 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6999 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7000 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7001 				 | MACB_BIT(ISR_ROVR))
 7002 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7003 					| MACB_BIT(ISR_RLE)		\
 7004 					| MACB_BIT(TXERR))
 7005 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7006 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7007 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7008 				#define GEM_MTU_MIN_SIZE	68
 7009 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7010 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7011 				#define MACB_HALT_TIMEOUT	1230
 7012 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7013 				#endif
 7014 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7015 				#endif
 7016 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7017 				#endif
 7018 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7019 				#endif
 7020 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7021 				#endif
 7022 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7023 				#endif
 7024 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7025 				#endif
 7026 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7027 				#endif
 7028 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7029 				#endif
 7030 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7031 				#endif
 7032 				/* LDV_COMMENT_END_PREP */
 7033 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "macb_netdev_ops". Standard function test for correct return result. */
 7034 				ldv_handler_precall();
 7035 				res_macb_close_59 = macb_close( var_group1);
 7036 				 ldv_check_return_value(res_macb_close_59);
 7037 				 if(res_macb_close_59) 
 7038 					goto ldv_module_exit;
 7039 				/* LDV_COMMENT_BEGIN_PREP */
 7040 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7041 				#endif
 7042 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7043 				#endif
 7044 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7045 				#endif
 7046 				#if defined(CONFIG_OF)
 7047 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7048 				#define AT91ETHER_MAX_RX_DESCR	9
 7049 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7050 				#endif
 7051 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7052 				#endif
 7053 				#endif 
 7054 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7055 				#endif
 7056 				/* LDV_COMMENT_END_PREP */
 7057 				ldv_s_macb_netdev_ops_net_device_ops=0;
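				/* After ndo_stop succeeds the counter is reset to zero, so a further
				   ndo_open/ndo_stop cycle may be modelled and, as far as macb_netdev_ops
				   is concerned, the outer loop is again free to terminate. */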
 7058 
 7059 				}
 7060 
 7061 			}
 7062 
 7063 			break;
 7064 			case 11: {
 7065 
 7066 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7067 				
 7068 
 7069 				/* content: static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
 7070 				/* LDV_COMMENT_BEGIN_PREP */
 7071 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7072 				#define MACB_RX_BUFFER_SIZE	128
 7073 				#define RX_BUFFER_MULTIPLE	64  
 7074 				#define RX_RING_SIZE		512 
 7075 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7076 				#define TX_RING_SIZE		128 
 7077 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7078 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7079 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7080 				 | MACB_BIT(ISR_ROVR))
 7081 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7082 					| MACB_BIT(ISR_RLE)		\
 7083 					| MACB_BIT(TXERR))
 7084 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7085 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7086 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7087 				#define GEM_MTU_MIN_SIZE	68
 7088 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7089 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7090 				#define MACB_HALT_TIMEOUT	1230
 7091 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7092 				#endif
 7093 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7094 				#endif
 7095 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7096 				#endif
 7097 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7098 				#endif
 7099 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7100 				#endif
 7101 				/* LDV_COMMENT_END_PREP */
 7102 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "macb_netdev_ops" */
 7103 				ldv_handler_precall();
 7104 				macb_start_xmit( var_group5, var_group1);
 7105 				/* LDV_COMMENT_BEGIN_PREP */
 7106 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7107 				#endif
 7108 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7109 				#endif
 7110 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7111 				#endif
 7112 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7113 				#endif
 7114 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7115 				#endif
 7116 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7117 				#endif
 7118 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7119 				#endif
 7120 				#if defined(CONFIG_OF)
 7121 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7122 				#define AT91ETHER_MAX_RX_DESCR	9
 7123 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7124 				#endif
 7125 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7126 				#endif
 7127 				#endif 
 7128 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7129 				#endif
 7130 				/* LDV_COMMENT_END_PREP */
 7131 				
 7132 
 7133 				
 7134 
 7135 			}
 7136 
 7137 			break;
 7138 			case 12: {
 7139 
 7140 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7141 				
 7142 
 7143 				/* content: static void macb_set_rx_mode(struct net_device *dev)*/
 7144 				/* LDV_COMMENT_BEGIN_PREP */
 7145 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7146 				#define MACB_RX_BUFFER_SIZE	128
 7147 				#define RX_BUFFER_MULTIPLE	64  
 7148 				#define RX_RING_SIZE		512 
 7149 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7150 				#define TX_RING_SIZE		128 
 7151 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7152 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7153 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7154 				 | MACB_BIT(ISR_ROVR))
 7155 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7156 					| MACB_BIT(ISR_RLE)		\
 7157 					| MACB_BIT(TXERR))
 7158 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7159 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7160 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7161 				#define GEM_MTU_MIN_SIZE	68
 7162 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7163 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7164 				#define MACB_HALT_TIMEOUT	1230
 7165 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7166 				#endif
 7167 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7168 				#endif
 7169 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7170 				#endif
 7171 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7172 				#endif
 7173 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7174 				#endif
 7175 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7176 				#endif
 7177 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7178 				#endif
 7179 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7180 				#endif
 7181 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7182 				#endif
 7183 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7184 				#endif
 7185 				/* LDV_COMMENT_END_PREP */
 7186 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "macb_netdev_ops" */
 7187 				ldv_handler_precall();
 7188 				macb_set_rx_mode( var_group1);
 7189 				/* LDV_COMMENT_BEGIN_PREP */
 7190 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7191 				#endif
 7192 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7193 				#endif
 7194 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7195 				#endif
 7196 				#if defined(CONFIG_OF)
 7197 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7198 				#define AT91ETHER_MAX_RX_DESCR	9
 7199 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7200 				#endif
 7201 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7202 				#endif
 7203 				#endif 
 7204 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7205 				#endif
 7206 				/* LDV_COMMENT_END_PREP */
 7207 				
 7208 
 7209 				
 7210 
 7211 			}
 7212 
 7213 			break;
 7214 			case 13: {
 7215 
 7216 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7217 				
 7218 
 7219 				/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
 7220 				/* LDV_COMMENT_BEGIN_PREP */
 7221 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7222 				#define MACB_RX_BUFFER_SIZE	128
 7223 				#define RX_BUFFER_MULTIPLE	64  
 7224 				#define RX_RING_SIZE		512 
 7225 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7226 				#define TX_RING_SIZE		128 
 7227 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7228 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7229 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7230 				 | MACB_BIT(ISR_ROVR))
 7231 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7232 					| MACB_BIT(ISR_RLE)		\
 7233 					| MACB_BIT(TXERR))
 7234 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7235 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7236 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7237 				#define GEM_MTU_MIN_SIZE	68
 7238 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7239 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7240 				#define MACB_HALT_TIMEOUT	1230
 7241 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7242 				#endif
 7243 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7244 				#endif
 7245 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7246 				#endif
 7247 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7248 				#endif
 7249 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7250 				#endif
 7251 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7252 				#endif
 7253 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7254 				#endif
 7255 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7256 				#endif
 7257 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7258 				#endif
 7259 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7260 				#endif
 7261 				/* LDV_COMMENT_END_PREP */
 7262 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_get_stats" from driver structure with callbacks "macb_netdev_ops" */
 7263 				ldv_handler_precall();
 7264 				macb_get_stats( var_group1);
 7265 				/* LDV_COMMENT_BEGIN_PREP */
 7266 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7267 				#endif
 7268 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7269 				#endif
 7270 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7271 				#endif
 7272 				#if defined(CONFIG_OF)
 7273 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7274 				#define AT91ETHER_MAX_RX_DESCR	9
 7275 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7276 				#endif
 7277 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7278 				#endif
 7279 				#endif 
 7280 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7281 				#endif
 7282 				/* LDV_COMMENT_END_PREP */
 7283 				
 7284 
 7285 				
 7286 
 7287 			}
 7288 
 7289 			break;
 7290 			case 14: {
 7291 
 7292 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7293 				
 7294 
 7295 				/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
 7296 				/* LDV_COMMENT_BEGIN_PREP */
 7297 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7298 				#define MACB_RX_BUFFER_SIZE	128
 7299 				#define RX_BUFFER_MULTIPLE	64  
 7300 				#define RX_RING_SIZE		512 
 7301 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7302 				#define TX_RING_SIZE		128 
 7303 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7304 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7305 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7306 				 | MACB_BIT(ISR_ROVR))
 7307 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7308 					| MACB_BIT(ISR_RLE)		\
 7309 					| MACB_BIT(TXERR))
 7310 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7311 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7312 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7313 				#define GEM_MTU_MIN_SIZE	68
 7314 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7315 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7316 				#define MACB_HALT_TIMEOUT	1230
 7317 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7318 				#endif
 7319 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7320 				#endif
 7321 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7322 				#endif
 7323 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7324 				#endif
 7325 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7326 				#endif
 7327 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7328 				#endif
 7329 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7330 				#endif
 7331 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7332 				#endif
 7333 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7334 				#endif
 7335 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7336 				#endif
 7337 				/* LDV_COMMENT_END_PREP */
 7338 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "macb_netdev_ops" */
 7339 				ldv_handler_precall();
 7340 				macb_ioctl( var_group1, var_group6, var_macb_ioctl_71_p2);
 7341 				/* LDV_COMMENT_BEGIN_PREP */
 7342 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7343 				#endif
 7344 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7345 				#endif
 7346 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7347 				#endif
 7348 				#if defined(CONFIG_OF)
 7349 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7350 				#define AT91ETHER_MAX_RX_DESCR	9
 7351 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7352 				#endif
 7353 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7354 				#endif
 7355 				#endif 
 7356 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7357 				#endif
 7358 				/* LDV_COMMENT_END_PREP */
 7359 				
 7360 
 7361 				
 7362 
 7363 			}
 7364 
 7365 			break;
 7366 			case 15: {
 7367 
 7368 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7369 				
 7370 
 7371 				/* content: static int macb_change_mtu(struct net_device *dev, int new_mtu)*/
 7372 				/* LDV_COMMENT_BEGIN_PREP */
 7373 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7374 				#define MACB_RX_BUFFER_SIZE	128
 7375 				#define RX_BUFFER_MULTIPLE	64  
 7376 				#define RX_RING_SIZE		512 
 7377 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7378 				#define TX_RING_SIZE		128 
 7379 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7380 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7381 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7382 				 | MACB_BIT(ISR_ROVR))
 7383 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7384 					| MACB_BIT(ISR_RLE)		\
 7385 					| MACB_BIT(TXERR))
 7386 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7387 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7388 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7389 				#define GEM_MTU_MIN_SIZE	68
 7390 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7391 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7392 				#define MACB_HALT_TIMEOUT	1230
 7393 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7394 				#endif
 7395 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7396 				#endif
 7397 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7398 				#endif
 7399 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7400 				#endif
 7401 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7402 				#endif
 7403 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7404 				#endif
 7405 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7406 				#endif
 7407 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7408 				#endif
 7409 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7410 				#endif
 7411 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7412 				#endif
 7413 				/* LDV_COMMENT_END_PREP */
 7414 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_change_mtu" from driver structure with callbacks "macb_netdev_ops" */
 7415 				ldv_handler_precall();
 7416 				macb_change_mtu( var_group1, var_macb_change_mtu_60_p1);
 7417 				/* LDV_COMMENT_BEGIN_PREP */
 7418 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7419 				#endif
 7420 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7421 				#endif
 7422 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7423 				#endif
 7424 				#if defined(CONFIG_OF)
 7425 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7426 				#define AT91ETHER_MAX_RX_DESCR	9
 7427 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7428 				#endif
 7429 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7430 				#endif
 7431 				#endif 
 7432 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7433 				#endif
 7434 				/* LDV_COMMENT_END_PREP */
 7435 				
 7436 
 7437 				
 7438 
 7439 			}
 7440 
 7441 			break;
 7442 			case 16: {
 7443 
 7444 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7445 				
 7446 
 7447 				/* content: static void macb_poll_controller(struct net_device *dev)*/
 7448 				/* LDV_COMMENT_BEGIN_PREP */
 7449 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7450 				#define MACB_RX_BUFFER_SIZE	128
 7451 				#define RX_BUFFER_MULTIPLE	64  
 7452 				#define RX_RING_SIZE		512 
 7453 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7454 				#define TX_RING_SIZE		128 
 7455 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7456 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7457 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7458 				 | MACB_BIT(ISR_ROVR))
 7459 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7460 					| MACB_BIT(ISR_RLE)		\
 7461 					| MACB_BIT(TXERR))
 7462 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7463 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7464 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7465 				#define GEM_MTU_MIN_SIZE	68
 7466 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7467 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7468 				#define MACB_HALT_TIMEOUT	1230
 7469 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7470 				#endif
 7471 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7472 				#endif
 7473 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7474 				#endif
 7475 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7476 				#endif
 7477 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7478 				/* LDV_COMMENT_END_PREP */
 7479 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_poll_controller" from driver structure with callbacks "macb_netdev_ops" */
 7480 				ldv_handler_precall();
 7481 				macb_poll_controller( var_group1);
 7482 				/* LDV_COMMENT_BEGIN_PREP */
 7483 				#endif
 7484 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7485 				#endif
 7486 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7487 				#endif
 7488 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7489 				#endif
 7490 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7491 				#endif
 7492 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7493 				#endif
 7494 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7495 				#endif
 7496 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7497 				#endif
 7498 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7499 				#endif
 7500 				#if defined(CONFIG_OF)
 7501 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7502 				#define AT91ETHER_MAX_RX_DESCR	9
 7503 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7504 				#endif
 7505 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7506 				#endif
 7507 				#endif 
 7508 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7509 				#endif
 7510 				/* LDV_COMMENT_END_PREP */
 7511 				
 7512 
 7513 				
 7514 
 7515 			}
 7516 
 7517 			break;
 7518 			case 17: {
 7519 
 7520 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7521 				
 7522 
 7523 				/* content: static int macb_set_features(struct net_device *netdev, netdev_features_t features)*/
 7524 				/* LDV_COMMENT_BEGIN_PREP */
 7525 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7526 				#define MACB_RX_BUFFER_SIZE	128
 7527 				#define RX_BUFFER_MULTIPLE	64  
 7528 				#define RX_RING_SIZE		512 
 7529 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7530 				#define TX_RING_SIZE		128 
 7531 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7532 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7533 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7534 				 | MACB_BIT(ISR_ROVR))
 7535 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7536 					| MACB_BIT(ISR_RLE)		\
 7537 					| MACB_BIT(TXERR))
 7538 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7539 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7540 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7541 				#define GEM_MTU_MIN_SIZE	68
 7542 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7543 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7544 				#define MACB_HALT_TIMEOUT	1230
 7545 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7546 				#endif
 7547 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7548 				#endif
 7549 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7550 				#endif
 7551 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7552 				#endif
 7553 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7554 				#endif
 7555 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7556 				#endif
 7557 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7558 				#endif
 7559 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7560 				#endif
 7561 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7562 				#endif
 7563 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7564 				#endif
 7565 				/* LDV_COMMENT_END_PREP */
 7566 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_features" from driver structure with callbacks "macb_netdev_ops" */
 7567 				ldv_handler_precall();
 7568 				macb_set_features( var_group1, var_macb_set_features_72_p1);
 7569 				/* LDV_COMMENT_BEGIN_PREP */
 7570 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7571 				#endif
 7572 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7573 				#endif
 7574 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7575 				#endif
 7576 				#if defined(CONFIG_OF)
 7577 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7578 				#define AT91ETHER_MAX_RX_DESCR	9
 7579 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7580 				#endif
 7581 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7582 				#endif
 7583 				#endif 
 7584 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7585 				#endif
 7586 				/* LDV_COMMENT_END_PREP */
 7587 				
 7588 
 7589 				
 7590 
 7591 			}
 7592 
 7593 			break;
 7594 			case 18: {
 7595 
 7596 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7597 				if(ldv_s_at91ether_netdev_ops_net_device_ops==0) {
 7598 
 7599 				/* content: static int at91ether_open(struct net_device *dev)*/
 7600 				/* LDV_COMMENT_BEGIN_PREP */
 7601 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7602 				#define MACB_RX_BUFFER_SIZE	128
 7603 				#define RX_BUFFER_MULTIPLE	64  
 7604 				#define RX_RING_SIZE		512 
 7605 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7606 				#define TX_RING_SIZE		128 
 7607 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7608 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7609 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7610 				 | MACB_BIT(ISR_ROVR))
 7611 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7612 					| MACB_BIT(ISR_RLE)		\
 7613 					| MACB_BIT(TXERR))
 7614 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7615 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7616 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7617 				#define GEM_MTU_MIN_SIZE	68
 7618 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7619 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7620 				#define MACB_HALT_TIMEOUT	1230
 7621 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7622 				#endif
 7623 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7624 				#endif
 7625 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7626 				#endif
 7627 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7628 				#endif
 7629 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7630 				#endif
 7631 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7632 				#endif
 7633 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7634 				#endif
 7635 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7636 				#endif
 7637 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7638 				#endif
 7639 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7640 				#endif
 7641 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7642 				#endif
 7643 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7644 				#endif
 7645 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7646 				#endif
 7647 				#if defined(CONFIG_OF)
 7648 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7649 				#define AT91ETHER_MAX_RX_DESCR	9
 7650 				/* LDV_COMMENT_END_PREP */
 7651 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "at91ether_netdev_ops". Standard function test for correct return result. */
 7652 				ldv_handler_precall();
 7653 				res_at91ether_open_78 = at91ether_open( var_group1);
 7654 				 ldv_check_return_value(res_at91ether_open_78);
 7655 				 if(res_at91ether_open_78 < 0) 
 7656 					goto ldv_module_exit;
 7657 				/* LDV_COMMENT_BEGIN_PREP */
 7658 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7659 				#endif
 7660 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7661 				#endif
 7662 				#endif 
 7663 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7664 				#endif
 7665 				/* LDV_COMMENT_END_PREP */
 7666 				ldv_s_at91ether_netdev_ops_net_device_ops++;
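				/* Same open/close sequencing as for macb_netdev_ops above, applied here
				   to at91ether_netdev_ops (cases 18 and 19). */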
 7667 
 7668 				}
 7669 
 7670 			}
 7671 
 7672 			break;
 7673 			case 19: {
 7674 
 7675 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7676 				if(ldv_s_at91ether_netdev_ops_net_device_ops==1) {
 7677 
 7678 				/* content: static int at91ether_close(struct net_device *dev)*/
 7679 				/* LDV_COMMENT_BEGIN_PREP */
 7680 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7681 				#define MACB_RX_BUFFER_SIZE	128
 7682 				#define RX_BUFFER_MULTIPLE	64  
 7683 				#define RX_RING_SIZE		512 
 7684 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7685 				#define TX_RING_SIZE		128 
 7686 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7687 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7688 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7689 				 | MACB_BIT(ISR_ROVR))
 7690 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7691 					| MACB_BIT(ISR_RLE)		\
 7692 					| MACB_BIT(TXERR))
 7693 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7694 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7695 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7696 				#define GEM_MTU_MIN_SIZE	68
 7697 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7698 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7699 				#define MACB_HALT_TIMEOUT	1230
 7700 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7701 				#endif
 7702 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7703 				#endif
 7704 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7705 				#endif
 7706 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7707 				#endif
 7708 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7709 				#endif
 7710 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7711 				#endif
 7712 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7713 				#endif
 7714 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7715 				#endif
 7716 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7717 				#endif
 7718 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7719 				#endif
 7720 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7721 				#endif
 7722 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7723 				#endif
 7724 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7725 				#endif
 7726 				#if defined(CONFIG_OF)
 7727 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7728 				#define AT91ETHER_MAX_RX_DESCR	9
 7729 				/* LDV_COMMENT_END_PREP */
 7730 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "at91ether_netdev_ops". Standard function test for correct return result. */
 7731 				ldv_handler_precall();
 7732 				res_at91ether_close_79 = at91ether_close( var_group1);
 7733 				 ldv_check_return_value(res_at91ether_close_79);
 7734 				 if(res_at91ether_close_79) 
 7735 					goto ldv_module_exit;
 7736 				/* LDV_COMMENT_BEGIN_PREP */
 7737 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7738 				#endif
 7739 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7740 				#endif
 7741 				#endif 
 7742 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7743 				#endif
 7744 				/* LDV_COMMENT_END_PREP */
 7745 				ldv_s_at91ether_netdev_ops_net_device_ops=0;
 7746 
 7747 				}
 7748 
 7749 			}
 7750 
 7751 			break;
 7752 			case 20: {
 7753 
 7754 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7755 				
 7756 
 7757 				/* content: static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
 7758 				/* LDV_COMMENT_BEGIN_PREP */
 7759 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7760 				#define MACB_RX_BUFFER_SIZE	128
 7761 				#define RX_BUFFER_MULTIPLE	64  
 7762 				#define RX_RING_SIZE		512 
 7763 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7764 				#define TX_RING_SIZE		128 
 7765 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7766 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7767 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7768 				 | MACB_BIT(ISR_ROVR))
 7769 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7770 					| MACB_BIT(ISR_RLE)		\
 7771 					| MACB_BIT(TXERR))
 7772 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7773 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7774 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7775 				#define GEM_MTU_MIN_SIZE	68
 7776 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7777 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7778 				#define MACB_HALT_TIMEOUT	1230
 7779 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7780 				#endif
 7781 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7782 				#endif
 7783 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7784 				#endif
 7785 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7786 				#endif
 7787 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7788 				#endif
 7789 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7790 				#endif
 7791 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7792 				#endif
 7793 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7794 				#endif
 7795 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7796 				#endif
 7797 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7798 				#endif
 7799 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7800 				#endif
 7801 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7802 				#endif
 7803 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7804 				#endif
 7805 				#if defined(CONFIG_OF)
 7806 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7807 				#define AT91ETHER_MAX_RX_DESCR	9
 7808 				/* LDV_COMMENT_END_PREP */
 7809 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "at91ether_netdev_ops" */
 7810 				ldv_handler_precall();
 7811 				at91ether_start_xmit( var_group5, var_group1);
 7812 				/* LDV_COMMENT_BEGIN_PREP */
 7813 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7814 				#endif
 7815 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7816 				#endif
 7817 				#endif 
 7818 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7819 				#endif
 7820 				/* LDV_COMMENT_END_PREP */
 7821 				
 7822 
 7823 				
 7824 
 7825 			}
 7826 
 7827 			break;
 7828 			case 21: {
 7829 
 7830 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7831 				
 7832 
 7833 				/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
 7834 				/* LDV_COMMENT_BEGIN_PREP */
 7835 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7836 				#define MACB_RX_BUFFER_SIZE	128
 7837 				#define RX_BUFFER_MULTIPLE	64  
 7838 				#define RX_RING_SIZE		512 
 7839 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7840 				#define TX_RING_SIZE		128 
 7841 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7842 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7843 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7844 				 | MACB_BIT(ISR_ROVR))
 7845 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7846 					| MACB_BIT(ISR_RLE)		\
 7847 					| MACB_BIT(TXERR))
 7848 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7849 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7850 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7851 				#define GEM_MTU_MIN_SIZE	68
 7852 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7853 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7854 				#define MACB_HALT_TIMEOUT	1230
 7855 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7856 				#endif
 7857 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7858 				#endif
 7859 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7860 				#endif
 7861 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7862 				#endif
 7863 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7864 				#endif
 7865 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7866 				#endif
 7867 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7868 				#endif
 7869 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7870 				#endif
 7871 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7872 				#endif
 7873 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7874 				#endif
 7875 				/* LDV_COMMENT_END_PREP */
 7876 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_get_stats" from driver structure with callbacks "at91ether_netdev_ops" */
 7877 				ldv_handler_precall();
 7878 				macb_get_stats( var_group1);
 7879 				/* LDV_COMMENT_BEGIN_PREP */
 7880 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7881 				#endif
 7882 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7883 				#endif
 7884 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7885 				#endif
 7886 				#if defined(CONFIG_OF)
 7887 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7888 				#define AT91ETHER_MAX_RX_DESCR	9
 7889 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7890 				#endif
 7891 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7892 				#endif
 7893 				#endif 
 7894 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7895 				#endif
 7896 				/* LDV_COMMENT_END_PREP */
 7897 				
 7898 
 7899 				
 7900 
 7901 			}
 7902 
 7903 			break;
 7904 			case 22: {
 7905 
 7906 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7907 				
 7908 
 7909 				/* content: static void macb_set_rx_mode(struct net_device *dev)*/
 7910 				/* LDV_COMMENT_BEGIN_PREP */
 7911 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7912 				#define MACB_RX_BUFFER_SIZE	128
 7913 				#define RX_BUFFER_MULTIPLE	64  
 7914 				#define RX_RING_SIZE		512 
 7915 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7916 				#define TX_RING_SIZE		128 
 7917 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7918 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7919 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7920 				 | MACB_BIT(ISR_ROVR))
 7921 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7922 					| MACB_BIT(ISR_RLE)		\
 7923 					| MACB_BIT(TXERR))
 7924 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7925 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7926 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7927 				#define GEM_MTU_MIN_SIZE	68
 7928 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7929 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7930 				#define MACB_HALT_TIMEOUT	1230
 7931 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7932 				#endif
 7933 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7934 				#endif
 7935 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7936 				#endif
 7937 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7938 				#endif
 7939 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7940 				#endif
 7941 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7942 				#endif
 7943 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7944 				#endif
 7945 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7946 				#endif
 7947 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7948 				#endif
 7949 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7950 				#endif
 7951 				/* LDV_COMMENT_END_PREP */
 7952 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "at91ether_netdev_ops" */
 7953 				ldv_handler_precall();
 7954 				macb_set_rx_mode( var_group1);
 7955 				/* LDV_COMMENT_BEGIN_PREP */
 7956 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7957 				#endif
 7958 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7959 				#endif
 7960 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7961 				#endif
 7962 				#if defined(CONFIG_OF)
 7963 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7964 				#define AT91ETHER_MAX_RX_DESCR	9
 7965 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7966 				#endif
 7967 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7968 				#endif
 7969 				#endif 
 7970 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7971 				#endif
 7972 				/* LDV_COMMENT_END_PREP */
 7973 				
 7974 
 7975 				
 7976 
 7977 			}
 7978 
 7979 			break;
 7980 			case 23: {
 7981 
 7982 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7983 				
 7984 
 7985 				/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
 7986 				/* LDV_COMMENT_BEGIN_PREP */
 7987 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7988 				#define MACB_RX_BUFFER_SIZE	128
 7989 				#define RX_BUFFER_MULTIPLE	64  
 7990 				#define RX_RING_SIZE		512 
 7991 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7992 				#define TX_RING_SIZE		128 
 7993 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7994 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7995 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7996 				 | MACB_BIT(ISR_ROVR))
 7997 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7998 					| MACB_BIT(ISR_RLE)		\
 7999 					| MACB_BIT(TXERR))
 8000 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8001 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8002 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8003 				#define GEM_MTU_MIN_SIZE	68
 8004 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8005 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8006 				#define MACB_HALT_TIMEOUT	1230
 8007 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8008 				#endif
 8009 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8010 				#endif
 8011 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8012 				#endif
 8013 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8014 				#endif
 8015 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8016 				#endif
 8017 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8018 				#endif
 8019 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8020 				#endif
 8021 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8022 				#endif
 8023 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8024 				#endif
 8025 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8026 				#endif
 8027 				/* LDV_COMMENT_END_PREP */
 8028 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "at91ether_netdev_ops" */
 8029 				ldv_handler_precall();
 8030 				macb_ioctl( var_group1, var_group6, var_macb_ioctl_71_p2);
 8031 				/* LDV_COMMENT_BEGIN_PREP */
 8032 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8033 				#endif
 8034 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8035 				#endif
 8036 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8037 				#endif
 8038 				#if defined(CONFIG_OF)
 8039 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8040 				#define AT91ETHER_MAX_RX_DESCR	9
 8041 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8042 				#endif
 8043 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8044 				#endif
 8045 				#endif 
 8046 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8047 				#endif
 8048 				/* LDV_COMMENT_END_PREP */
 8049 				
 8050 
 8051 				
 8052 
 8053 			}
 8054 
 8055 			break;
 8056 			case 24: {
 8057 
 8058 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 8059 				
 8060 
 8061 				/* content: static void at91ether_poll_controller(struct net_device *dev)*/
 8062 				/* LDV_COMMENT_BEGIN_PREP */
 8063 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8064 				#define MACB_RX_BUFFER_SIZE	128
 8065 				#define RX_BUFFER_MULTIPLE	64  
 8066 				#define RX_RING_SIZE		512 
 8067 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8068 				#define TX_RING_SIZE		128 
 8069 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8070 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8071 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8072 				 | MACB_BIT(ISR_ROVR))
 8073 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8074 					| MACB_BIT(ISR_RLE)		\
 8075 					| MACB_BIT(TXERR))
 8076 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8077 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8078 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8079 				#define GEM_MTU_MIN_SIZE	68
 8080 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8081 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8082 				#define MACB_HALT_TIMEOUT	1230
 8083 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8084 				#endif
 8085 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8086 				#endif
 8087 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8088 				#endif
 8089 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8090 				#endif
 8091 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8092 				#endif
 8093 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8094 				#endif
 8095 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8096 				#endif
 8097 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8098 				#endif
 8099 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8100 				#endif
 8101 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8102 				#endif
 8103 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8104 				#endif
 8105 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8106 				#endif
 8107 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8108 				#endif
 8109 				#if defined(CONFIG_OF)
 8110 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8111 				#define AT91ETHER_MAX_RX_DESCR	9
 8112 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8113 				/* LDV_COMMENT_END_PREP */
 8114 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_poll_controller" from driver structure with callbacks "at91ether_netdev_ops" */
 8115 				ldv_handler_precall();
 8116 				at91ether_poll_controller( var_group1);
 8117 				/* LDV_COMMENT_BEGIN_PREP */
 8118 				#endif
 8119 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8120 				#endif
 8121 				#endif 
 8122 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8123 				#endif
 8124 				/* LDV_COMMENT_END_PREP */
 8125 				
 8126 
 8127 				
 8128 
 8129 			}
 8130 
 8131 			break;
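			/*
			 * Cases 19-24 above together exercise the callbacks that the driver
			 * publishes through its net_device_ops table. A sketch of that binding,
			 * assuming the field-to-function mapping named in the LDV comments above
			 * (the exact initializer in the driver source may list further fields):
			 *
			 *     static const struct net_device_ops at91ether_netdev_ops = {
			 *         .ndo_open            = at91ether_open,
			 *         .ndo_stop            = at91ether_close,
			 *         .ndo_start_xmit      = at91ether_start_xmit,
			 *         .ndo_get_stats       = macb_get_stats,
			 *         .ndo_set_rx_mode     = macb_set_rx_mode,
			 *         .ndo_do_ioctl        = macb_ioctl,
			 *     #ifdef CONFIG_NET_POLL_CONTROLLER
			 *         .ndo_poll_controller = at91ether_poll_controller,
			 *     #endif
			 *     };
			 */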
 8132 			case 25: {
 8133 
 8134 				/** STRUCT: struct type: macb_config, struct name: at91sam9260_config **/
 8135 				
 8136 
 8137 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8138 				/* LDV_COMMENT_BEGIN_PREP */
 8139 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8140 				#define MACB_RX_BUFFER_SIZE	128
 8141 				#define RX_BUFFER_MULTIPLE	64  
 8142 				#define RX_RING_SIZE		512 
 8143 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8144 				#define TX_RING_SIZE		128 
 8145 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8146 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8147 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8148 				 | MACB_BIT(ISR_ROVR))
 8149 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8150 					| MACB_BIT(ISR_RLE)		\
 8151 					| MACB_BIT(TXERR))
 8152 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8153 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8154 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8155 				#define GEM_MTU_MIN_SIZE	68
 8156 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8157 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8158 				#define MACB_HALT_TIMEOUT	1230
 8159 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8160 				#endif
 8161 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8162 				#endif
 8163 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8164 				#endif
 8165 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8166 				#endif
 8167 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8168 				#endif
 8169 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8170 				#endif
 8171 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8172 				#endif
 8173 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8174 				#endif
 8175 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8176 				#endif
 8177 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8178 				#endif
 8179 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8180 				#endif
 8181 				/* LDV_COMMENT_END_PREP */
 8182 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "at91sam9260_config" */
 8183 				ldv_handler_precall();
 8184 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8185 				/* LDV_COMMENT_BEGIN_PREP */
 8186 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8187 				#endif
 8188 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8189 				#endif
 8190 				#if defined(CONFIG_OF)
 8191 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8192 				#define AT91ETHER_MAX_RX_DESCR	9
 8193 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8194 				#endif
 8195 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8196 				#endif
 8197 				#endif 
 8198 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8199 				#endif
 8200 				/* LDV_COMMENT_END_PREP */
 8201 				
 8202 
 8203 				
 8204 
 8205 			}
 8206 
 8207 			break;
 8208 			case 26: {
 8209 
 8210 				/** STRUCT: struct type: macb_config, struct name: at91sam9260_config **/
 8211 				
 8212 
 8213 				/* content: static int macb_init(struct platform_device *pdev)*/
 8214 				/* LDV_COMMENT_BEGIN_PREP */
 8215 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8216 				#define MACB_RX_BUFFER_SIZE	128
 8217 				#define RX_BUFFER_MULTIPLE	64  
 8218 				#define RX_RING_SIZE		512 
 8219 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8220 				#define TX_RING_SIZE		128 
 8221 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8222 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8223 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8224 				 | MACB_BIT(ISR_ROVR))
 8225 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8226 					| MACB_BIT(ISR_RLE)		\
 8227 					| MACB_BIT(TXERR))
 8228 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8229 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8230 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8231 				#define GEM_MTU_MIN_SIZE	68
 8232 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8233 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8234 				#define MACB_HALT_TIMEOUT	1230
 8235 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8236 				#endif
 8237 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8238 				#endif
 8239 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8240 				#endif
 8241 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8242 				#endif
 8243 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8244 				#endif
 8245 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8246 				#endif
 8247 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8248 				#endif
 8249 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8250 				#endif
 8251 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8252 				#endif
 8253 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8254 				#endif
 8255 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8256 				#endif
 8257 				/* LDV_COMMENT_END_PREP */
 8258 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "at91sam9260_config" */
 8259 				ldv_handler_precall();
 8260 				macb_init( var_group7);
 8261 				/* LDV_COMMENT_BEGIN_PREP */
 8262 				#if defined(CONFIG_OF)
 8263 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8264 				#define AT91ETHER_MAX_RX_DESCR	9
 8265 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8266 				#endif
 8267 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8268 				#endif
 8269 				#endif 
 8270 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8271 				#endif
 8272 				/* LDV_COMMENT_END_PREP */
 8273 				
 8274 
 8275 				
 8276 
 8277 			}
 8278 
 8279 			break;
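			/*
			 * Cases 25-26, and the pairs that follow for pc302gem, sama5d2, sama5d3,
			 * sama5d4, emac, np4, zynqmp and zynq, all drive the same two macb_config
			 * hooks: clk_init() followed by init(). A sketch of one such per-SoC
			 * configuration entry, assuming the structure layout implied by the calls
			 * above (other struct macb_config fields are omitted):
			 *
			 *     static const struct macb_config at91sam9260_config = {
			 *         .clk_init = macb_clk_init,   // acquire pclk/hclk/tx_clk/rx_clk
			 *         .init     = macb_init,       // set up rings, DMA and the MAC
			 *     };
			 */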
 8280 			case 27: {
 8281 
 8282 				/** STRUCT: struct type: macb_config, struct name: pc302gem_config **/
 8283 				
 8284 
 8285 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8286 				/* LDV_COMMENT_BEGIN_PREP */
 8287 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8288 				#define MACB_RX_BUFFER_SIZE	128
 8289 				#define RX_BUFFER_MULTIPLE	64  
 8290 				#define RX_RING_SIZE		512 
 8291 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8292 				#define TX_RING_SIZE		128 
 8293 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8294 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8295 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8296 				 | MACB_BIT(ISR_ROVR))
 8297 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8298 					| MACB_BIT(ISR_RLE)		\
 8299 					| MACB_BIT(TXERR))
 8300 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8301 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8302 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8303 				#define GEM_MTU_MIN_SIZE	68
 8304 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8305 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8306 				#define MACB_HALT_TIMEOUT	1230
 8307 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8308 				#endif
 8309 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8310 				#endif
 8311 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8312 				#endif
 8313 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8314 				#endif
 8315 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8316 				#endif
 8317 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8318 				#endif
 8319 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8320 				#endif
 8321 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8322 				#endif
 8323 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8324 				#endif
 8325 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8326 				#endif
 8327 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8328 				#endif
 8329 				/* LDV_COMMENT_END_PREP */
 8330 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "pc302gem_config" */
 8331 				ldv_handler_precall();
 8332 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8333 				/* LDV_COMMENT_BEGIN_PREP */
 8334 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8335 				#endif
 8336 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8337 				#endif
 8338 				#if defined(CONFIG_OF)
 8339 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8340 				#define AT91ETHER_MAX_RX_DESCR	9
 8341 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8342 				#endif
 8343 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8344 				#endif
 8345 				#endif 
 8346 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8347 				#endif
 8348 				/* LDV_COMMENT_END_PREP */
 8349 				
 8350 
 8351 				
 8352 
 8353 			}
 8354 
 8355 			break;
 8356 			case 28: {
 8357 
 8358 				/** STRUCT: struct type: macb_config, struct name: pc302gem_config **/
 8359 				
 8360 
 8361 				/* content: static int macb_init(struct platform_device *pdev)*/
 8362 				/* LDV_COMMENT_BEGIN_PREP */
 8363 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8364 				#define MACB_RX_BUFFER_SIZE	128
 8365 				#define RX_BUFFER_MULTIPLE	64  
 8366 				#define RX_RING_SIZE		512 
 8367 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8368 				#define TX_RING_SIZE		128 
 8369 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8370 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8371 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8372 				 | MACB_BIT(ISR_ROVR))
 8373 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8374 					| MACB_BIT(ISR_RLE)		\
 8375 					| MACB_BIT(TXERR))
 8376 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8377 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8378 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8379 				#define GEM_MTU_MIN_SIZE	68
 8380 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8381 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8382 				#define MACB_HALT_TIMEOUT	1230
 8383 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8384 				#endif
 8385 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8386 				#endif
 8387 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8388 				#endif
 8389 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8390 				#endif
 8391 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8392 				#endif
 8393 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8394 				#endif
 8395 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8396 				#endif
 8397 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8398 				#endif
 8399 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8400 				#endif
 8401 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8402 				#endif
 8403 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8404 				#endif
 8405 				/* LDV_COMMENT_END_PREP */
 8406 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "pc302gem_config" */
 8407 				ldv_handler_precall();
 8408 				macb_init( var_group7);
 8409 				/* LDV_COMMENT_BEGIN_PREP */
 8410 				#if defined(CONFIG_OF)
 8411 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8412 				#define AT91ETHER_MAX_RX_DESCR	9
 8413 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8414 				#endif
 8415 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8416 				#endif
 8417 				#endif 
 8418 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8419 				#endif
 8420 				/* LDV_COMMENT_END_PREP */
 8421 				
 8422 
 8423 				
 8424 
 8425 			}
 8426 
 8427 			break;
 8428 			case 29: {
 8429 
 8430 				/** STRUCT: struct type: macb_config, struct name: sama5d2_config **/
 8431 				
 8432 
 8433 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8434 				/* LDV_COMMENT_BEGIN_PREP */
 8435 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8436 				#define MACB_RX_BUFFER_SIZE	128
 8437 				#define RX_BUFFER_MULTIPLE	64  
 8438 				#define RX_RING_SIZE		512 
 8439 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8440 				#define TX_RING_SIZE		128 
 8441 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8442 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8443 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8444 				 | MACB_BIT(ISR_ROVR))
 8445 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8446 					| MACB_BIT(ISR_RLE)		\
 8447 					| MACB_BIT(TXERR))
 8448 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8449 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8450 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8451 				#define GEM_MTU_MIN_SIZE	68
 8452 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8453 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8454 				#define MACB_HALT_TIMEOUT	1230
 8455 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8456 				#endif
 8457 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8458 				#endif
 8459 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8460 				#endif
 8461 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8462 				#endif
 8463 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8464 				#endif
 8465 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8466 				#endif
 8467 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8468 				#endif
 8469 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8470 				#endif
 8471 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8472 				#endif
 8473 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8474 				#endif
 8475 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8476 				#endif
 8477 				/* LDV_COMMENT_END_PREP */
 8478 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "sama5d2_config" */
 8479 				ldv_handler_precall();
 8480 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8481 				/* LDV_COMMENT_BEGIN_PREP */
 8482 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8483 				#endif
 8484 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8485 				#endif
 8486 				#if defined(CONFIG_OF)
 8487 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8488 				#define AT91ETHER_MAX_RX_DESCR	9
 8489 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8490 				#endif
 8491 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8492 				#endif
 8493 				#endif 
 8494 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8495 				#endif
 8496 				/* LDV_COMMENT_END_PREP */
 8497 				
 8498 
 8499 				
 8500 
 8501 			}
 8502 
 8503 			break;
 8504 			case 30: {
 8505 
 8506 				/** STRUCT: struct type: macb_config, struct name: sama5d2_config **/
 8507 				
 8508 
 8509 				/* content: static int macb_init(struct platform_device *pdev)*/
 8510 				/* LDV_COMMENT_BEGIN_PREP */
 8511 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8512 				#define MACB_RX_BUFFER_SIZE	128
 8513 				#define RX_BUFFER_MULTIPLE	64  
 8514 				#define RX_RING_SIZE		512 
 8515 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8516 				#define TX_RING_SIZE		128 
 8517 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8518 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8519 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8520 				 | MACB_BIT(ISR_ROVR))
 8521 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8522 					| MACB_BIT(ISR_RLE)		\
 8523 					| MACB_BIT(TXERR))
 8524 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8525 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8526 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8527 				#define GEM_MTU_MIN_SIZE	68
 8528 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8529 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8530 				#define MACB_HALT_TIMEOUT	1230
 8531 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8532 				#endif
 8533 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8534 				#endif
 8535 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8536 				#endif
 8537 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8538 				#endif
 8539 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8540 				#endif
 8541 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8542 				#endif
 8543 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8544 				#endif
 8545 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8546 				#endif
 8547 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8548 				#endif
 8549 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8550 				#endif
 8551 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8552 				#endif
 8553 				/* LDV_COMMENT_END_PREP */
 8554 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "sama5d2_config" */
 8555 				ldv_handler_precall();
 8556 				macb_init( var_group7);
 8557 				/* LDV_COMMENT_BEGIN_PREP */
 8558 				#if defined(CONFIG_OF)
 8559 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8560 				#define AT91ETHER_MAX_RX_DESCR	9
 8561 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8562 				#endif
 8563 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8564 				#endif
 8565 				#endif 
 8566 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8567 				#endif
 8568 				/* LDV_COMMENT_END_PREP */
 8569 				
 8570 
 8571 				
 8572 
 8573 			}
 8574 
 8575 			break;
 8576 			case 31: {
 8577 
 8578 				/** STRUCT: struct type: macb_config, struct name: sama5d3_config **/
 8579 				
 8580 
 8581 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8582 				/* LDV_COMMENT_BEGIN_PREP */
 8583 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8584 				#define MACB_RX_BUFFER_SIZE	128
 8585 				#define RX_BUFFER_MULTIPLE	64  
 8586 				#define RX_RING_SIZE		512 
 8587 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8588 				#define TX_RING_SIZE		128 
 8589 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8590 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8591 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8592 				 | MACB_BIT(ISR_ROVR))
 8593 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8594 					| MACB_BIT(ISR_RLE)		\
 8595 					| MACB_BIT(TXERR))
 8596 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8597 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8598 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8599 				#define GEM_MTU_MIN_SIZE	68
 8600 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8601 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8602 				#define MACB_HALT_TIMEOUT	1230
 8603 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8604 				#endif
 8605 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8606 				#endif
 8607 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8608 				#endif
 8609 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8610 				#endif
 8611 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8612 				#endif
 8613 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8614 				#endif
 8615 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8616 				#endif
 8617 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8618 				#endif
 8619 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8620 				#endif
 8621 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8622 				#endif
 8623 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8624 				#endif
 8625 				/* LDV_COMMENT_END_PREP */
 8626 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "sama5d3_config" */
 8627 				ldv_handler_precall();
 8628 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8629 				/* LDV_COMMENT_BEGIN_PREP */
 8630 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8631 				#endif
 8632 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8633 				#endif
 8634 				#if defined(CONFIG_OF)
 8635 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8636 				#define AT91ETHER_MAX_RX_DESCR	9
 8637 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8638 				#endif
 8639 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8640 				#endif
 8641 				#endif 
 8642 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8643 				#endif
 8644 				/* LDV_COMMENT_END_PREP */
 8645 				
 8646 
 8647 				
 8648 
 8649 			}
 8650 
 8651 			break;
 8652 			case 32: {
 8653 
 8654 				/** STRUCT: struct type: macb_config, struct name: sama5d3_config **/
 8655 				
 8656 
 8657 				/* content: static int macb_init(struct platform_device *pdev)*/
 8658 				/* LDV_COMMENT_BEGIN_PREP */
 8659 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8660 				#define MACB_RX_BUFFER_SIZE	128
 8661 				#define RX_BUFFER_MULTIPLE	64  
 8662 				#define RX_RING_SIZE		512 
 8663 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8664 				#define TX_RING_SIZE		128 
 8665 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8666 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8667 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8668 				 | MACB_BIT(ISR_ROVR))
 8669 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8670 					| MACB_BIT(ISR_RLE)		\
 8671 					| MACB_BIT(TXERR))
 8672 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8673 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8674 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8675 				#define GEM_MTU_MIN_SIZE	68
 8676 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8677 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8678 				#define MACB_HALT_TIMEOUT	1230
 8679 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8680 				#endif
 8681 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8682 				#endif
 8683 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8684 				#endif
 8685 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8686 				#endif
 8687 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8688 				#endif
 8689 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8690 				#endif
 8691 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8692 				#endif
 8693 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8694 				#endif
 8695 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8696 				#endif
 8697 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8698 				#endif
 8699 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8700 				#endif
 8701 				/* LDV_COMMENT_END_PREP */
 8702 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "sama5d3_config" */
 8703 				ldv_handler_precall();
 8704 				macb_init( var_group7);
 8705 				/* LDV_COMMENT_BEGIN_PREP */
 8706 				#if defined(CONFIG_OF)
 8707 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8708 				#define AT91ETHER_MAX_RX_DESCR	9
 8709 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8710 				#endif
 8711 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8712 				#endif
 8713 				#endif 
 8714 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8715 				#endif
 8716 				/* LDV_COMMENT_END_PREP */
 8717 				
 8718 
 8719 				
 8720 
 8721 			}
 8722 
 8723 			break;
 8724 			case 33: {
 8725 
 8726 				/** STRUCT: struct type: macb_config, struct name: sama5d4_config **/
 8727 				
 8728 
 8729 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8730 				/* LDV_COMMENT_BEGIN_PREP */
 8731 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8732 				#define MACB_RX_BUFFER_SIZE	128
 8733 				#define RX_BUFFER_MULTIPLE	64  
 8734 				#define RX_RING_SIZE		512 
 8735 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8736 				#define TX_RING_SIZE		128 
 8737 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8738 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8739 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8740 				 | MACB_BIT(ISR_ROVR))
 8741 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8742 					| MACB_BIT(ISR_RLE)		\
 8743 					| MACB_BIT(TXERR))
 8744 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8745 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8746 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8747 				#define GEM_MTU_MIN_SIZE	68
 8748 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8749 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8750 				#define MACB_HALT_TIMEOUT	1230
 8751 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8752 				#endif
 8753 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8754 				#endif
 8755 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8756 				#endif
 8757 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8758 				#endif
 8759 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8760 				#endif
 8761 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8762 				#endif
 8763 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8764 				#endif
 8765 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8766 				#endif
 8767 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8768 				#endif
 8769 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8770 				#endif
 8771 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8772 				#endif
 8773 				/* LDV_COMMENT_END_PREP */
 8774 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "sama5d4_config" */
 8775 				ldv_handler_precall();
 8776 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8777 				/* LDV_COMMENT_BEGIN_PREP */
 8778 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8779 				#endif
 8780 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8781 				#endif
 8782 				#if defined(CONFIG_OF)
 8783 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8784 				#define AT91ETHER_MAX_RX_DESCR	9
 8785 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8786 				#endif
 8787 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8788 				#endif
 8789 				#endif 
 8790 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8791 				#endif
 8792 				/* LDV_COMMENT_END_PREP */
 8793 				
 8794 
 8795 				
 8796 
 8797 			}
 8798 
 8799 			break;
 8800 			case 34: {
 8801 
 8802 				/** STRUCT: struct type: macb_config, struct name: sama5d4_config **/
 8803 				
 8804 
 8805 				/* content: static int macb_init(struct platform_device *pdev)*/
 8806 				/* LDV_COMMENT_BEGIN_PREP */
 8807 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8808 				#define MACB_RX_BUFFER_SIZE	128
 8809 				#define RX_BUFFER_MULTIPLE	64  
 8810 				#define RX_RING_SIZE		512 
 8811 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8812 				#define TX_RING_SIZE		128 
 8813 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8814 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8815 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8816 				 | MACB_BIT(ISR_ROVR))
 8817 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8818 					| MACB_BIT(ISR_RLE)		\
 8819 					| MACB_BIT(TXERR))
 8820 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8821 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8822 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8823 				#define GEM_MTU_MIN_SIZE	68
 8824 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8825 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8826 				#define MACB_HALT_TIMEOUT	1230
 8827 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8828 				#endif
 8829 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8830 				#endif
 8831 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8832 				#endif
 8833 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8834 				#endif
 8835 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8836 				#endif
 8837 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8838 				#endif
 8839 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8840 				#endif
 8841 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8842 				#endif
 8843 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8844 				#endif
 8845 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8846 				#endif
 8847 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8848 				#endif
 8849 				/* LDV_COMMENT_END_PREP */
 8850 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "sama5d4_config" */
 8851 				ldv_handler_precall();
 8852 				macb_init( var_group7);
 8853 				/* LDV_COMMENT_BEGIN_PREP */
 8854 				#if defined(CONFIG_OF)
 8855 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8856 				#define AT91ETHER_MAX_RX_DESCR	9
 8857 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8858 				#endif
 8859 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8860 				#endif
 8861 				#endif 
 8862 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8863 				#endif
 8864 				/* LDV_COMMENT_END_PREP */
 8865 				
 8866 
 8867 				
 8868 
 8869 			}
 8870 
 8871 			break;
 8872 			case 35: {
 8873 
 8874 				/** STRUCT: struct type: macb_config, struct name: emac_config **/
 8875 				
 8876 
 8877 				/* content: static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8878 				/* LDV_COMMENT_BEGIN_PREP */
 8879 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8880 				#define MACB_RX_BUFFER_SIZE	128
 8881 				#define RX_BUFFER_MULTIPLE	64  
 8882 				#define RX_RING_SIZE		512 
 8883 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8884 				#define TX_RING_SIZE		128 
 8885 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8886 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8887 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8888 				 | MACB_BIT(ISR_ROVR))
 8889 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8890 					| MACB_BIT(ISR_RLE)		\
 8891 					| MACB_BIT(TXERR))
 8892 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8893 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8894 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8895 				#define GEM_MTU_MIN_SIZE	68
 8896 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8897 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8898 				#define MACB_HALT_TIMEOUT	1230
 8899 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8900 				#endif
 8901 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8902 				#endif
 8903 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8904 				#endif
 8905 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8906 				#endif
 8907 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8908 				#endif
 8909 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8910 				#endif
 8911 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8912 				#endif
 8913 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8914 				#endif
 8915 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8916 				#endif
 8917 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8918 				#endif
 8919 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8920 				#endif
 8921 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8922 				#endif
 8923 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8924 				#endif
 8925 				#if defined(CONFIG_OF)
 8926 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8927 				#define AT91ETHER_MAX_RX_DESCR	9
 8928 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8929 				#endif
 8930 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8931 				#endif
 8932 				/* LDV_COMMENT_END_PREP */
 8933 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "emac_config" */
 8934 				ldv_handler_precall();
 8935 				at91ether_clk_init( var_group7, var_group8, var_at91ether_clk_init_84_p2, var_at91ether_clk_init_84_p3, var_at91ether_clk_init_84_p4);
 8936 				/* LDV_COMMENT_BEGIN_PREP */
 8937 				#endif 
 8938 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8939 				#endif
 8940 				/* LDV_COMMENT_END_PREP */
 8941 				
 8942 
 8943 				
 8944 
 8945 			}
 8946 
 8947 			break;
 8948 			case 36: {
 8949 
 8950 				/** STRUCT: struct type: macb_config, struct name: emac_config **/
 8951 				
 8952 
 8953 				/* content: static int at91ether_init(struct platform_device *pdev)*/
 8954 				/* LDV_COMMENT_BEGIN_PREP */
 8955 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8956 				#define MACB_RX_BUFFER_SIZE	128
 8957 				#define RX_BUFFER_MULTIPLE	64  
 8958 				#define RX_RING_SIZE		512 
 8959 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8960 				#define TX_RING_SIZE		128 
 8961 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8962 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8963 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8964 				 | MACB_BIT(ISR_ROVR))
 8965 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8966 					| MACB_BIT(ISR_RLE)		\
 8967 					| MACB_BIT(TXERR))
 8968 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8969 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8970 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8971 				#define GEM_MTU_MIN_SIZE	68
 8972 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8973 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8974 				#define MACB_HALT_TIMEOUT	1230
 8975 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8976 				#endif
 8977 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8978 				#endif
 8979 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8980 				#endif
 8981 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8982 				#endif
 8983 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8984 				#endif
 8985 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8986 				#endif
 8987 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8988 				#endif
 8989 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8990 				#endif
 8991 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8992 				#endif
 8993 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8994 				#endif
 8995 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8996 				#endif
 8997 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8998 				#endif
 8999 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9000 				#endif
 9001 				#if defined(CONFIG_OF)
 9002 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9003 				#define AT91ETHER_MAX_RX_DESCR	9
 9004 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9005 				#endif
 9006 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9007 				#endif
 9008 				/* LDV_COMMENT_END_PREP */
 9009 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "emac_config" */
 9010 				ldv_handler_precall();
 9011 				at91ether_init( var_group7);
 9012 				/* LDV_COMMENT_BEGIN_PREP */
 9013 				#endif 
 9014 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9015 				#endif
 9016 				/* LDV_COMMENT_END_PREP */
 9017 				
 9018 
 9019 				
 9020 
 9021 			}
 9022 
 9023 			break;
 9024 			case 37: {
 9025 
 9026 				/** STRUCT: struct type: macb_config, struct name: np4_config **/
 9027 				
 9028 
 9029 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 9030 				/* LDV_COMMENT_BEGIN_PREP */
 9031 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9032 				#define MACB_RX_BUFFER_SIZE	128
 9033 				#define RX_BUFFER_MULTIPLE	64  
 9034 				#define RX_RING_SIZE		512 
 9035 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9036 				#define TX_RING_SIZE		128 
 9037 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9038 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9039 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9040 				 | MACB_BIT(ISR_ROVR))
 9041 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9042 					| MACB_BIT(ISR_RLE)		\
 9043 					| MACB_BIT(TXERR))
 9044 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9045 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9046 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9047 				#define GEM_MTU_MIN_SIZE	68
 9048 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9049 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9050 				#define MACB_HALT_TIMEOUT	1230
 9051 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9052 				#endif
 9053 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9054 				#endif
 9055 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9056 				#endif
 9057 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9058 				#endif
 9059 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9060 				#endif
 9061 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9062 				#endif
 9063 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9064 				#endif
 9065 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9066 				#endif
 9067 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9068 				#endif
 9069 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9070 				#endif
 9071 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9072 				#endif
 9073 				/* LDV_COMMENT_END_PREP */
 9074 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "np4_config" */
 9075 				ldv_handler_precall();
 9076 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 9077 				/* LDV_COMMENT_BEGIN_PREP */
 9078 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9079 				#endif
 9080 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9081 				#endif
 9082 				#if defined(CONFIG_OF)
 9083 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9084 				#define AT91ETHER_MAX_RX_DESCR	9
 9085 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9086 				#endif
 9087 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9088 				#endif
 9089 				#endif 
 9090 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9091 				#endif
 9092 				/* LDV_COMMENT_END_PREP */
 9093 				
 9094 
 9095 				
 9096 
 9097 			}
 9098 
 9099 			break;
 9100 			case 38: {
 9101 
 9102 				/** STRUCT: struct type: macb_config, struct name: np4_config **/
 9103 				
 9104 
 9105 				/* content: static int macb_init(struct platform_device *pdev)*/
 9106 				/* LDV_COMMENT_BEGIN_PREP */
 9107 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9108 				#define MACB_RX_BUFFER_SIZE	128
 9109 				#define RX_BUFFER_MULTIPLE	64  
 9110 				#define RX_RING_SIZE		512 
 9111 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9112 				#define TX_RING_SIZE		128 
 9113 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9114 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9115 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9116 				 | MACB_BIT(ISR_ROVR))
 9117 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9118 					| MACB_BIT(ISR_RLE)		\
 9119 					| MACB_BIT(TXERR))
 9120 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9121 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9122 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9123 				#define GEM_MTU_MIN_SIZE	68
 9124 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9125 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9126 				#define MACB_HALT_TIMEOUT	1230
 9127 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9128 				#endif
 9129 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9130 				#endif
 9131 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9132 				#endif
 9133 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9134 				#endif
 9135 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9136 				#endif
 9137 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9138 				#endif
 9139 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9140 				#endif
 9141 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9142 				#endif
 9143 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9144 				#endif
 9145 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9146 				#endif
 9147 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9148 				#endif
 9149 				/* LDV_COMMENT_END_PREP */
 9150 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "np4_config" */
 9151 				ldv_handler_precall();
 9152 				macb_init( var_group7);
 9153 				/* LDV_COMMENT_BEGIN_PREP */
 9154 				#if defined(CONFIG_OF)
 9155 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9156 				#define AT91ETHER_MAX_RX_DESCR	9
 9157 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9158 				#endif
 9159 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9160 				#endif
 9161 				#endif 
 9162 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9163 				#endif
 9164 				/* LDV_COMMENT_END_PREP */
 9165 				
 9166 
 9167 				
 9168 
 9169 			}
 9170 
 9171 			break;
 9172 			case 39: {
 9173 
 9174 				/** STRUCT: struct type: macb_config, struct name: zynqmp_config **/
 9175 				
 9176 
 9177 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 9178 				/* LDV_COMMENT_BEGIN_PREP */
 9179 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9180 				#define MACB_RX_BUFFER_SIZE	128
 9181 				#define RX_BUFFER_MULTIPLE	64  
 9182 				#define RX_RING_SIZE		512 
 9183 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9184 				#define TX_RING_SIZE		128 
 9185 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9186 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9187 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9188 				 | MACB_BIT(ISR_ROVR))
 9189 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9190 					| MACB_BIT(ISR_RLE)		\
 9191 					| MACB_BIT(TXERR))
 9192 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9193 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9194 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9195 				#define GEM_MTU_MIN_SIZE	68
 9196 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9197 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9198 				#define MACB_HALT_TIMEOUT	1230
 9199 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9200 				#endif
 9201 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9202 				#endif
 9203 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9204 				#endif
 9205 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9206 				#endif
 9207 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9208 				#endif
 9209 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9210 				#endif
 9211 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9212 				#endif
 9213 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9214 				#endif
 9215 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9216 				#endif
 9217 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9218 				#endif
 9219 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9220 				#endif
 9221 				/* LDV_COMMENT_END_PREP */
 9222 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "zynqmp_config" */
 9223 				ldv_handler_precall();
 9224 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 9225 				/* LDV_COMMENT_BEGIN_PREP */
 9226 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9227 				#endif
 9228 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9229 				#endif
 9230 				#if defined(CONFIG_OF)
 9231 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9232 				#define AT91ETHER_MAX_RX_DESCR	9
 9233 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9234 				#endif
 9235 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9236 				#endif
 9237 				#endif 
 9238 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9239 				#endif
 9240 				/* LDV_COMMENT_END_PREP */
 9241 				
 9242 
 9243 				
 9244 
 9245 			}
 9246 
 9247 			break;
 9248 			case 40: {
 9249 
 9250 				/** STRUCT: struct type: macb_config, struct name: zynqmp_config **/
 9251 				
 9252 
 9253 				/* content: static int macb_init(struct platform_device *pdev)*/
 9254 				/* LDV_COMMENT_BEGIN_PREP */
 9255 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9256 				#define MACB_RX_BUFFER_SIZE	128
 9257 				#define RX_BUFFER_MULTIPLE	64  
 9258 				#define RX_RING_SIZE		512 
 9259 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9260 				#define TX_RING_SIZE		128 
 9261 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9262 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9263 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9264 				 | MACB_BIT(ISR_ROVR))
 9265 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9266 					| MACB_BIT(ISR_RLE)		\
 9267 					| MACB_BIT(TXERR))
 9268 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9269 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9270 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9271 				#define GEM_MTU_MIN_SIZE	68
 9272 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9273 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9274 				#define MACB_HALT_TIMEOUT	1230
 9275 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9276 				#endif
 9277 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9278 				#endif
 9279 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9280 				#endif
 9281 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9282 				#endif
 9283 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9284 				#endif
 9285 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9286 				#endif
 9287 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9288 				#endif
 9289 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9290 				#endif
 9291 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9292 				#endif
 9293 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9294 				#endif
 9295 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9296 				#endif
 9297 				/* LDV_COMMENT_END_PREP */
 9298 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "zynqmp_config" */
 9299 				ldv_handler_precall();
 9300 				macb_init( var_group7);
 9301 				/* LDV_COMMENT_BEGIN_PREP */
 9302 				#if defined(CONFIG_OF)
 9303 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9304 				#define AT91ETHER_MAX_RX_DESCR	9
 9305 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9306 				#endif
 9307 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9308 				#endif
 9309 				#endif 
 9310 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9311 				#endif
 9312 				/* LDV_COMMENT_END_PREP */
 9313 				
 9314 
 9315 				
 9316 
 9317 			}
 9318 
 9319 			break;
 9320 			case 41: {
 9321 
 9322 				/** STRUCT: struct type: macb_config, struct name: zynq_config **/
 9323 				
 9324 
 9325 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 9326 				/* LDV_COMMENT_BEGIN_PREP */
 9327 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9328 				#define MACB_RX_BUFFER_SIZE	128
 9329 				#define RX_BUFFER_MULTIPLE	64  
 9330 				#define RX_RING_SIZE		512 
 9331 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9332 				#define TX_RING_SIZE		128 
 9333 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9334 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9335 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9336 				 | MACB_BIT(ISR_ROVR))
 9337 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9338 					| MACB_BIT(ISR_RLE)		\
 9339 					| MACB_BIT(TXERR))
 9340 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9341 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9342 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9343 				#define GEM_MTU_MIN_SIZE	68
 9344 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9345 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9346 				#define MACB_HALT_TIMEOUT	1230
 9347 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9348 				#endif
 9349 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9350 				#endif
 9351 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9352 				#endif
 9353 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9354 				#endif
 9355 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9356 				#endif
 9357 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9358 				#endif
 9359 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9360 				#endif
 9361 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9362 				#endif
 9363 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9364 				#endif
 9365 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9366 				#endif
 9367 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9368 				#endif
 9369 				/* LDV_COMMENT_END_PREP */
 9370 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "zynq_config" */
 9371 				ldv_handler_precall();
 9372 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 9373 				/* LDV_COMMENT_BEGIN_PREP */
 9374 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9375 				#endif
 9376 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9377 				#endif
 9378 				#if defined(CONFIG_OF)
 9379 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9380 				#define AT91ETHER_MAX_RX_DESCR	9
 9381 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9382 				#endif
 9383 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9384 				#endif
 9385 				#endif 
 9386 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9387 				#endif
 9388 				/* LDV_COMMENT_END_PREP */
 9389 				
 9390 
 9391 				
 9392 
 9393 			}
 9394 
 9395 			break;
 9396 			case 42: {
 9397 
 9398 				/** STRUCT: struct type: macb_config, struct name: zynq_config **/
 9399 				
 9400 
 9401 				/* content: static int macb_init(struct platform_device *pdev)*/
 9402 				/* LDV_COMMENT_BEGIN_PREP */
 9403 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9404 				#define MACB_RX_BUFFER_SIZE	128
 9405 				#define RX_BUFFER_MULTIPLE	64  
 9406 				#define RX_RING_SIZE		512 
 9407 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9408 				#define TX_RING_SIZE		128 
 9409 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9410 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9411 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9412 				 | MACB_BIT(ISR_ROVR))
 9413 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9414 					| MACB_BIT(ISR_RLE)		\
 9415 					| MACB_BIT(TXERR))
 9416 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9417 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9418 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9419 				#define GEM_MTU_MIN_SIZE	68
 9420 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9421 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9422 				#define MACB_HALT_TIMEOUT	1230
 9423 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9424 				#endif
 9425 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9426 				#endif
 9427 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9428 				#endif
 9429 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9430 				#endif
 9431 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9432 				#endif
 9433 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9434 				#endif
 9435 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9436 				#endif
 9437 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9438 				#endif
 9439 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9440 				#endif
 9441 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9442 				#endif
 9443 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9444 				#endif
 9445 				/* LDV_COMMENT_END_PREP */
 9446 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "zynq_config" */
 9447 				ldv_handler_precall();
 9448 				macb_init( var_group7);
 9449 				/* LDV_COMMENT_BEGIN_PREP */
 9450 				#if defined(CONFIG_OF)
 9451 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9452 				#define AT91ETHER_MAX_RX_DESCR	9
 9453 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9454 				#endif
 9455 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9456 				#endif
 9457 				#endif 
 9458 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9459 				#endif
 9460 				/* LDV_COMMENT_END_PREP */
 9461 				
 9462 
 9463 				
 9464 
 9465 			}
 9466 
 9467 			break;
 9468 			case 43: {
 9469 
 9470 				/** STRUCT: struct type: platform_driver, struct name: macb_driver **/
 9471 				if(ldv_s_macb_driver_platform_driver==0) {
 9472 
 9473 				/* content: static int macb_probe(struct platform_device *pdev)*/
 9474 				/* LDV_COMMENT_BEGIN_PREP */
 9475 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9476 				#define MACB_RX_BUFFER_SIZE	128
 9477 				#define RX_BUFFER_MULTIPLE	64  
 9478 				#define RX_RING_SIZE		512 
 9479 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9480 				#define TX_RING_SIZE		128 
 9481 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9482 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9483 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9484 				 | MACB_BIT(ISR_ROVR))
 9485 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9486 					| MACB_BIT(ISR_RLE)		\
 9487 					| MACB_BIT(TXERR))
 9488 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9489 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9490 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9491 				#define GEM_MTU_MIN_SIZE	68
 9492 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9493 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9494 				#define MACB_HALT_TIMEOUT	1230
 9495 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9496 				#endif
 9497 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9498 				#endif
 9499 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9500 				#endif
 9501 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9502 				#endif
 9503 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9504 				#endif
 9505 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9506 				#endif
 9507 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9508 				#endif
 9509 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9510 				#endif
 9511 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9512 				#endif
 9513 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9514 				#endif
 9515 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9516 				#endif
 9517 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9518 				#endif
 9519 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9520 				#endif
 9521 				#if defined(CONFIG_OF)
 9522 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9523 				#define AT91ETHER_MAX_RX_DESCR	9
 9524 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9525 				#endif
 9526 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9527 				#endif
 9528 				#endif 
 9529 				/* LDV_COMMENT_END_PREP */
 9530 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "macb_driver". Standard function test for correct return result. */
 9531 				res_macb_probe_86 = macb_probe( var_group7);
 9532 				 ldv_check_return_value(res_macb_probe_86);
 9533 				 ldv_check_return_value_probe(res_macb_probe_86);
 9534 				 if(res_macb_probe_86) 
 9535 					goto ldv_module_exit;
 9536 				ldv_s_macb_driver_platform_driver++;
 9537 
 9538 				}
 9539 
 9540 			}
 9541 
 9542 			break;
 9543 			case 44: {
 9544 
 9545 				/** STRUCT: struct type: platform_driver, struct name: macb_driver **/
 9546 				if(ldv_s_macb_driver_platform_driver==1) {
 9547 
 9548 				/* content: static int macb_remove(struct platform_device *pdev)*/
 9549 				/* LDV_COMMENT_BEGIN_PREP */
 9550 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9551 				#define MACB_RX_BUFFER_SIZE	128
 9552 				#define RX_BUFFER_MULTIPLE	64  
 9553 				#define RX_RING_SIZE		512 
 9554 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9555 				#define TX_RING_SIZE		128 
 9556 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9557 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9558 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9559 				 | MACB_BIT(ISR_ROVR))
 9560 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9561 					| MACB_BIT(ISR_RLE)		\
 9562 					| MACB_BIT(TXERR))
 9563 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9564 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9565 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9566 				#define GEM_MTU_MIN_SIZE	68
 9567 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9568 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9569 				#define MACB_HALT_TIMEOUT	1230
 9570 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9571 				#endif
 9572 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9573 				#endif
 9574 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9575 				#endif
 9576 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9577 				#endif
 9578 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9579 				#endif
 9580 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9581 				#endif
 9582 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9583 				#endif
 9584 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9585 				#endif
 9586 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9587 				#endif
 9588 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9589 				#endif
 9590 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9591 				#endif
 9592 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9593 				#endif
 9594 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9595 				#endif
 9596 				#if defined(CONFIG_OF)
 9597 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9598 				#define AT91ETHER_MAX_RX_DESCR	9
 9599 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9600 				#endif
 9601 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9602 				#endif
 9603 				#endif 
 9604 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9605 				#endif
 9606 				/* LDV_COMMENT_END_PREP */
 9607 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "macb_driver" */
 9608 				ldv_handler_precall();
 9609 				macb_remove( var_group7);
 9610 				ldv_s_macb_driver_platform_driver=0;
 9611 
 9612 				}
 9613 
 9614 			}
 9615 
 9616 			break;
 9617 			case 45: {
 9618 
 9619 				/** CALLBACK SECTION request_irq **/
 9620 				LDV_IN_INTERRUPT=2;
 9621 
 9622 				/* content: static irqreturn_t at91ether_interrupt(int irq, void *dev_id)*/
 9623 				/* LDV_COMMENT_BEGIN_PREP */
 9624 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9625 				#define MACB_RX_BUFFER_SIZE	128
 9626 				#define RX_BUFFER_MULTIPLE	64  
 9627 				#define RX_RING_SIZE		512 
 9628 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9629 				#define TX_RING_SIZE		128 
 9630 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9631 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9632 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9633 				 | MACB_BIT(ISR_ROVR))
 9634 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9635 					| MACB_BIT(ISR_RLE)		\
 9636 					| MACB_BIT(TXERR))
 9637 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9638 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9639 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9640 				#define GEM_MTU_MIN_SIZE	68
 9641 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9642 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9643 				#define MACB_HALT_TIMEOUT	1230
 9644 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9645 				#endif
 9646 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9647 				#endif
 9648 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9649 				#endif
 9650 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9651 				#endif
 9652 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9653 				#endif
 9654 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9655 				#endif
 9656 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9657 				#endif
 9658 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9659 				#endif
 9660 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9661 				#endif
 9662 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9663 				#endif
 9664 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9665 				#endif
 9666 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9667 				#endif
 9668 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9669 				#endif
 9670 				#if defined(CONFIG_OF)
 9671 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9672 				#define AT91ETHER_MAX_RX_DESCR	9
 9673 				/* LDV_COMMENT_END_PREP */
 9674 				/* LDV_COMMENT_FUNCTION_CALL */
 9675 				ldv_handler_precall();
 9676 				at91ether_interrupt( var_at91ether_interrupt_82_p0, var_at91ether_interrupt_82_p1);
 9677 				/* LDV_COMMENT_BEGIN_PREP */
 9678 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9679 				#endif
 9680 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9681 				#endif
 9682 				#endif 
 9683 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9684 				#endif
 9685 				/* LDV_COMMENT_END_PREP */
 9686 				LDV_IN_INTERRUPT=1;
 9687 
 9688 				
 9689 
 9690 			}
 9691 
 9692 			break;
 9693 			case 46: {
 9694 
 9695 				/** CALLBACK SECTION request_irq **/
 9696 				LDV_IN_INTERRUPT=2;
 9697 
 9698 				/* content: static irqreturn_t macb_interrupt(int irq, void *dev_id)*/
 9699 				/* LDV_COMMENT_BEGIN_PREP */
 9700 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9701 				#define MACB_RX_BUFFER_SIZE	128
 9702 				#define RX_BUFFER_MULTIPLE	64  
 9703 				#define RX_RING_SIZE		512 
 9704 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9705 				#define TX_RING_SIZE		128 
 9706 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9707 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9708 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9709 				 | MACB_BIT(ISR_ROVR))
 9710 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9711 					| MACB_BIT(ISR_RLE)		\
 9712 					| MACB_BIT(TXERR))
 9713 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9714 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9715 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9716 				#define GEM_MTU_MIN_SIZE	68
 9717 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9718 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9719 				#define MACB_HALT_TIMEOUT	1230
 9720 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9721 				#endif
 9722 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9723 				#endif
 9724 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9725 				#endif
 9726 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9727 				#endif
 9728 				/* LDV_COMMENT_END_PREP */
 9729 				/* LDV_COMMENT_FUNCTION_CALL */
 9730 				ldv_handler_precall();
 9731 				macb_interrupt( var_macb_interrupt_34_p0, var_macb_interrupt_34_p1);
 9732 				/* LDV_COMMENT_BEGIN_PREP */
 9733 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9734 				#endif
 9735 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9736 				#endif
 9737 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9738 				#endif
 9739 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9740 				#endif
 9741 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9742 				#endif
 9743 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9744 				#endif
 9745 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9746 				#endif
 9747 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9748 				#endif
 9749 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9750 				#endif
 9751 				#if defined(CONFIG_OF)
 9752 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9753 				#define AT91ETHER_MAX_RX_DESCR	9
 9754 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9755 				#endif
 9756 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9757 				#endif
 9758 				#endif 
 9759 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9760 				#endif
 9761 				/* LDV_COMMENT_END_PREP */
 9762 				LDV_IN_INTERRUPT=1;
 9763 
 9764 				
 9765 
 9766 			}
 9767 
 9768 			break;
 9769 			default: break;
 9770 
 9771 		}
 9772 
 9773 	}
 9774 
 9775 	ldv_module_exit: 
 9776 
 9777 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 9778 	ldv_final: ldv_check_final_state();
 9779 
 9780 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 9781 	return;
 9782 
 9783 }
 9784 #endif
 9785 
 9786 /* LDV_COMMENT_END_MAIN */
    1 
    2 #include <linux/kernel.h>
    3 bool ldv_is_err(const void *ptr);
    4 bool ldv_is_err_or_null(const void *ptr);
    5 void* ldv_err_ptr(long error);
    6 long ldv_ptr_err(const void *ptr);
    7 
    8 extern void ldv_dma_map_page(void);
    9 extern void ldv_dma_mapping_error(void);
   10 #line 1 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/5529/dscv_tempdir/dscv/ri/331_1a/drivers/net/ethernet/cadence/macb.c"
   11 
   12 /*
   13  * Cadence MACB/GEM Ethernet Controller driver
   14  *
   15  * Copyright (C) 2004-2006 Atmel Corporation
   16  *
   17  * This program is free software; you can redistribute it and/or modify
   18  * it under the terms of the GNU General Public License version 2 as
   19  * published by the Free Software Foundation.
   20  */
   21 
   22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   23 #include <linux/clk.h>
   24 #include <linux/module.h>
   25 #include <linux/moduleparam.h>
   26 #include <linux/kernel.h>
   27 #include <linux/types.h>
   28 #include <linux/circ_buf.h>
   29 #include <linux/slab.h>
   30 #include <linux/init.h>
   31 #include <linux/io.h>
   32 #include <linux/gpio.h>
   33 #include <linux/gpio/consumer.h>
   34 #include <linux/interrupt.h>
   35 #include <linux/netdevice.h>
   36 #include <linux/etherdevice.h>
   37 #include <linux/dma-mapping.h>
   38 #include <linux/platform_data/macb.h>
   39 #include <linux/platform_device.h>
   40 #include <linux/phy.h>
   41 #include <linux/of.h>
   42 #include <linux/of_device.h>
   43 #include <linux/of_gpio.h>
   44 #include <linux/of_mdio.h>
   45 #include <linux/of_net.h>
   46 
   47 #include "macb.h"
   48 
   49 #define MACB_RX_BUFFER_SIZE	128
   50 #define RX_BUFFER_MULTIPLE	64  /* bytes */
   51 #define RX_RING_SIZE		512 /* must be power of 2 */
   52 #define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
   53 
   54 #define TX_RING_SIZE		128 /* must be power of 2 */
   55 #define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
   56 
   57 /* level of occupied TX descriptors under which we wake up TX process */
   58 #define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
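/* Clarifying note (added, not from the original source): with TX_RING_SIZE
 * == 128 this threshold is 96 descriptors; macb_tx_interrupt() below wakes
 * the TX queue again once the number of occupied descriptors drops to this
 * level or lower.
 */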
   59 
   60 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
   61 				 | MACB_BIT(ISR_ROVR))
   62 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
   63 					| MACB_BIT(ISR_RLE)		\
   64 					| MACB_BIT(TXERR))
   65 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
   66 
   67 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
   68 #define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
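/* Clarifying note (added): the two values above are the all-ones value of a
 * MACB_TX_FRMLEN_SIZE-bit (resp. GEM_TX_FRMLEN_SIZE-bit) field, i.e. the
 * largest length that fits in the corresponding frame-length field; they are
 * presumably what caps bp->max_tx_length used in macb_tx_map() below.
 */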
   69 
   70 #define GEM_MTU_MIN_SIZE	68
   71 
   72 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
   73 #define MACB_WOL_ENABLED		(0x1 << 1)
   74 
   75 /* Graceful stop timeouts in us. We should allow up to
   76  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
   77  */
   78 #define MACB_HALT_TIMEOUT	1230
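/* Clarifying note (added): a rough sanity check of the value above. A
 * maximum-size frame on the wire is about 1538 bytes (1500 payload + 14
 * header + 4 FCS + 8 preamble + 12 inter-frame gap), i.e. 12304 bits, which
 * at 10 Mbit/s takes ~1230 us -- matching the timeout defined above.
 */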
   79 
   80 /* Ring buffer accessors */
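/* Clarifying note (added): the *_ring_wrap() helpers below mask the index
 * with (ring size - 1), which is equivalent to index % size only because
 * RX_RING_SIZE and TX_RING_SIZE are required to be powers of two (see the
 * defines above).
 */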
   81 static unsigned int macb_tx_ring_wrap(unsigned int index)
   82 {
   83 	return index & (TX_RING_SIZE - 1);
   84 }
   85 
   86 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
   87 					  unsigned int index)
   88 {
   89 	return &queue->tx_ring[macb_tx_ring_wrap(index)];
   90 }
   91 
   92 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
   93 				       unsigned int index)
   94 {
   95 	return &queue->tx_skb[macb_tx_ring_wrap(index)];
   96 }
   97 
   98 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
   99 {
  100 	dma_addr_t offset;
  101 
  102 	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
  103 
  104 	return queue->tx_ring_dma + offset;
  105 }
  106 
  107 static unsigned int macb_rx_ring_wrap(unsigned int index)
  108 {
  109 	return index & (RX_RING_SIZE - 1);
  110 }
  111 
  112 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
  113 {
  114 	return &bp->rx_ring[macb_rx_ring_wrap(index)];
  115 }
  116 
  117 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
  118 {
  119 	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
  120 }
  121 
  122 /* I/O accessors */
  123 static u32 hw_readl_native(struct macb *bp, int offset)
  124 {
  125 	return __raw_readl(bp->regs + offset);
  126 }
  127 
  128 static void hw_writel_native(struct macb *bp, int offset, u32 value)
  129 {
  130 	__raw_writel(value, bp->regs + offset);
  131 }
  132 
  133 static u32 hw_readl(struct macb *bp, int offset)
  134 {
  135 	return readl_relaxed(bp->regs + offset);
  136 }
  137 
  138 static void hw_writel(struct macb *bp, int offset, u32 value)
  139 {
  140 	writel_relaxed(value, bp->regs + offset);
  141 }
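/* Clarifying note (added): the *_native accessors above use the __raw_* I/O
 * helpers, which perform no byte swapping, whereas hw_readl()/hw_writel()
 * use the little-endian readl_relaxed()/writel_relaxed(). Which pair the
 * driver uses is presumably selected at probe time from the result of
 * hw_is_native_io() below (cf. bp->macb_reg_readl in macb_update_stats()).
 */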
  142 
   143 /* Find the CPU endianness by using the loopback bit of the NCR register. When
   144  * the CPU is big endian we need to program swapped mode for management
  145  * descriptor access.
  146  */
  147 static bool hw_is_native_io(void __iomem *addr)
  148 {
  149 	u32 value = MACB_BIT(LLB);
  150 
  151 	__raw_writel(value, addr + MACB_NCR);
  152 	value = __raw_readl(addr + MACB_NCR);
  153 
  154 	/* Write 0 back to disable everything */
  155 	__raw_writel(0, addr + MACB_NCR);
  156 
  157 	return value == MACB_BIT(LLB);
  158 }
  159 
  160 static bool hw_is_gem(void __iomem *addr, bool native_io)
  161 {
  162 	u32 id;
  163 
  164 	if (native_io)
  165 		id = __raw_readl(addr + MACB_MID);
  166 	else
  167 		id = readl_relaxed(addr + MACB_MID);
  168 
  169 	return MACB_BFEXT(IDNUM, id) >= 0x2;
  170 }
  171 
  172 static void macb_set_hwaddr(struct macb *bp)
  173 {
  174 	u32 bottom;
  175 	u16 top;
  176 
  177 	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
  178 	macb_or_gem_writel(bp, SA1B, bottom);
  179 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
  180 	macb_or_gem_writel(bp, SA1T, top);
  181 
  182 	/* Clear unused address register sets */
  183 	macb_or_gem_writel(bp, SA2B, 0);
  184 	macb_or_gem_writel(bp, SA2T, 0);
  185 	macb_or_gem_writel(bp, SA3B, 0);
  186 	macb_or_gem_writel(bp, SA3T, 0);
  187 	macb_or_gem_writel(bp, SA4B, 0);
  188 	macb_or_gem_writel(bp, SA4T, 0);
  189 }
  190 
  191 static void macb_get_hwaddr(struct macb *bp)
  192 {
  193 	struct macb_platform_data *pdata;
  194 	u32 bottom;
  195 	u16 top;
  196 	u8 addr[6];
  197 	int i;
  198 
  199 	pdata = dev_get_platdata(&bp->pdev->dev);
  200 
   201 	/* Check all 4 address register sets for a valid address */
  202 	for (i = 0; i < 4; i++) {
  203 		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
  204 		top = macb_or_gem_readl(bp, SA1T + i * 8);
  205 
  206 		if (pdata && pdata->rev_eth_addr) {
  207 			addr[5] = bottom & 0xff;
  208 			addr[4] = (bottom >> 8) & 0xff;
  209 			addr[3] = (bottom >> 16) & 0xff;
  210 			addr[2] = (bottom >> 24) & 0xff;
  211 			addr[1] = top & 0xff;
  212 			addr[0] = (top & 0xff00) >> 8;
  213 		} else {
  214 			addr[0] = bottom & 0xff;
  215 			addr[1] = (bottom >> 8) & 0xff;
  216 			addr[2] = (bottom >> 16) & 0xff;
  217 			addr[3] = (bottom >> 24) & 0xff;
  218 			addr[4] = top & 0xff;
  219 			addr[5] = (top >> 8) & 0xff;
  220 		}
  221 
  222 		if (is_valid_ether_addr(addr)) {
  223 			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
  224 			return;
  225 		}
  226 	}
  227 
  228 	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
  229 	eth_hw_addr_random(bp->dev);
  230 }
  231 
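/* Clarifying note (added), assuming standard IEEE 802.3 Clause 22 MDIO
 * framing: the MAN register written below encodes one management frame --
 * start-of-frame, read/write opcode, 5-bit PHY address, 5-bit register
 * address and a turnaround code -- and the 16 data bits are read back from
 * (or supplied in) the DATA field once NSR indicates the bus is idle.
 */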
  232 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  233 {
  234 	struct macb *bp = bus->priv;
  235 	int value;
  236 
  237 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
  238 			      | MACB_BF(RW, MACB_MAN_READ)
  239 			      | MACB_BF(PHYA, mii_id)
  240 			      | MACB_BF(REGA, regnum)
  241 			      | MACB_BF(CODE, MACB_MAN_CODE)));
  242 
  243 	/* wait for end of transfer */
  244 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
  245 		cpu_relax();
  246 
  247 	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
  248 
  249 	return value;
  250 }
  251 
  252 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
  253 			   u16 value)
  254 {
  255 	struct macb *bp = bus->priv;
  256 
  257 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
  258 			      | MACB_BF(RW, MACB_MAN_WRITE)
  259 			      | MACB_BF(PHYA, mii_id)
  260 			      | MACB_BF(REGA, regnum)
  261 			      | MACB_BF(CODE, MACB_MAN_CODE)
  262 			      | MACB_BF(DATA, value)));
  263 
  264 	/* wait for end of transfer */
  265 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
  266 		cpu_relax();
  267 
  268 	return 0;
  269 }
  270 
  271 /**
  272  * macb_set_tx_clk() - Set a clock to a new frequency
   273  * @clk:	Pointer to the clock to change
   274  * @speed:	New link speed (SPEED_10/100/1000) used to derive the clock rate
   275  * @dev:	Pointer to the struct net_device
  276  */
  277 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
  278 {
  279 	long ferr, rate, rate_rounded;
  280 
  281 	if (!clk)
  282 		return;
  283 
  284 	switch (speed) {
  285 	case SPEED_10:
  286 		rate = 2500000;
  287 		break;
  288 	case SPEED_100:
  289 		rate = 25000000;
  290 		break;
  291 	case SPEED_1000:
  292 		rate = 125000000;
  293 		break;
  294 	default:
  295 		return;
  296 	}
  297 
  298 	rate_rounded = clk_round_rate(clk, rate);
  299 	if (rate_rounded < 0)
  300 		return;
  301 
  302 	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
  303 	 * is not satisfied.
  304 	 */
  305 	ferr = abs(rate_rounded - rate);
  306 	ferr = DIV_ROUND_UP(ferr, rate / 100000);
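	/* Clarifying note (added): rate / 100000 is 10 ppm of the target rate,
	 * so after this division ferr is expressed in units of 10 ppm; the
	 * "> 5" test below therefore corresponds to the 50 ppm RGMII limit
	 * mentioned in the comment above.
	 */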
  307 	if (ferr > 5)
  308 		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
  309 			    rate);
  310 
  311 	if (clk_set_rate(clk, rate_rounded))
  312 		netdev_err(dev, "adjusting tx_clk failed.\n");
  313 }
  314 
  315 static void macb_handle_link_change(struct net_device *dev)
  316 {
  317 	struct macb *bp = netdev_priv(dev);
  318 	struct phy_device *phydev = dev->phydev;
  319 	unsigned long flags;
  320 	int status_change = 0;
  321 
  322 	spin_lock_irqsave(&bp->lock, flags);
  323 
  324 	if (phydev->link) {
  325 		if ((bp->speed != phydev->speed) ||
  326 		    (bp->duplex != phydev->duplex)) {
  327 			u32 reg;
  328 
  329 			reg = macb_readl(bp, NCFGR);
  330 			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
  331 			if (macb_is_gem(bp))
  332 				reg &= ~GEM_BIT(GBE);
  333 
  334 			if (phydev->duplex)
  335 				reg |= MACB_BIT(FD);
  336 			if (phydev->speed == SPEED_100)
  337 				reg |= MACB_BIT(SPD);
  338 			if (phydev->speed == SPEED_1000 &&
  339 			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
  340 				reg |= GEM_BIT(GBE);
  341 
  342 			macb_or_gem_writel(bp, NCFGR, reg);
  343 
  344 			bp->speed = phydev->speed;
  345 			bp->duplex = phydev->duplex;
  346 			status_change = 1;
  347 		}
  348 	}
  349 
  350 	if (phydev->link != bp->link) {
  351 		if (!phydev->link) {
  352 			bp->speed = 0;
  353 			bp->duplex = -1;
  354 		}
  355 		bp->link = phydev->link;
  356 
  357 		status_change = 1;
  358 	}
  359 
  360 	spin_unlock_irqrestore(&bp->lock, flags);
  361 
  362 	if (status_change) {
  363 		if (phydev->link) {
  364 			/* Update the TX clock rate if and only if the link is
  365 			 * up and there has been a link change.
  366 			 */
  367 			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
  368 
  369 			netif_carrier_on(dev);
  370 			netdev_info(dev, "link up (%d/%s)\n",
  371 				    phydev->speed,
  372 				    phydev->duplex == DUPLEX_FULL ?
  373 				    "Full" : "Half");
  374 		} else {
  375 			netif_carrier_off(dev);
  376 			netdev_info(dev, "link down\n");
  377 		}
  378 	}
  379 }
  380 
   381 /* based on au1000_eth.c */
  382 static int macb_mii_probe(struct net_device *dev)
  383 {
  384 	struct macb *bp = netdev_priv(dev);
  385 	struct macb_platform_data *pdata;
  386 	struct phy_device *phydev;
  387 	int phy_irq;
  388 	int ret;
  389 
  390 	phydev = phy_find_first(bp->mii_bus);
  391 	if (!phydev) {
  392 		netdev_err(dev, "no PHY found\n");
  393 		return -ENXIO;
  394 	}
  395 
  396 	pdata = dev_get_platdata(&bp->pdev->dev);
  397 	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
  398 		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
  399 					"phy int");
  400 		if (!ret) {
  401 			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
  402 			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
  403 		}
  404 	}
  405 
  406 	/* attach the mac to the phy */
  407 	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
  408 				 bp->phy_interface);
  409 	if (ret) {
  410 		netdev_err(dev, "Could not attach to PHY\n");
  411 		return ret;
  412 	}
  413 
  414 	/* mask with MAC supported features */
  415 	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
  416 		phydev->supported &= PHY_GBIT_FEATURES;
  417 	else
  418 		phydev->supported &= PHY_BASIC_FEATURES;
  419 
  420 	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
  421 		phydev->supported &= ~SUPPORTED_1000baseT_Half;
  422 
  423 	phydev->advertising = phydev->supported;
  424 
  425 	bp->link = 0;
  426 	bp->speed = 0;
  427 	bp->duplex = -1;
  428 
  429 	return 0;
  430 }
  431 
  432 static int macb_mii_init(struct macb *bp)
  433 {
  434 	struct macb_platform_data *pdata;
  435 	struct device_node *np;
  436 	int err = -ENXIO, i;
  437 
  438 	/* Enable management port */
  439 	macb_writel(bp, NCR, MACB_BIT(MPE));
  440 
  441 	bp->mii_bus = mdiobus_alloc();
  442 	if (!bp->mii_bus) {
  443 		err = -ENOMEM;
  444 		goto err_out;
  445 	}
  446 
  447 	bp->mii_bus->name = "MACB_mii_bus";
  448 	bp->mii_bus->read = &macb_mdio_read;
  449 	bp->mii_bus->write = &macb_mdio_write;
  450 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  451 		 bp->pdev->name, bp->pdev->id);
  452 	bp->mii_bus->priv = bp;
  453 	bp->mii_bus->parent = &bp->pdev->dev;
  454 	pdata = dev_get_platdata(&bp->pdev->dev);
  455 
  456 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
  457 
  458 	np = bp->pdev->dev.of_node;
  459 	if (np) {
  460 		/* try dt phy registration */
  461 		err = of_mdiobus_register(bp->mii_bus, np);
  462 
   463 		/* fall back to standard PHY registration if no PHY was
   464 		 * found during DT PHY registration
  465 		 */
  466 		if (!err && !phy_find_first(bp->mii_bus)) {
  467 			for (i = 0; i < PHY_MAX_ADDR; i++) {
  468 				struct phy_device *phydev;
  469 
  470 				phydev = mdiobus_scan(bp->mii_bus, i);
  471 				if (IS_ERR(phydev) &&
  472 				    PTR_ERR(phydev) != -ENODEV) {
  473 					err = PTR_ERR(phydev);
  474 					break;
  475 				}
  476 			}
  477 
  478 			if (err)
  479 				goto err_out_unregister_bus;
  480 		}
  481 	} else {
  482 		if (pdata)
  483 			bp->mii_bus->phy_mask = pdata->phy_mask;
  484 
  485 		err = mdiobus_register(bp->mii_bus);
  486 	}
  487 
  488 	if (err)
  489 		goto err_out_free_mdiobus;
  490 
  491 	err = macb_mii_probe(bp->dev);
  492 	if (err)
  493 		goto err_out_unregister_bus;
  494 
  495 	return 0;
  496 
  497 err_out_unregister_bus:
  498 	mdiobus_unregister(bp->mii_bus);
  499 err_out_free_mdiobus:
  500 	mdiobus_free(bp->mii_bus);
  501 err_out:
  502 	return err;
  503 }
  504 
  505 static void macb_update_stats(struct macb *bp)
  506 {
  507 	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
  508 	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
  509 	int offset = MACB_PFR;
  510 
  511 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
  512 
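	/* Clarifying note (added): this loop relies on the members of the macb
	 * hw_stats structure from rx_pause_frames to tx_pause_frames being
	 * laid out contiguously and in the same order as the MACB_PFR..MACB_TPF
	 * registers; the WARN_ON above checks that the two ranges contain the
	 * same number of 32-bit entries.
	 */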
  513 	for (; p < end; p++, offset += 4)
  514 		*p += bp->macb_reg_readl(bp, offset);
  515 }
  516 
  517 static int macb_halt_tx(struct macb *bp)
  518 {
  519 	unsigned long	halt_time, timeout;
  520 	u32		status;
  521 
  522 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
  523 
  524 	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
  525 	do {
  526 		halt_time = jiffies;
  527 		status = macb_readl(bp, TSR);
  528 		if (!(status & MACB_BIT(TGO)))
  529 			return 0;
  530 
  531 		usleep_range(10, 250);
  532 	} while (time_before(halt_time, timeout));
  533 
  534 	return -ETIMEDOUT;
  535 }
  536 
  537 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
  538 {
  539 	if (tx_skb->mapping) {
  540 		if (tx_skb->mapped_as_page)
  541 			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
  542 				       tx_skb->size, DMA_TO_DEVICE);
  543 		else
  544 			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
  545 					 tx_skb->size, DMA_TO_DEVICE);
  546 		tx_skb->mapping = 0;
  547 	}
  548 
  549 	if (tx_skb->skb) {
  550 		dev_kfree_skb_any(tx_skb->skb);
  551 		tx_skb->skb = NULL;
  552 	}
  553 }
  554 
  555 static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
  556 {
  557 	desc->addr = (u32)addr;
  558 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  559 	desc->addrh = (u32)(addr >> 32);
  560 #endif
  561 }
  562 
  563 static void macb_tx_error_task(struct work_struct *work)
  564 {
  565 	struct macb_queue	*queue = container_of(work, struct macb_queue,
  566 						      tx_error_task);
  567 	struct macb		*bp = queue->bp;
  568 	struct macb_tx_skb	*tx_skb;
  569 	struct macb_dma_desc	*desc;
  570 	struct sk_buff		*skb;
  571 	unsigned int		tail;
  572 	unsigned long		flags;
  573 
  574 	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
  575 		    (unsigned int)(queue - bp->queues),
  576 		    queue->tx_tail, queue->tx_head);
  577 
  578 	/* Prevent the queue IRQ handlers from running: each of them may call
  579 	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
  580 	 * As explained below, we have to halt the transmission before updating
  581 	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
  582 	 * network engine about the macb/gem being halted.
  583 	 */
  584 	spin_lock_irqsave(&bp->lock, flags);
  585 
  586 	/* Make sure nobody is trying to queue up new packets */
  587 	netif_tx_stop_all_queues(bp->dev);
  588 
  589 	/* Stop transmission now
  590 	 * (in case we have just queued new packets)
  591 	 * macb/gem must be halted to write TBQP register
  592 	 */
  593 	if (macb_halt_tx(bp))
  594 		/* Just complain for now, reinitializing TX path can be good */
  595 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
  596 
  597 	/* Treat frames in TX queue including the ones that caused the error.
  598 	 * Free transmit buffers in upper layer.
  599 	 */
  600 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
  601 		u32	ctrl;
  602 
  603 		desc = macb_tx_desc(queue, tail);
  604 		ctrl = desc->ctrl;
  605 		tx_skb = macb_tx_skb(queue, tail);
  606 		skb = tx_skb->skb;
  607 
  608 		if (ctrl & MACB_BIT(TX_USED)) {
  609 			/* skb is set for the last buffer of the frame */
  610 			while (!skb) {
  611 				macb_tx_unmap(bp, tx_skb);
  612 				tail++;
  613 				tx_skb = macb_tx_skb(queue, tail);
  614 				skb = tx_skb->skb;
  615 			}
  616 
  617 			/* ctrl still refers to the first buffer descriptor
  618 			 * since it's the only one written back by the hardware
  619 			 */
  620 			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
  621 				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
  622 					    macb_tx_ring_wrap(tail), skb->data);
  623 				bp->stats.tx_packets++;
  624 				bp->stats.tx_bytes += skb->len;
  625 			}
  626 		} else {
  627 			/* "Buffers exhausted mid-frame" errors may only happen
  628 			 * if the driver is buggy, so complain loudly about
  629 			 * those. Statistics are updated by hardware.
  630 			 */
  631 			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
  632 				netdev_err(bp->dev,
  633 					   "BUG: TX buffers exhausted mid-frame\n");
  634 
  635 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
  636 		}
  637 
  638 		macb_tx_unmap(bp, tx_skb);
  639 	}
  640 
  641 	/* Set end of TX queue */
  642 	desc = macb_tx_desc(queue, 0);
  643 	macb_set_addr(desc, 0);
  644 	desc->ctrl = MACB_BIT(TX_USED);
  645 
  646 	/* Make descriptor updates visible to hardware */
  647 	wmb();
  648 
  649 	/* Reinitialize the TX desc queue */
  650 	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
  651 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  652 	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
  653 #endif
  654 	/* Make TX ring reflect state of hardware */
  655 	queue->tx_head = 0;
  656 	queue->tx_tail = 0;
  657 
  658 	/* Housework before enabling TX IRQ */
  659 	macb_writel(bp, TSR, macb_readl(bp, TSR));
  660 	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
  661 
  662 	/* Now we are ready to start transmission again */
  663 	netif_tx_start_all_queues(bp->dev);
  664 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
  665 
  666 	spin_unlock_irqrestore(&bp->lock, flags);
  667 }
  668 
  669 static void macb_tx_interrupt(struct macb_queue *queue)
  670 {
  671 	unsigned int tail;
  672 	unsigned int head;
  673 	u32 status;
  674 	struct macb *bp = queue->bp;
  675 	u16 queue_index = queue - bp->queues;
  676 
  677 	status = macb_readl(bp, TSR);
  678 	macb_writel(bp, TSR, status);
  679 
  680 	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  681 		queue_writel(queue, ISR, MACB_BIT(TCOMP));
  682 
  683 	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
  684 		    (unsigned long)status);
  685 
  686 	head = queue->tx_head;
  687 	for (tail = queue->tx_tail; tail != head; tail++) {
  688 		struct macb_tx_skb	*tx_skb;
  689 		struct sk_buff		*skb;
  690 		struct macb_dma_desc	*desc;
  691 		u32			ctrl;
  692 
  693 		desc = macb_tx_desc(queue, tail);
  694 
  695 		/* Make hw descriptor updates visible to CPU */
  696 		rmb();
  697 
  698 		ctrl = desc->ctrl;
  699 
  700 		/* TX_USED bit is only set by hardware on the very first buffer
  701 		 * descriptor of the transmitted frame.
  702 		 */
  703 		if (!(ctrl & MACB_BIT(TX_USED)))
  704 			break;
  705 
  706 		/* Process all buffers of the current transmitted frame */
  707 		for (;; tail++) {
  708 			tx_skb = macb_tx_skb(queue, tail);
  709 			skb = tx_skb->skb;
  710 
  711 			/* First, update TX stats if needed */
  712 			if (skb) {
  713 				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
  714 					    macb_tx_ring_wrap(tail), skb->data);
  715 				bp->stats.tx_packets++;
  716 				bp->stats.tx_bytes += skb->len;
  717 			}
  718 
  719 			/* Now we can safely release resources */
  720 			macb_tx_unmap(bp, tx_skb);
  721 
  722 			/* skb is set only for the last buffer of the frame.
  723 			 * WARNING: at this point skb has been freed by
  724 			 * macb_tx_unmap().
  725 			 */
  726 			if (skb)
  727 				break;
  728 		}
  729 	}
  730 
  731 	queue->tx_tail = tail;
  732 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
  733 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
  734 		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
  735 		netif_wake_subqueue(bp->dev, queue_index);
  736 }
  737 
  738 static void gem_rx_refill(struct macb *bp)
  739 {
  740 	unsigned int		entry;
  741 	struct sk_buff		*skb;
  742 	dma_addr_t		paddr;
  743 
  744 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
  745 			  RX_RING_SIZE) > 0) {
  746 		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
  747 
  748 		/* Make hw descriptor updates visible to CPU */
  749 		rmb();
  750 
  751 		bp->rx_prepared_head++;
  752 
  753 		if (!bp->rx_skbuff[entry]) {
  754 			/* allocate sk_buff for this free entry in ring */
  755 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
  756 			if (unlikely(!skb)) {
  757 				netdev_err(bp->dev,
  758 					   "Unable to allocate sk_buff\n");
  759 				break;
  760 			}
  761 
  762 			/* now fill corresponding descriptor entry */
  763 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
  764 					       bp->rx_buffer_size,
  765 					       DMA_FROM_DEVICE);
  766 			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
  767 				dev_kfree_skb(skb);
  768 				break;
  769 			}
  770 
  771 			bp->rx_skbuff[entry] = skb;
  772 
  773 			if (entry == RX_RING_SIZE - 1)
  774 				paddr |= MACB_BIT(RX_WRAP);
  775 			macb_set_addr(&(bp->rx_ring[entry]), paddr);
  776 			bp->rx_ring[entry].ctrl = 0;
  777 
  778 			/* properly align Ethernet header */
  779 			skb_reserve(skb, NET_IP_ALIGN);
  780 		} else {
  781 			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
  782 			bp->rx_ring[entry].ctrl = 0;
  783 		}
  784 	}
  785 
  786 	/* Make descriptor updates visible to hardware */
  787 	wmb();
  788 
  789 	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
  790 		    bp->rx_prepared_head, bp->rx_tail);
  791 }
  792 
  793 /* Mark DMA descriptors from begin up to and not including end as unused */
  794 static void discard_partial_frame(struct macb *bp, unsigned int begin,
  795 				  unsigned int end)
  796 {
  797 	unsigned int frag;
  798 
  799 	for (frag = begin; frag != end; frag++) {
  800 		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
  801 
  802 		desc->addr &= ~MACB_BIT(RX_USED);
  803 	}
  804 
  805 	/* Make descriptor updates visible to hardware */
  806 	wmb();
  807 
  808 	/* When this happens, the hardware stats registers for
   809 	 * whatever caused this are updated, so we don't have to record
  810 	 * anything.
  811 	 */
  812 }
  813 
  814 static int gem_rx(struct macb *bp, int budget)
  815 {
  816 	unsigned int		len;
  817 	unsigned int		entry;
  818 	struct sk_buff		*skb;
  819 	struct macb_dma_desc	*desc;
  820 	int			count = 0;
  821 
  822 	while (count < budget) {
  823 		u32 ctrl;
  824 		dma_addr_t addr;
  825 		bool rxused;
  826 
  827 		entry = macb_rx_ring_wrap(bp->rx_tail);
  828 		desc = &bp->rx_ring[entry];
  829 
  830 		/* Make hw descriptor updates visible to CPU */
  831 		rmb();
  832 
  833 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
  834 		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
  835 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  836 		addr |= ((u64)(desc->addrh) << 32);
  837 #endif
  838 		ctrl = desc->ctrl;
  839 
  840 		if (!rxused)
  841 			break;
  842 
  843 		bp->rx_tail++;
  844 		count++;
  845 
  846 		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
  847 			netdev_err(bp->dev,
  848 				   "not whole frame pointed by descriptor\n");
  849 			bp->stats.rx_dropped++;
  850 			break;
  851 		}
  852 		skb = bp->rx_skbuff[entry];
  853 		if (unlikely(!skb)) {
  854 			netdev_err(bp->dev,
  855 				   "inconsistent Rx descriptor chain\n");
  856 			bp->stats.rx_dropped++;
  857 			break;
  858 		}
   859 		/* now everything is ready for receiving the packet */
  860 		bp->rx_skbuff[entry] = NULL;
  861 		len = ctrl & bp->rx_frm_len_mask;
  862 
  863 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
  864 
  865 		skb_put(skb, len);
  866 		dma_unmap_single(&bp->pdev->dev, addr,
  867 				 bp->rx_buffer_size, DMA_FROM_DEVICE);
  868 
  869 		skb->protocol = eth_type_trans(skb, bp->dev);
  870 		skb_checksum_none_assert(skb);
  871 		if (bp->dev->features & NETIF_F_RXCSUM &&
  872 		    !(bp->dev->flags & IFF_PROMISC) &&
  873 		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
  874 			skb->ip_summed = CHECKSUM_UNNECESSARY;
  875 
  876 		bp->stats.rx_packets++;
  877 		bp->stats.rx_bytes += skb->len;
  878 
  879 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
  880 		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
  881 			    skb->len, skb->csum);
  882 		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
  883 			       skb_mac_header(skb), 16, true);
  884 		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
  885 			       skb->data, 32, true);
  886 #endif
  887 
  888 		netif_receive_skb(skb);
  889 	}
  890 
  891 	gem_rx_refill(bp);
  892 
  893 	return count;
  894 }
  895 
  896 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
  897 			 unsigned int last_frag)
  898 {
  899 	unsigned int len;
  900 	unsigned int frag;
  901 	unsigned int offset;
  902 	struct sk_buff *skb;
  903 	struct macb_dma_desc *desc;
  904 
  905 	desc = macb_rx_desc(bp, last_frag);
  906 	len = desc->ctrl & bp->rx_frm_len_mask;
  907 
  908 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
  909 		    macb_rx_ring_wrap(first_frag),
  910 		    macb_rx_ring_wrap(last_frag), len);
  911 
  912 	/* The ethernet header starts NET_IP_ALIGN bytes into the
  913 	 * first buffer. Since the header is 14 bytes, this makes the
  914 	 * payload word-aligned.
  915 	 *
  916 	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
  917 	 * the two padding bytes into the skb so that we avoid hitting
  918 	 * the slowpath in memcpy(), and pull them off afterwards.
  919 	 */
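	/* Worked example (added): with NET_IP_ALIGN == 2, the 14-byte Ethernet
	 * header occupies bytes 2..15 of the copied buffer, so the IP header
	 * starts at the 4-byte-aligned offset 16; __skb_pull(skb, NET_IP_ALIGN)
	 * below then drops the two padding bytes again before the skb is
	 * handed to the stack.
	 */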
  920 	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
  921 	if (!skb) {
  922 		bp->stats.rx_dropped++;
  923 		for (frag = first_frag; ; frag++) {
  924 			desc = macb_rx_desc(bp, frag);
  925 			desc->addr &= ~MACB_BIT(RX_USED);
  926 			if (frag == last_frag)
  927 				break;
  928 		}
  929 
  930 		/* Make descriptor updates visible to hardware */
  931 		wmb();
  932 
  933 		return 1;
  934 	}
  935 
  936 	offset = 0;
  937 	len += NET_IP_ALIGN;
  938 	skb_checksum_none_assert(skb);
  939 	skb_put(skb, len);
  940 
  941 	for (frag = first_frag; ; frag++) {
  942 		unsigned int frag_len = bp->rx_buffer_size;
  943 
  944 		if (offset + frag_len > len) {
  945 			if (unlikely(frag != last_frag)) {
  946 				dev_kfree_skb_any(skb);
  947 				return -1;
  948 			}
  949 			frag_len = len - offset;
  950 		}
  951 		skb_copy_to_linear_data_offset(skb, offset,
  952 					       macb_rx_buffer(bp, frag),
  953 					       frag_len);
  954 		offset += bp->rx_buffer_size;
  955 		desc = macb_rx_desc(bp, frag);
  956 		desc->addr &= ~MACB_BIT(RX_USED);
  957 
  958 		if (frag == last_frag)
  959 			break;
  960 	}
  961 
  962 	/* Make descriptor updates visible to hardware */
  963 	wmb();
  964 
  965 	__skb_pull(skb, NET_IP_ALIGN);
  966 	skb->protocol = eth_type_trans(skb, bp->dev);
  967 
  968 	bp->stats.rx_packets++;
  969 	bp->stats.rx_bytes += skb->len;
  970 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
  971 		    skb->len, skb->csum);
  972 	netif_receive_skb(skb);
  973 
  974 	return 0;
  975 }
  976 
  977 static inline void macb_init_rx_ring(struct macb *bp)
  978 {
  979 	dma_addr_t addr;
  980 	int i;
  981 
  982 	addr = bp->rx_buffers_dma;
  983 	for (i = 0; i < RX_RING_SIZE; i++) {
  984 		bp->rx_ring[i].addr = addr;
  985 		bp->rx_ring[i].ctrl = 0;
  986 		addr += bp->rx_buffer_size;
  987 	}
  988 	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
  989 }
  990 
  991 static int macb_rx(struct macb *bp, int budget)
  992 {
  993 	bool reset_rx_queue = false;
  994 	int received = 0;
  995 	unsigned int tail;
  996 	int first_frag = -1;
  997 
  998 	for (tail = bp->rx_tail; budget > 0; tail++) {
  999 		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
 1000 		u32 addr, ctrl;
 1001 
 1002 		/* Make hw descriptor updates visible to CPU */
 1003 		rmb();
 1004 
 1005 		addr = desc->addr;
 1006 		ctrl = desc->ctrl;
 1007 
 1008 		if (!(addr & MACB_BIT(RX_USED)))
 1009 			break;
 1010 
 1011 		if (ctrl & MACB_BIT(RX_SOF)) {
 1012 			if (first_frag != -1)
 1013 				discard_partial_frame(bp, first_frag, tail);
 1014 			first_frag = tail;
 1015 		}
 1016 
 1017 		if (ctrl & MACB_BIT(RX_EOF)) {
 1018 			int dropped;
 1019 
 1020 			if (unlikely(first_frag == -1)) {
 1021 				reset_rx_queue = true;
 1022 				continue;
 1023 			}
 1024 
 1025 			dropped = macb_rx_frame(bp, first_frag, tail);
 1026 			first_frag = -1;
 1027 			if (unlikely(dropped < 0)) {
 1028 				reset_rx_queue = true;
 1029 				continue;
 1030 			}
 1031 			if (!dropped) {
 1032 				received++;
 1033 				budget--;
 1034 			}
 1035 		}
 1036 	}
 1037 
 1038 	if (unlikely(reset_rx_queue)) {
 1039 		unsigned long flags;
 1040 		u32 ctrl;
 1041 
 1042 		netdev_err(bp->dev, "RX queue corruption: reset it\n");
 1043 
 1044 		spin_lock_irqsave(&bp->lock, flags);
 1045 
 1046 		ctrl = macb_readl(bp, NCR);
 1047 		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
 1048 
 1049 		macb_init_rx_ring(bp);
 1050 		macb_writel(bp, RBQP, bp->rx_ring_dma);
 1051 
 1052 		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 1053 
 1054 		spin_unlock_irqrestore(&bp->lock, flags);
 1055 		return received;
 1056 	}
 1057 
 1058 	if (first_frag != -1)
 1059 		bp->rx_tail = first_frag;
 1060 	else
 1061 		bp->rx_tail = tail;
 1062 
 1063 	return received;
 1064 }
 1065 
 1066 static int macb_poll(struct napi_struct *napi, int budget)
 1067 {
 1068 	struct macb *bp = container_of(napi, struct macb, napi);
 1069 	int work_done;
 1070 	u32 status;
 1071 
 1072 	status = macb_readl(bp, RSR);
 1073 	macb_writel(bp, RSR, status);
 1074 
 1075 	work_done = 0;
 1076 
 1077 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 1078 		    (unsigned long)status, budget);
 1079 
 1080 	work_done = bp->macbgem_ops.mog_rx(bp, budget);
 1081 	if (work_done < budget) {
 1082 		napi_complete(napi);
 1083 
 1084 		/* Packets received while interrupts were disabled */
 1085 		status = macb_readl(bp, RSR);
 1086 		if (status) {
 1087 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1088 				macb_writel(bp, ISR, MACB_BIT(RCOMP));
 1089 			napi_reschedule(napi);
 1090 		} else {
 1091 			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
 1092 		}
 1093 	}
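	/* Clarifying note (added): re-reading RSR after napi_complete() closes
	 * the race where a packet arrives while RX interrupts are still
	 * masked; in that case the poll is rescheduled instead of re-enabling
	 * MACB_RX_INT_FLAGS, so the event is not lost.
	 */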
 1094 
 1095 	/* TODO: Handle errors */
 1096 
 1097 	return work_done;
 1098 }
 1099 
 1100 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 1101 {
 1102 	struct macb_queue *queue = dev_id;
 1103 	struct macb *bp = queue->bp;
 1104 	struct net_device *dev = bp->dev;
 1105 	u32 status, ctrl;
 1106 
 1107 	status = queue_readl(queue, ISR);
 1108 
 1109 	if (unlikely(!status))
 1110 		return IRQ_NONE;
 1111 
 1112 	spin_lock(&bp->lock);
 1113 
 1114 	while (status) {
 1115 		/* close possible race with dev_close */
 1116 		if (unlikely(!netif_running(dev))) {
 1117 			queue_writel(queue, IDR, -1);
 1118 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1119 				queue_writel(queue, ISR, -1);
 1120 			break;
 1121 		}
 1122 
 1123 		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
 1124 			    (unsigned int)(queue - bp->queues),
 1125 			    (unsigned long)status);
 1126 
 1127 		if (status & MACB_RX_INT_FLAGS) {
 1128 			/* There's no point taking any more interrupts
 1129 			 * until we have processed the buffers. The
 1130 			 * scheduling call may fail if the poll routine
 1131 			 * is already scheduled, so disable interrupts
 1132 			 * now.
 1133 			 */
 1134 			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
 1135 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1136 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 1137 
 1138 			if (napi_schedule_prep(&bp->napi)) {
 1139 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
 1140 				__napi_schedule(&bp->napi);
 1141 			}
 1142 		}
 1143 
 1144 		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
 1145 			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
 1146 			schedule_work(&queue->tx_error_task);
 1147 
 1148 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1149 				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
 1150 
 1151 			break;
 1152 		}
 1153 
 1154 		if (status & MACB_BIT(TCOMP))
 1155 			macb_tx_interrupt(queue);
 1156 
 1157 		/* Link change detection isn't possible with RMII, so we'll
 1158 		 * add that if/when we get our hands on a full-blown MII PHY.
 1159 		 */
 1160 
 1161 		/* There is a hardware issue under heavy load where DMA can
 1162 		 * stop, this causes endless "used buffer descriptor read"
 1163 		 * interrupts but it can be cleared by re-enabling RX. See
 1164 		 * the at91 manual, section 41.3.1 or the Zynq manual
 1165 		 * section 16.7.4 for details.
 1166 		 */
 1167 		if (status & MACB_BIT(RXUBR)) {
 1168 			ctrl = macb_readl(bp, NCR);
 1169 			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
 1170 			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 1171 
 1172 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1173 				queue_writel(queue, ISR, MACB_BIT(RXUBR));
 1174 		}
 1175 
 1176 		if (status & MACB_BIT(ISR_ROVR)) {
 1177 			/* We missed at least one packet */
 1178 			if (macb_is_gem(bp))
 1179 				bp->hw_stats.gem.rx_overruns++;
 1180 			else
 1181 				bp->hw_stats.macb.rx_overruns++;
 1182 
 1183 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1184 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
 1185 		}
 1186 
 1187 		if (status & MACB_BIT(HRESP)) {
 1188 			/* TODO: Reset the hardware, and maybe move the
 1189 			 * netdev_err to a lower-priority context as well
 1190 			 * (work queue?)
 1191 			 */
 1192 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
 1193 
 1194 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1195 				queue_writel(queue, ISR, MACB_BIT(HRESP));
 1196 		}
 1197 
 1198 		status = queue_readl(queue, ISR);
 1199 	}
 1200 
 1201 	spin_unlock(&bp->lock);
 1202 
 1203 	return IRQ_HANDLED;
 1204 }
 1205 
 1206 #ifdef CONFIG_NET_POLL_CONTROLLER
 1207 /* Polling receive - used by netconsole and other diagnostic tools
 1208  * to allow network i/o with interrupts disabled.
 1209  */
 1210 static void macb_poll_controller(struct net_device *dev)
 1211 {
 1212 	struct macb *bp = netdev_priv(dev);
 1213 	struct macb_queue *queue;
 1214 	unsigned long flags;
 1215 	unsigned int q;
 1216 
 1217 	local_irq_save(flags);
 1218 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
 1219 		macb_interrupt(dev->irq, queue);
 1220 	local_irq_restore(flags);
 1221 }
 1222 #endif
 1223 
 1224 static unsigned int macb_tx_map(struct macb *bp,
 1225 				struct macb_queue *queue,
 1226 				struct sk_buff *skb)
 1227 {
 1228 	dma_addr_t mapping;
 1229 	unsigned int len, entry, i, tx_head = queue->tx_head;
 1230 	struct macb_tx_skb *tx_skb = NULL;
 1231 	struct macb_dma_desc *desc;
 1232 	unsigned int offset, size, count = 0;
 1233 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
 1234 	unsigned int eof = 1;
 1235 	u32 ctrl;
 1236 
 1237 	/* First, map non-paged data */
 1238 	len = skb_headlen(skb);
 1239 	offset = 0;
 1240 	while (len) {
 1241 		size = min(len, bp->max_tx_length);
 1242 		entry = macb_tx_ring_wrap(tx_head);
 1243 		tx_skb = &queue->tx_skb[entry];
 1244 
 1245 		mapping = dma_map_single(&bp->pdev->dev,
 1246 					 skb->data + offset,
 1247 					 size, DMA_TO_DEVICE);
 1248 		if (dma_mapping_error(&bp->pdev->dev, mapping))
 1249 			goto dma_error;
 1250 
 1251 		/* Save info to properly release resources */
 1252 		tx_skb->skb = NULL;
 1253 		tx_skb->mapping = mapping;
 1254 		tx_skb->size = size;
 1255 		tx_skb->mapped_as_page = false;
 1256 
 1257 		len -= size;
 1258 		offset += size;
 1259 		count++;
 1260 		tx_head++;
 1261 	}
 1262 
 1263 	/* Then, map paged data from fragments */
 1264 	for (f = 0; f < nr_frags; f++) {
 1265 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 1266 
 1267 		len = skb_frag_size(frag);
 1268 		offset = 0;
 1269 		while (len) {
 1270 			size = min(len, bp->max_tx_length);
 1271 			entry = macb_tx_ring_wrap(tx_head);
 1272 			tx_skb = &queue->tx_skb[entry];
 1273 
 1274 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
 1275 						   offset, size, DMA_TO_DEVICE);
 1276 			if (dma_mapping_error(&bp->pdev->dev, mapping))
 1277 				goto dma_error;
 1278 
 1279 			/* Save info to properly release resources */
 1280 			tx_skb->skb = NULL;
 1281 			tx_skb->mapping = mapping;
 1282 			tx_skb->size = size;
 1283 			tx_skb->mapped_as_page = true;
 1284 
 1285 			len -= size;
 1286 			offset += size;
 1287 			count++;
 1288 			tx_head++;
 1289 		}
 1290 	}
 1291 
 1292 	/* Should never happen */
 1293 	if (unlikely(!tx_skb)) {
 1294 		netdev_err(bp->dev, "BUG! empty skb!\n");
 1295 		return 0;
 1296 	}
 1297 
 1298 	/* This is the last buffer of the frame: save socket buffer */
 1299 	tx_skb->skb = skb;
 1300 
 1301 	/* Update TX ring: update buffer descriptors in reverse order
 1302 	 * to avoid race condition
 1303 	 */
 1304 
 1305 	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
 1306 	 * to set the end of TX queue
 1307 	 */
 1308 	i = tx_head;
 1309 	entry = macb_tx_ring_wrap(i);
 1310 	ctrl = MACB_BIT(TX_USED);
 1311 	desc = &queue->tx_ring[entry];
 1312 	desc->ctrl = ctrl;
 1313 
 1314 	do {
 1315 		i--;
 1316 		entry = macb_tx_ring_wrap(i);
 1317 		tx_skb = &queue->tx_skb[entry];
 1318 		desc = &queue->tx_ring[entry];
 1319 
 1320 		ctrl = (u32)tx_skb->size;
 1321 		if (eof) {
 1322 			ctrl |= MACB_BIT(TX_LAST);
 1323 			eof = 0;
 1324 		}
 1325 		if (unlikely(entry == (TX_RING_SIZE - 1)))
 1326 			ctrl |= MACB_BIT(TX_WRAP);
 1327 
 1328 		/* Set TX buffer descriptor */
 1329 		macb_set_addr(desc, tx_skb->mapping);
 1330 		/* desc->addr must be visible to hardware before clearing
 1331 		 * 'TX_USED' bit in desc->ctrl.
 1332 		 */
 1333 		wmb();
 1334 		desc->ctrl = ctrl;
 1335 	} while (i != queue->tx_head);
 1336 
 1337 	queue->tx_head = tx_head;
 1338 
 1339 	return count;
 1340 
 1341 dma_error:
 1342 	netdev_err(bp->dev, "TX DMA map failed\n");
 1343 
 1344 	for (i = queue->tx_head; i != tx_head; i++) {
 1345 		tx_skb = macb_tx_skb(queue, i);
 1346 
 1347 		macb_tx_unmap(bp, tx_skb);
 1348 	}
 1349 
 1350 	return 0;
 1351 }
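/* Note, inferred from the code above (not part of the original source):
 * macb_tx_map() first marks the descriptor one past the last buffer with
 * TX_USED as a stopper, then fills the buffer descriptors backwards from the
 * last fragment towards queue->tx_head.  The descriptor at the original
 * tx_head, whose TX_USED bit the controller checks before fetching the chain,
 * is only written after the wmb(), so the hardware should never see a
 * partially built frame.
 */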
 1352 
 1353 static inline int macb_clear_csum(struct sk_buff *skb)
 1354 {
 1355 	/* no change for packets without checksum offloading */
 1356 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 1357 		return 0;
 1358 
 1359 	/* make sure we can modify the header */
 1360 	if (unlikely(skb_cow_head(skb, 0)))
 1361 		return -1;
 1362 
 1363 	/* initialize checksum field
 1364 	 * This is required - at least for Zynq, which otherwise calculates
 1365 	 * wrong UDP header checksums for UDP packets with UDP data len <=2
 1366 	 */
 1367 	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
 1368 	return 0;
 1369 }
 1370 
 1371 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 1372 {
 1373 	u16 queue_index = skb_get_queue_mapping(skb);
 1374 	struct macb *bp = netdev_priv(dev);
 1375 	struct macb_queue *queue = &bp->queues[queue_index];
 1376 	unsigned long flags;
 1377 	unsigned int count, nr_frags, frag_size, f;
 1378 
 1379 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 1380 	netdev_vdbg(bp->dev,
 1381 		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
 1382 		    queue_index, skb->len, skb->head, skb->data,
 1383 		    skb_tail_pointer(skb), skb_end_pointer(skb));
 1384 	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
 1385 		       skb->data, 16, true);
 1386 #endif
 1387 
 1388 	/* Count how many TX buffer descriptors are needed to send this
 1389 	 * socket buffer: skb fragments of jumbo frames may need to be
 1390 	 * split into many buffer descriptors.
 1391 	 */
 1392 	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
 1393 	nr_frags = skb_shinfo(skb)->nr_frags;
 1394 	for (f = 0; f < nr_frags; f++) {
 1395 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
 1396 		count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
 1397 	}
 1398 
 1399 	spin_lock_irqsave(&bp->lock, flags);
 1400 
 1401 	/* This is a hard error, log it. */
 1402 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
 1403 		netif_stop_subqueue(dev, queue_index);
 1404 		spin_unlock_irqrestore(&bp->lock, flags);
 1405 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
 1406 			   queue->tx_head, queue->tx_tail);
 1407 		return NETDEV_TX_BUSY;
 1408 	}
 1409 
 1410 	if (macb_clear_csum(skb)) {
 1411 		dev_kfree_skb_any(skb);
 1412 		goto unlock;
 1413 	}
 1414 
 1415 	/* Map socket buffer for DMA transfer */
 1416 	if (!macb_tx_map(bp, queue, skb)) {
 1417 		dev_kfree_skb_any(skb);
 1418 		goto unlock;
 1419 	}
 1420 
 1421 	/* Make newly initialized descriptor visible to hardware */
 1422 	wmb();
 1423 
 1424 	skb_tx_timestamp(skb);
 1425 
 1426 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 1427 
 1428 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
 1429 		netif_stop_subqueue(dev, queue_index);
 1430 
 1431 unlock:
 1432 	spin_unlock_irqrestore(&bp->lock, flags);
 1433 
 1434 	return NETDEV_TX_OK;
 1435 }
 1436 
 1437 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
 1438 {
 1439 	if (!macb_is_gem(bp)) {
 1440 		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
 1441 	} else {
 1442 		bp->rx_buffer_size = size;
 1443 
 1444 		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
 1445 			netdev_dbg(bp->dev,
 1446 				   "RX buffer must be multiple of %d bytes, expanding\n",
 1447 				   RX_BUFFER_MULTIPLE);
 1448 			bp->rx_buffer_size =
 1449 				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
 1450 		}
 1451 	}
 1452 
 1453 	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
 1454 		   bp->dev->mtu, bp->rx_buffer_size);
 1455 }
 1456 
 1457 static void gem_free_rx_buffers(struct macb *bp)
 1458 {
 1459 	struct sk_buff		*skb;
 1460 	struct macb_dma_desc	*desc;
 1461 	dma_addr_t		addr;
 1462 	int i;
 1463 
 1464 	if (!bp->rx_skbuff)
 1465 		return;
 1466 
 1467 	for (i = 0; i < RX_RING_SIZE; i++) {
 1468 		skb = bp->rx_skbuff[i];
 1469 
 1470 		if (!skb)
 1471 			continue;
 1472 
 1473 		desc = &bp->rx_ring[i];
 1474 		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
 1475 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1476 		addr |= ((u64)(desc->addrh) << 32);
 1477 #endif
 1478 		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
 1479 				 DMA_FROM_DEVICE);
 1480 		dev_kfree_skb_any(skb);
 1481 		skb = NULL;
 1482 	}
 1483 
 1484 	kfree(bp->rx_skbuff);
 1485 	bp->rx_skbuff = NULL;
 1486 }
 1487 
 1488 static void macb_free_rx_buffers(struct macb *bp)
 1489 {
 1490 	if (bp->rx_buffers) {
 1491 		dma_free_coherent(&bp->pdev->dev,
 1492 				  RX_RING_SIZE * bp->rx_buffer_size,
 1493 				  bp->rx_buffers, bp->rx_buffers_dma);
 1494 		bp->rx_buffers = NULL;
 1495 	}
 1496 }
 1497 
 1498 static void macb_free_consistent(struct macb *bp)
 1499 {
 1500 	struct macb_queue *queue;
 1501 	unsigned int q;
 1502 
 1503 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 1504 	if (bp->rx_ring) {
 1505 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
 1506 				  bp->rx_ring, bp->rx_ring_dma);
 1507 		bp->rx_ring = NULL;
 1508 	}
 1509 
 1510 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1511 		kfree(queue->tx_skb);
 1512 		queue->tx_skb = NULL;
 1513 		if (queue->tx_ring) {
 1514 			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
 1515 					  queue->tx_ring, queue->tx_ring_dma);
 1516 			queue->tx_ring = NULL;
 1517 		}
 1518 	}
 1519 }
 1520 
 1521 static int gem_alloc_rx_buffers(struct macb *bp)
 1522 {
 1523 	int size;
 1524 
 1525 	size = RX_RING_SIZE * sizeof(struct sk_buff *);
 1526 	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
 1527 	if (!bp->rx_skbuff)
 1528 		return -ENOMEM;
 1529 
 1530 	netdev_dbg(bp->dev,
 1531 		   "Allocated %d RX struct sk_buff entries at %p\n",
 1532 		   RX_RING_SIZE, bp->rx_skbuff);
 1533 	return 0;
 1534 }
 1535 
 1536 static int macb_alloc_rx_buffers(struct macb *bp)
 1537 {
 1538 	int size;
 1539 
 1540 	size = RX_RING_SIZE * bp->rx_buffer_size;
 1541 	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
 1542 					    &bp->rx_buffers_dma, GFP_KERNEL);
 1543 	if (!bp->rx_buffers)
 1544 		return -ENOMEM;
 1545 
 1546 	netdev_dbg(bp->dev,
 1547 		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
 1548 		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
 1549 	return 0;
 1550 }
 1551 
 1552 static int macb_alloc_consistent(struct macb *bp)
 1553 {
 1554 	struct macb_queue *queue;
 1555 	unsigned int q;
 1556 	int size;
 1557 
 1558 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1559 		size = TX_RING_BYTES;
 1560 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 1561 						    &queue->tx_ring_dma,
 1562 						    GFP_KERNEL);
 1563 		if (!queue->tx_ring)
 1564 			goto out_err;
 1565 		netdev_dbg(bp->dev,
 1566 			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
 1567 			   q, size, (unsigned long)queue->tx_ring_dma,
 1568 			   queue->tx_ring);
 1569 
 1570 		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
 1571 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 1572 		if (!queue->tx_skb)
 1573 			goto out_err;
 1574 	}
 1575 
 1576 	size = RX_RING_BYTES;
 1577 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 1578 					 &bp->rx_ring_dma, GFP_KERNEL);
 1579 	if (!bp->rx_ring)
 1580 		goto out_err;
 1581 	netdev_dbg(bp->dev,
 1582 		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
 1583 		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
 1584 
 1585 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 1586 		goto out_err;
 1587 
 1588 	return 0;
 1589 
 1590 out_err:
 1591 	macb_free_consistent(bp);
 1592 	return -ENOMEM;
 1593 }
 1594 
 1595 static void gem_init_rings(struct macb *bp)
 1596 {
 1597 	struct macb_queue *queue;
 1598 	unsigned int q;
 1599 	int i;
 1600 
 1601 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1602 		for (i = 0; i < TX_RING_SIZE; i++) {
 1603 			macb_set_addr(&(queue->tx_ring[i]), 0);
 1604 			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
 1605 		}
 1606 		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 1607 		queue->tx_head = 0;
 1608 		queue->tx_tail = 0;
 1609 	}
 1610 
 1611 	bp->rx_tail = 0;
 1612 	bp->rx_prepared_head = 0;
 1613 
 1614 	gem_rx_refill(bp);
 1615 }
 1616 
 1617 static void macb_init_rings(struct macb *bp)
 1618 {
 1619 	int i;
 1620 
 1621 	macb_init_rx_ring(bp);
 1622 
 1623 	for (i = 0; i < TX_RING_SIZE; i++) {
 1624 		bp->queues[0].tx_ring[i].addr = 0;
 1625 		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
 1626 	}
 1627 	bp->queues[0].tx_head = 0;
 1628 	bp->queues[0].tx_tail = 0;
 1629 	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 1630 
 1631 	bp->rx_tail = 0;
 1632 }
 1633 
 1634 static void macb_reset_hw(struct macb *bp)
 1635 {
 1636 	struct macb_queue *queue;
 1637 	unsigned int q;
 1638 
 1639 	/* Disable RX and TX (XXX: Should we halt the transmission
 1640 	 * more gracefully?)
 1641 	 */
 1642 	macb_writel(bp, NCR, 0);
 1643 
 1644 	/* Clear the stats registers (XXX: Update stats first?) */
 1645 	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
 1646 
 1647 	/* Clear all status flags */
 1648 	macb_writel(bp, TSR, -1);
 1649 	macb_writel(bp, RSR, -1);
 1650 
 1651 	/* Disable all interrupts */
 1652 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1653 		queue_writel(queue, IDR, -1);
 1654 		queue_readl(queue, ISR);
 1655 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 1656 			queue_writel(queue, ISR, -1);
 1657 	}
 1658 }
 1659 
 1660 static u32 gem_mdc_clk_div(struct macb *bp)
 1661 {
 1662 	u32 config;
 1663 	unsigned long pclk_hz = clk_get_rate(bp->pclk);
 1664 
 1665 	if (pclk_hz <= 20000000)
 1666 		config = GEM_BF(CLK, GEM_CLK_DIV8);
 1667 	else if (pclk_hz <= 40000000)
 1668 		config = GEM_BF(CLK, GEM_CLK_DIV16);
 1669 	else if (pclk_hz <= 80000000)
 1670 		config = GEM_BF(CLK, GEM_CLK_DIV32);
 1671 	else if (pclk_hz <= 120000000)
 1672 		config = GEM_BF(CLK, GEM_CLK_DIV48);
 1673 	else if (pclk_hz <= 160000000)
 1674 		config = GEM_BF(CLK, GEM_CLK_DIV64);
 1675 	else
 1676 		config = GEM_BF(CLK, GEM_CLK_DIV96);
 1677 
 1678 	return config;
 1679 }
 1680 
 1681 static u32 macb_mdc_clk_div(struct macb *bp)
 1682 {
 1683 	u32 config;
 1684 	unsigned long pclk_hz;
 1685 
 1686 	if (macb_is_gem(bp))
 1687 		return gem_mdc_clk_div(bp);
 1688 
 1689 	pclk_hz = clk_get_rate(bp->pclk);
 1690 	if (pclk_hz <= 20000000)
 1691 		config = MACB_BF(CLK, MACB_CLK_DIV8);
 1692 	else if (pclk_hz <= 40000000)
 1693 		config = MACB_BF(CLK, MACB_CLK_DIV16);
 1694 	else if (pclk_hz <= 80000000)
 1695 		config = MACB_BF(CLK, MACB_CLK_DIV32);
 1696 	else
 1697 		config = MACB_BF(CLK, MACB_CLK_DIV64);
 1698 
 1699 	return config;
 1700 }
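/* Illustrative note (not part of the original source): the divider tables
 * above appear to keep the resulting MDC frequency at or below the 2.5 MHz
 * limit of IEEE 802.3 clause 22.  For example, assuming pclk_hz = 100000000
 * (100 MHz), the GEM path selects GEM_CLK_DIV48, giving an MDC of roughly
 * 100 MHz / 48, i.e. about 2.08 MHz.
 */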
 1701 
 1702 /* Get the DMA bus width field of the network configuration register that we
 1703  * should program. We find the width by decoding the design configuration
 1704  * register to find the maximum supported data bus width.
 1705  */
 1706 static u32 macb_dbw(struct macb *bp)
 1707 {
 1708 	if (!macb_is_gem(bp))
 1709 		return 0;
 1710 
 1711 	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
 1712 	case 4:
 1713 		return GEM_BF(DBW, GEM_DBW128);
 1714 	case 2:
 1715 		return GEM_BF(DBW, GEM_DBW64);
 1716 	case 1:
 1717 	default:
 1718 		return GEM_BF(DBW, GEM_DBW32);
 1719 	}
 1720 }
 1721 
 1722 /* Configure the receive DMA engine
 1723  * - use the correct receive buffer size
 1724  * - set best burst length for DMA operations
 1725  *   (if not supported by the FIFO, it falls back to the default)
 1726  * - set both rx/tx packet buffers to full memory size
 1727  * These are configurable parameters for GEM.
 1728  */
 1729 static void macb_configure_dma(struct macb *bp)
 1730 {
 1731 	u32 dmacfg;
 1732 
 1733 	if (macb_is_gem(bp)) {
 1734 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
 1735 		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
 1736 		if (bp->dma_burst_length)
 1737 			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
 1738 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 1739 		dmacfg &= ~GEM_BIT(ENDIA_PKT);
 1740 
 1741 		if (bp->native_io)
 1742 			dmacfg &= ~GEM_BIT(ENDIA_DESC);
 1743 		else
 1744 			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
 1745 
 1746 		if (bp->dev->features & NETIF_F_HW_CSUM)
 1747 			dmacfg |= GEM_BIT(TXCOEN);
 1748 		else
 1749 			dmacfg &= ~GEM_BIT(TXCOEN);
 1750 
 1751 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1752 		dmacfg |= GEM_BIT(ADDR64);
 1753 #endif
 1754 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
 1755 			   dmacfg);
 1756 		gem_writel(bp, DMACFG, dmacfg);
 1757 	}
 1758 }
 1759 
 1760 static void macb_init_hw(struct macb *bp)
 1761 {
 1762 	struct macb_queue *queue;
 1763 	unsigned int q;
 1764 
 1765 	u32 config;
 1766 
 1767 	macb_reset_hw(bp);
 1768 	macb_set_hwaddr(bp);
 1769 
 1770 	config = macb_mdc_clk_div(bp);
 1771 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
 1772 		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 1773 	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
 1774 	config |= MACB_BIT(PAE);		/* PAuse Enable */
 1775 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
 1776 	if (bp->caps & MACB_CAPS_JUMBO)
 1777 		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
 1778 	else
 1779 		config |= MACB_BIT(BIG);	/* Receive oversized frames */
 1780 	if (bp->dev->flags & IFF_PROMISC)
 1781 		config |= MACB_BIT(CAF);	/* Copy All Frames */
 1782 	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
 1783 		config |= GEM_BIT(RXCOEN);
 1784 	if (!(bp->dev->flags & IFF_BROADCAST))
 1785 		config |= MACB_BIT(NBC);	/* No BroadCast */
 1786 	config |= macb_dbw(bp);
 1787 	macb_writel(bp, NCFGR, config);
 1788 	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
 1789 		gem_writel(bp, JML, bp->jumbo_max_len);
 1790 	bp->speed = SPEED_10;
 1791 	bp->duplex = DUPLEX_HALF;
 1792 	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
 1793 	if (bp->caps & MACB_CAPS_JUMBO)
 1794 		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
 1795 
 1796 	macb_configure_dma(bp);
 1797 
 1798 	/* Initialize TX and RX buffers */
 1799 	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
 1800 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1801 	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
 1802 #endif
 1803 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 1804 		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
 1805 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 1806 		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
 1807 #endif
 1808 
 1809 		/* Enable interrupts */
 1810 		queue_writel(queue, IER,
 1811 			     MACB_RX_INT_FLAGS |
 1812 			     MACB_TX_INT_FLAGS |
 1813 			     MACB_BIT(HRESP));
 1814 	}
 1815 
 1816 	/* Enable TX and RX */
 1817 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 1818 }
 1819 
 1820 /* The hash address register is 64 bits long and takes up two
 1821  * locations in the memory map.  The least significant bits are stored
 1822  * in EMAC_HSL and the most significant bits in EMAC_HSH.
 1823  *
 1824  * The unicast hash enable and the multicast hash enable bits in the
 1825  * network configuration register enable the reception of hash matched
 1826  * frames. The destination address is reduced to a 6 bit index into
 1827  * the 64 bit hash register using the following hash function.  The
 1828  * hash function is an exclusive or of every sixth bit of the
 1829  * destination address.
 1830  *
 1831  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 1832  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 1833  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 1834  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 1835  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 1836  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 1837  *
 1838  * da[0] represents the least significant bit of the first byte
 1839  * received, that is, the multicast/unicast indicator, and da[47]
 1840  * represents the most significant bit of the last byte received.  If
 1841  * the hash index, hi[n], points to a bit that is set in the hash
 1842  * register then the frame will be matched according to whether the
 1843  * frame is multicast or unicast.  A multicast match will be signalled
 1844  * if the multicast hash enable bit is set, da[0] is 1 and the hash
 1845  * index points to a bit set in the hash register.  A unicast match
 1846  * will be signalled if the unicast hash enable bit is set, da[0] is 0
 1847  * and the hash index points to a bit set in the hash register.  To
 1848  * receive all multicast frames, the hash register should be set with
 1849  * all ones and the multicast hash enable bit should be set in the
 1850  * network configuration register.
 1851  */
 1852 
 1853 static inline int hash_bit_value(int bitnr, __u8 *addr)
 1854 {
 1855 	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
 1856 		return 1;
 1857 	return 0;
 1858 }
 1859 
 1860 /* Return the hash index value for the specified address. */
 1861 static int hash_get_index(__u8 *addr)
 1862 {
 1863 	int i, j, bitval;
 1864 	int hash_index = 0;
 1865 
 1866 	for (j = 0; j < 6; j++) {
 1867 		for (i = 0, bitval = 0; i < 8; i++)
 1868 			bitval ^= hash_bit_value(i * 6 + j, addr);
 1869 
 1870 		hash_index |= (bitval << j);
 1871 	}
 1872 
 1873 	return hash_index;
 1874 }
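/* Worked example (illustrative, not part of the original source): for the
 * IPv4 all-hosts multicast MAC 01:00:5e:00:00:01 the set bits are da[0],
 * da[17..20], da[22] and da[40]; hi[j] = XOR of da[6 * i + j] then gives
 * hi[5..0] = 100110b, so hash_get_index() returns 38 and macb_sethashtable()
 * below sets bit 6 (38 & 31) of the HRT half of the hash register.  For the
 * all-ones address ff:ff:ff:ff:ff:ff every hi[j] is an XOR of eight ones and
 * the index is 0.
 */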
 1875 
 1876 /* Add multicast addresses to the internal multicast-hash table. */
 1877 static void macb_sethashtable(struct net_device *dev)
 1878 {
 1879 	struct netdev_hw_addr *ha;
 1880 	unsigned long mc_filter[2];
 1881 	unsigned int bitnr;
 1882 	struct macb *bp = netdev_priv(dev);
 1883 
 1884 	mc_filter[0] = 0;
 1885 	mc_filter[1] = 0;
 1886 
 1887 	netdev_for_each_mc_addr(ha, dev) {
 1888 		bitnr = hash_get_index(ha->addr);
 1889 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
 1890 	}
 1891 
 1892 	macb_or_gem_writel(bp, HRB, mc_filter[0]);
 1893 	macb_or_gem_writel(bp, HRT, mc_filter[1]);
 1894 }
 1895 
 1896 /* Enable/Disable promiscuous and multicast modes. */
 1897 static void macb_set_rx_mode(struct net_device *dev)
 1898 {
 1899 	unsigned long cfg;
 1900 	struct macb *bp = netdev_priv(dev);
 1901 
 1902 	cfg = macb_readl(bp, NCFGR);
 1903 
 1904 	if (dev->flags & IFF_PROMISC) {
 1905 		/* Enable promiscuous mode */
 1906 		cfg |= MACB_BIT(CAF);
 1907 
 1908 		/* Disable RX checksum offload */
 1909 		if (macb_is_gem(bp))
 1910 			cfg &= ~GEM_BIT(RXCOEN);
 1911 	} else {
 1912 		/* Disable promiscuous mode */
 1913 		cfg &= ~MACB_BIT(CAF);
 1914 
 1915 		/* Enable RX checksum offload only if requested */
 1916 		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
 1917 			cfg |= GEM_BIT(RXCOEN);
 1918 	}
 1919 
 1920 	if (dev->flags & IFF_ALLMULTI) {
 1921 		/* Enable all multicast mode */
 1922 		macb_or_gem_writel(bp, HRB, -1);
 1923 		macb_or_gem_writel(bp, HRT, -1);
 1924 		cfg |= MACB_BIT(NCFGR_MTI);
 1925 	} else if (!netdev_mc_empty(dev)) {
 1926 		/* Enable specific multicasts */
 1927 		macb_sethashtable(dev);
 1928 		cfg |= MACB_BIT(NCFGR_MTI);
 1929 	} else if (dev->flags & (~IFF_ALLMULTI)) {
 1930 		/* Disable all multicast mode */
 1931 		macb_or_gem_writel(bp, HRB, 0);
 1932 		macb_or_gem_writel(bp, HRT, 0);
 1933 		cfg &= ~MACB_BIT(NCFGR_MTI);
 1934 	}
 1935 
 1936 	macb_writel(bp, NCFGR, cfg);
 1937 }
 1938 
 1939 static int macb_open(struct net_device *dev)
 1940 {
 1941 	struct macb *bp = netdev_priv(dev);
 1942 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
 1943 	int err;
 1944 
 1945 	netdev_dbg(bp->dev, "open\n");
 1946 
 1947 	/* carrier starts down */
 1948 	netif_carrier_off(dev);
 1949 
 1950 	/* if the PHY is not yet registered, retry later */
 1951 	if (!dev->phydev)
 1952 		return -EAGAIN;
 1953 
 1954 	/* RX buffers initialization */
 1955 	macb_init_rx_buffer_size(bp, bufsz);
 1956 
 1957 	err = macb_alloc_consistent(bp);
 1958 	if (err) {
 1959 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
 1960 			   err);
 1961 		return err;
 1962 	}
 1963 
 1964 	napi_enable(&bp->napi);
 1965 
 1966 	bp->macbgem_ops.mog_init_rings(bp);
 1967 	macb_init_hw(bp);
 1968 
 1969 	/* schedule a link state check */
 1970 	phy_start(dev->phydev);
 1971 
 1972 	netif_tx_start_all_queues(dev);
 1973 
 1974 	return 0;
 1975 }
 1976 
 1977 static int macb_close(struct net_device *dev)
 1978 {
 1979 	struct macb *bp = netdev_priv(dev);
 1980 	unsigned long flags;
 1981 
 1982 	netif_tx_stop_all_queues(dev);
 1983 	napi_disable(&bp->napi);
 1984 
 1985 	if (dev->phydev)
 1986 		phy_stop(dev->phydev);
 1987 
 1988 	spin_lock_irqsave(&bp->lock, flags);
 1989 	macb_reset_hw(bp);
 1990 	netif_carrier_off(dev);
 1991 	spin_unlock_irqrestore(&bp->lock, flags);
 1992 
 1993 	macb_free_consistent(bp);
 1994 
 1995 	return 0;
 1996 }
 1997 
 1998 static int macb_change_mtu(struct net_device *dev, int new_mtu)
 1999 {
 2000 	struct macb *bp = netdev_priv(dev);
 2001 	u32 max_mtu;
 2002 
 2003 	if (netif_running(dev))
 2004 		return -EBUSY;
 2005 
 2006 	max_mtu = ETH_DATA_LEN;
 2007 	if (bp->caps & MACB_CAPS_JUMBO)
 2008 		max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
 2009 
 2010 	if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
 2011 		return -EINVAL;
 2012 
 2013 	dev->mtu = new_mtu;
 2014 
 2015 	return 0;
 2016 }
 2017 
 2018 static void gem_update_stats(struct macb *bp)
 2019 {
 2020 	unsigned int i;
 2021 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
 2022 
 2023 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
 2024 		u32 offset = gem_statistics[i].offset;
 2025 		u64 val = bp->macb_reg_readl(bp, offset);
 2026 
 2027 		bp->ethtool_stats[i] += val;
 2028 		*p += val;
 2029 
 2030 		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
 2031 			/* Add GEM_OCTTXH, GEM_OCTRXH */
 2032 			val = bp->macb_reg_readl(bp, offset + 4);
 2033 			bp->ethtool_stats[i] += ((u64)val) << 32;
 2034 			*(++p) += val;
 2035 		}
 2036 	}
 2037 }
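/* Note, inferred from the code above: GEM_OCTTXL/GEM_OCTRXL are the low
 * halves of 64-bit octet counters and the high half sits in the next register
 * (offset + 4).  The loop folds both halves into one 64-bit ethtool_stats[]
 * entry, while hw_stats.gem keeps them as two consecutive u32 fields, which
 * is why p is advanced an extra step in that branch.
 */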
 2038 
 2039 static struct net_device_stats *gem_get_stats(struct macb *bp)
 2040 {
 2041 	struct gem_stats *hwstat = &bp->hw_stats.gem;
 2042 	struct net_device_stats *nstat = &bp->stats;
 2043 
 2044 	gem_update_stats(bp);
 2045 
 2046 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
 2047 			    hwstat->rx_alignment_errors +
 2048 			    hwstat->rx_resource_errors +
 2049 			    hwstat->rx_overruns +
 2050 			    hwstat->rx_oversize_frames +
 2051 			    hwstat->rx_jabbers +
 2052 			    hwstat->rx_undersized_frames +
 2053 			    hwstat->rx_length_field_frame_errors);
 2054 	nstat->tx_errors = (hwstat->tx_late_collisions +
 2055 			    hwstat->tx_excessive_collisions +
 2056 			    hwstat->tx_underrun +
 2057 			    hwstat->tx_carrier_sense_errors);
 2058 	nstat->multicast = hwstat->rx_multicast_frames;
 2059 	nstat->collisions = (hwstat->tx_single_collision_frames +
 2060 			     hwstat->tx_multiple_collision_frames +
 2061 			     hwstat->tx_excessive_collisions);
 2062 	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
 2063 				   hwstat->rx_jabbers +
 2064 				   hwstat->rx_undersized_frames +
 2065 				   hwstat->rx_length_field_frame_errors);
 2066 	nstat->rx_over_errors = hwstat->rx_resource_errors;
 2067 	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
 2068 	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
 2069 	nstat->rx_fifo_errors = hwstat->rx_overruns;
 2070 	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
 2071 	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
 2072 	nstat->tx_fifo_errors = hwstat->tx_underrun;
 2073 
 2074 	return nstat;
 2075 }
 2076 
 2077 static void gem_get_ethtool_stats(struct net_device *dev,
 2078 				  struct ethtool_stats *stats, u64 *data)
 2079 {
 2080 	struct macb *bp;
 2081 
 2082 	bp = netdev_priv(dev);
 2083 	gem_update_stats(bp);
 2084 	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
 2085 }
 2086 
 2087 static int gem_get_sset_count(struct net_device *dev, int sset)
 2088 {
 2089 	switch (sset) {
 2090 	case ETH_SS_STATS:
 2091 		return GEM_STATS_LEN;
 2092 	default:
 2093 		return -EOPNOTSUPP;
 2094 	}
 2095 }
 2096 
 2097 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 2098 {
 2099 	unsigned int i;
 2100 
 2101 	switch (sset) {
 2102 	case ETH_SS_STATS:
 2103 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
 2104 			memcpy(p, gem_statistics[i].stat_string,
 2105 			       ETH_GSTRING_LEN);
 2106 		break;
 2107 	}
 2108 }
 2109 
 2110 static struct net_device_stats *macb_get_stats(struct net_device *dev)
 2111 {
 2112 	struct macb *bp = netdev_priv(dev);
 2113 	struct net_device_stats *nstat = &bp->stats;
 2114 	struct macb_stats *hwstat = &bp->hw_stats.macb;
 2115 
 2116 	if (macb_is_gem(bp))
 2117 		return gem_get_stats(bp);
 2118 
 2119 	/* read stats from hardware */
 2120 	macb_update_stats(bp);
 2121 
 2122 	/* Convert HW stats into netdevice stats */
 2123 	nstat->rx_errors = (hwstat->rx_fcs_errors +
 2124 			    hwstat->rx_align_errors +
 2125 			    hwstat->rx_resource_errors +
 2126 			    hwstat->rx_overruns +
 2127 			    hwstat->rx_oversize_pkts +
 2128 			    hwstat->rx_jabbers +
 2129 			    hwstat->rx_undersize_pkts +
 2130 			    hwstat->rx_length_mismatch);
 2131 	nstat->tx_errors = (hwstat->tx_late_cols +
 2132 			    hwstat->tx_excessive_cols +
 2133 			    hwstat->tx_underruns +
 2134 			    hwstat->tx_carrier_errors +
 2135 			    hwstat->sqe_test_errors);
 2136 	nstat->collisions = (hwstat->tx_single_cols +
 2137 			     hwstat->tx_multiple_cols +
 2138 			     hwstat->tx_excessive_cols);
 2139 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
 2140 				   hwstat->rx_jabbers +
 2141 				   hwstat->rx_undersize_pkts +
 2142 				   hwstat->rx_length_mismatch);
 2143 	nstat->rx_over_errors = hwstat->rx_resource_errors +
 2144 				   hwstat->rx_overruns;
 2145 	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
 2146 	nstat->rx_frame_errors = hwstat->rx_align_errors;
 2147 	nstat->rx_fifo_errors = hwstat->rx_overruns;
 2148 	/* XXX: What does "missed" mean? */
 2149 	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
 2150 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
 2151 	nstat->tx_fifo_errors = hwstat->tx_underruns;
 2152 	/* Don't know about heartbeat or window errors... */
 2153 
 2154 	return nstat;
 2155 }
 2156 
 2157 static int macb_get_regs_len(struct net_device *netdev)
 2158 {
 2159 	return MACB_GREGS_NBR * sizeof(u32);
 2160 }
 2161 
 2162 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 2163 			  void *p)
 2164 {
 2165 	struct macb *bp = netdev_priv(dev);
 2166 	unsigned int tail, head;
 2167 	u32 *regs_buff = p;
 2168 
 2169 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
 2170 			| MACB_GREGS_VERSION;
 2171 
 2172 	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
 2173 	head = macb_tx_ring_wrap(bp->queues[0].tx_head);
 2174 
 2175 	regs_buff[0]  = macb_readl(bp, NCR);
 2176 	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
 2177 	regs_buff[2]  = macb_readl(bp, NSR);
 2178 	regs_buff[3]  = macb_readl(bp, TSR);
 2179 	regs_buff[4]  = macb_readl(bp, RBQP);
 2180 	regs_buff[5]  = macb_readl(bp, TBQP);
 2181 	regs_buff[6]  = macb_readl(bp, RSR);
 2182 	regs_buff[7]  = macb_readl(bp, IMR);
 2183 
 2184 	regs_buff[8]  = tail;
 2185 	regs_buff[9]  = head;
 2186 	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
 2187 	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
 2188 
 2189 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
 2190 		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
 2191 	if (macb_is_gem(bp))
 2192 		regs_buff[13] = gem_readl(bp, DMACFG);
 2193 }
 2194 
 2195 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 2196 {
 2197 	struct macb *bp = netdev_priv(netdev);
 2198 
 2199 	wol->supported = 0;
 2200 	wol->wolopts = 0;
 2201 
 2202 	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
 2203 		wol->supported = WAKE_MAGIC;
 2204 
 2205 		if (bp->wol & MACB_WOL_ENABLED)
 2206 			wol->wolopts |= WAKE_MAGIC;
 2207 	}
 2208 }
 2209 
 2210 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 2211 {
 2212 	struct macb *bp = netdev_priv(netdev);
 2213 
 2214 	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
 2215 	    (wol->wolopts & ~WAKE_MAGIC))
 2216 		return -EOPNOTSUPP;
 2217 
 2218 	if (wol->wolopts & WAKE_MAGIC)
 2219 		bp->wol |= MACB_WOL_ENABLED;
 2220 	else
 2221 		bp->wol &= ~MACB_WOL_ENABLED;
 2222 
 2223 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
 2224 
 2225 	return 0;
 2226 }
 2227 
 2228 static const struct ethtool_ops macb_ethtool_ops = {
 2229 	.get_regs_len		= macb_get_regs_len,
 2230 	.get_regs		= macb_get_regs,
 2231 	.get_link		= ethtool_op_get_link,
 2232 	.get_ts_info		= ethtool_op_get_ts_info,
 2233 	.get_wol		= macb_get_wol,
 2234 	.set_wol		= macb_set_wol,
 2235 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
 2236 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
 2237 };
 2238 
 2239 static const struct ethtool_ops gem_ethtool_ops = {
 2240 	.get_regs_len		= macb_get_regs_len,
 2241 	.get_regs		= macb_get_regs,
 2242 	.get_link		= ethtool_op_get_link,
 2243 	.get_ts_info		= ethtool_op_get_ts_info,
 2244 	.get_ethtool_stats	= gem_get_ethtool_stats,
 2245 	.get_strings		= gem_get_ethtool_strings,
 2246 	.get_sset_count		= gem_get_sset_count,
 2247 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
 2248 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
 2249 };
 2250 
 2251 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 2252 {
 2253 	struct phy_device *phydev = dev->phydev;
 2254 
 2255 	if (!netif_running(dev))
 2256 		return -EINVAL;
 2257 
 2258 	if (!phydev)
 2259 		return -ENODEV;
 2260 
 2261 	return phy_mii_ioctl(phydev, rq, cmd);
 2262 }
 2263 
 2264 static int macb_set_features(struct net_device *netdev,
 2265 			     netdev_features_t features)
 2266 {
 2267 	struct macb *bp = netdev_priv(netdev);
 2268 	netdev_features_t changed = features ^ netdev->features;
 2269 
 2270 	/* TX checksum offload */
 2271 	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
 2272 		u32 dmacfg;
 2273 
 2274 		dmacfg = gem_readl(bp, DMACFG);
 2275 		if (features & NETIF_F_HW_CSUM)
 2276 			dmacfg |= GEM_BIT(TXCOEN);
 2277 		else
 2278 			dmacfg &= ~GEM_BIT(TXCOEN);
 2279 		gem_writel(bp, DMACFG, dmacfg);
 2280 	}
 2281 
 2282 	/* RX checksum offload */
 2283 	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
 2284 		u32 netcfg;
 2285 
 2286 		netcfg = gem_readl(bp, NCFGR);
 2287 		if (features & NETIF_F_RXCSUM &&
 2288 		    !(netdev->flags & IFF_PROMISC))
 2289 			netcfg |= GEM_BIT(RXCOEN);
 2290 		else
 2291 			netcfg &= ~GEM_BIT(RXCOEN);
 2292 		gem_writel(bp, NCFGR, netcfg);
 2293 	}
 2294 
 2295 	return 0;
 2296 }
 2297 
 2298 static const struct net_device_ops macb_netdev_ops = {
 2299 	.ndo_open		= macb_open,
 2300 	.ndo_stop		= macb_close,
 2301 	.ndo_start_xmit		= macb_start_xmit,
 2302 	.ndo_set_rx_mode	= macb_set_rx_mode,
 2303 	.ndo_get_stats		= macb_get_stats,
 2304 	.ndo_do_ioctl		= macb_ioctl,
 2305 	.ndo_validate_addr	= eth_validate_addr,
 2306 	.ndo_change_mtu		= macb_change_mtu,
 2307 	.ndo_set_mac_address	= eth_mac_addr,
 2308 #ifdef CONFIG_NET_POLL_CONTROLLER
 2309 	.ndo_poll_controller	= macb_poll_controller,
 2310 #endif
 2311 	.ndo_set_features	= macb_set_features,
 2312 };
 2313 
 2314 /* Configure peripheral capabilities according to device tree
 2315  * and integration options used
 2316  */
 2317 static void macb_configure_caps(struct macb *bp,
 2318 				const struct macb_config *dt_conf)
 2319 {
 2320 	u32 dcfg;
 2321 
 2322 	if (dt_conf)
 2323 		bp->caps = dt_conf->caps;
 2324 
 2325 	if (hw_is_gem(bp->regs, bp->native_io)) {
 2326 		bp->caps |= MACB_CAPS_MACB_IS_GEM;
 2327 
 2328 		dcfg = gem_readl(bp, DCFG1);
 2329 		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
 2330 			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
 2331 		dcfg = gem_readl(bp, DCFG2);
 2332 		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
 2333 			bp->caps |= MACB_CAPS_FIFO_MODE;
 2334 	}
 2335 
 2336 	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
 2337 }
 2338 
 2339 static void macb_probe_queues(void __iomem *mem,
 2340 			      bool native_io,
 2341 			      unsigned int *queue_mask,
 2342 			      unsigned int *num_queues)
 2343 {
 2344 	unsigned int hw_q;
 2345 
 2346 	*queue_mask = 0x1;
 2347 	*num_queues = 1;
 2348 
 2349 	/* Is it macb or gem?
 2350 	 *
 2351 	 * We need to read directly from the hardware here because
 2352 	 * we are early in the probe process and don't have the
 2353 	 * MACB_CAPS_MACB_IS_GEM flag set yet.
 2354 	 */
 2355 	if (!hw_is_gem(mem, native_io))
 2356 		return;
 2357 
 2358 	/* bit 0 is never set but queue 0 always exists */
 2359 	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
 2360 
 2361 	*queue_mask |= 0x1;
 2362 
 2363 	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
 2364 		if (*queue_mask & (1 << hw_q))
 2365 			(*num_queues)++;
 2366 }
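/* Illustrative example, assuming GEM_DCFG6 reads 0x06 in its low byte
 * (hardware queues 1 and 2 present): the code above then yields
 * queue_mask = 0x07, since queue 0 is always forced in, and num_queues = 3.
 */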
 2367 
 2368 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
 2369 			 struct clk **hclk, struct clk **tx_clk,
 2370 			 struct clk **rx_clk)
 2371 {
 2372 	int err;
 2373 
 2374 	*pclk = devm_clk_get(&pdev->dev, "pclk");
 2375 	if (IS_ERR(*pclk)) {
 2376 		err = PTR_ERR(*pclk);
 2377 		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
 2378 		return err;
 2379 	}
 2380 
 2381 	*hclk = devm_clk_get(&pdev->dev, "hclk");
 2382 	if (IS_ERR(*hclk)) {
 2383 		err = PTR_ERR(*hclk);
 2384 		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
 2385 		return err;
 2386 	}
 2387 
 2388 	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
 2389 	if (IS_ERR(*tx_clk))
 2390 		*tx_clk = NULL;
 2391 
 2392 	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
 2393 	if (IS_ERR(*rx_clk))
 2394 		*rx_clk = NULL;
 2395 
 2396 	err = clk_prepare_enable(*pclk);
 2397 	if (err) {
 2398 		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
 2399 		return err;
 2400 	}
 2401 
 2402 	err = clk_prepare_enable(*hclk);
 2403 	if (err) {
 2404 		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
 2405 		goto err_disable_pclk;
 2406 	}
 2407 
 2408 	err = clk_prepare_enable(*tx_clk);
 2409 	if (err) {
 2410 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
 2411 		goto err_disable_hclk;
 2412 	}
 2413 
 2414 	err = clk_prepare_enable(*rx_clk);
 2415 	if (err) {
 2416 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
 2417 		goto err_disable_txclk;
 2418 	}
 2419 
 2420 	return 0;
 2421 
 2422 err_disable_txclk:
 2423 	clk_disable_unprepare(*tx_clk);
 2424 
 2425 err_disable_hclk:
 2426 	clk_disable_unprepare(*hclk);
 2427 
 2428 err_disable_pclk:
 2429 	clk_disable_unprepare(*pclk);
 2430 
 2431 	return err;
 2432 }
 2433 
 2434 static int macb_init(struct platform_device *pdev)
 2435 {
 2436 	struct net_device *dev = platform_get_drvdata(pdev);
 2437 	unsigned int hw_q, q;
 2438 	struct macb *bp = netdev_priv(dev);
 2439 	struct macb_queue *queue;
 2440 	int err;
 2441 	u32 val;
 2442 
 2443 	/* set the queue register mapping once and for all: queue0 has a special
 2444 	 * register mapping but we don't want to test the queue index then
 2445 	 * compute the corresponding register offset at run time.
 2446 	 */
 2447 	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
 2448 		if (!(bp->queue_mask & (1 << hw_q)))
 2449 			continue;
 2450 
 2451 		queue = &bp->queues[q];
 2452 		queue->bp = bp;
 2453 		if (hw_q) {
 2454 			queue->ISR  = GEM_ISR(hw_q - 1);
 2455 			queue->IER  = GEM_IER(hw_q - 1);
 2456 			queue->IDR  = GEM_IDR(hw_q - 1);
 2457 			queue->IMR  = GEM_IMR(hw_q - 1);
 2458 			queue->TBQP = GEM_TBQP(hw_q - 1);
 2459 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 2460 			queue->TBQPH = GEM_TBQPH(hw_q - 1);
 2461 #endif
 2462 		} else {
 2463 			/* queue0 uses legacy registers */
 2464 			queue->ISR  = MACB_ISR;
 2465 			queue->IER  = MACB_IER;
 2466 			queue->IDR  = MACB_IDR;
 2467 			queue->IMR  = MACB_IMR;
 2468 			queue->TBQP = MACB_TBQP;
 2469 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 2470 			queue->TBQPH = MACB_TBQPH;
 2471 #endif
 2472 		}
 2473 
 2474 		/* get irq: here we use the Linux queue index, not the hardware
 2475 		 * queue index. the queue irq definitions in the device tree
 2476 		 * must remove the optional gaps that could exist in the
 2477 		 * hardware queue mask.
 2478 		 */
 2479 		queue->irq = platform_get_irq(pdev, q);
 2480 		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
 2481 				       IRQF_SHARED, dev->name, queue);
 2482 		if (err) {
 2483 			dev_err(&pdev->dev,
 2484 				"Unable to request IRQ %d (error %d)\n",
 2485 				queue->irq, err);
 2486 			return err;
 2487 		}
 2488 
 2489 		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
 2490 		q++;
 2491 	}
 2492 
 2493 	dev->netdev_ops = &macb_netdev_ops;
 2494 	netif_napi_add(dev, &bp->napi, macb_poll, 64);
 2495 
 2496 	/* set up the appropriate routines according to the adapter type */
 2497 	if (macb_is_gem(bp)) {
 2498 		bp->max_tx_length = GEM_MAX_TX_LEN;
 2499 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
 2500 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
 2501 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
 2502 		bp->macbgem_ops.mog_rx = gem_rx;
 2503 		dev->ethtool_ops = &gem_ethtool_ops;
 2504 	} else {
 2505 		bp->max_tx_length = MACB_MAX_TX_LEN;
 2506 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
 2507 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
 2508 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
 2509 		bp->macbgem_ops.mog_rx = macb_rx;
 2510 		dev->ethtool_ops = &macb_ethtool_ops;
 2511 	}
 2512 
 2513 	/* Set features */
 2514 	dev->hw_features = NETIF_F_SG;
 2515 	/* Checksum offload is only available on gem with packet buffer */
 2516 	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
 2517 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
 2518 	if (bp->caps & MACB_CAPS_SG_DISABLED)
 2519 		dev->hw_features &= ~NETIF_F_SG;
 2520 	dev->features = dev->hw_features;
 2521 
 2522 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
 2523 		val = 0;
 2524 		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
 2525 			val = GEM_BIT(RGMII);
 2526 		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
 2527 			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
 2528 			val = MACB_BIT(RMII);
 2529 		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
 2530 			val = MACB_BIT(MII);
 2531 
 2532 		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
 2533 			val |= MACB_BIT(CLKEN);
 2534 
 2535 		macb_or_gem_writel(bp, USRIO, val);
 2536 	}
 2537 
 2538 	/* Set MII management clock divider */
 2539 	val = macb_mdc_clk_div(bp);
 2540 	val |= macb_dbw(bp);
 2541 	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
 2542 		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 2543 	macb_writel(bp, NCFGR, val);
 2544 
 2545 	return 0;
 2546 }
 2547 
 2548 #if defined(CONFIG_OF)
 2549 /* 1518 rounded up */
 2550 #define AT91ETHER_MAX_RBUFF_SZ	0x600
 2551 /* max number of receive buffers */
 2552 #define AT91ETHER_MAX_RX_DESCR	9
 2553 
 2554 /* Initialize and start the Receiver and Transmit subsystems */
 2555 static int at91ether_start(struct net_device *dev)
 2556 {
 2557 	struct macb *lp = netdev_priv(dev);
 2558 	dma_addr_t addr;
 2559 	u32 ctl;
 2560 	int i;
 2561 
 2562 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 2563 					 (AT91ETHER_MAX_RX_DESCR *
 2564 					  sizeof(struct macb_dma_desc)),
 2565 					 &lp->rx_ring_dma, GFP_KERNEL);
 2566 	if (!lp->rx_ring)
 2567 		return -ENOMEM;
 2568 
 2569 	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
 2570 					    AT91ETHER_MAX_RX_DESCR *
 2571 					    AT91ETHER_MAX_RBUFF_SZ,
 2572 					    &lp->rx_buffers_dma, GFP_KERNEL);
 2573 	if (!lp->rx_buffers) {
 2574 		dma_free_coherent(&lp->pdev->dev,
 2575 				  AT91ETHER_MAX_RX_DESCR *
 2576 				  sizeof(struct macb_dma_desc),
 2577 				  lp->rx_ring, lp->rx_ring_dma);
 2578 		lp->rx_ring = NULL;
 2579 		return -ENOMEM;
 2580 	}
 2581 
 2582 	addr = lp->rx_buffers_dma;
 2583 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
 2584 		lp->rx_ring[i].addr = addr;
 2585 		lp->rx_ring[i].ctrl = 0;
 2586 		addr += AT91ETHER_MAX_RBUFF_SZ;
 2587 	}
 2588 
 2589 	/* Set the Wrap bit on the last descriptor */
 2590 	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
 2591 
 2592 	/* Reset buffer index */
 2593 	lp->rx_tail = 0;
 2594 
 2595 	/* Program address of descriptor list in Rx Buffer Queue register */
 2596 	macb_writel(lp, RBQP, lp->rx_ring_dma);
 2597 
 2598 	/* Enable Receive and Transmit */
 2599 	ctl = macb_readl(lp, NCR);
 2600 	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 2601 
 2602 	return 0;
 2603 }
 2604 
 2605 /* Open the ethernet interface */
 2606 static int at91ether_open(struct net_device *dev)
 2607 {
 2608 	struct macb *lp = netdev_priv(dev);
 2609 	u32 ctl;
 2610 	int ret;
 2611 
 2612 	/* Clear internal statistics */
 2613 	ctl = macb_readl(lp, NCR);
 2614 	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
 2615 
 2616 	macb_set_hwaddr(lp);
 2617 
 2618 	ret = at91ether_start(dev);
 2619 	if (ret)
 2620 		return ret;
 2621 
 2622 	/* Enable MAC interrupts */
 2623 	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
 2624 			     MACB_BIT(RXUBR)	|
 2625 			     MACB_BIT(ISR_TUND)	|
 2626 			     MACB_BIT(ISR_RLE)	|
 2627 			     MACB_BIT(TCOMP)	|
 2628 			     MACB_BIT(ISR_ROVR)	|
 2629 			     MACB_BIT(HRESP));
 2630 
 2631 	/* schedule a link state check */
 2632 	phy_start(dev->phydev);
 2633 
 2634 	netif_start_queue(dev);
 2635 
 2636 	return 0;
 2637 }
 2638 
 2639 /* Close the interface */
 2640 static int at91ether_close(struct net_device *dev)
 2641 {
 2642 	struct macb *lp = netdev_priv(dev);
 2643 	u32 ctl;
 2644 
 2645 	/* Disable Receiver and Transmitter */
 2646 	ctl = macb_readl(lp, NCR);
 2647 	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
 2648 
 2649 	/* Disable MAC interrupts */
 2650 	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
 2651 			     MACB_BIT(RXUBR)	|
 2652 			     MACB_BIT(ISR_TUND)	|
 2653 			     MACB_BIT(ISR_RLE)	|
 2654 			     MACB_BIT(TCOMP)	|
 2655 			     MACB_BIT(ISR_ROVR) |
 2656 			     MACB_BIT(HRESP));
 2657 
 2658 	netif_stop_queue(dev);
 2659 
 2660 	dma_free_coherent(&lp->pdev->dev,
 2661 			  AT91ETHER_MAX_RX_DESCR *
 2662 			  sizeof(struct macb_dma_desc),
 2663 			  lp->rx_ring, lp->rx_ring_dma);
 2664 	lp->rx_ring = NULL;
 2665 
 2666 	dma_free_coherent(&lp->pdev->dev,
 2667 			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
 2668 			  lp->rx_buffers, lp->rx_buffers_dma);
 2669 	lp->rx_buffers = NULL;
 2670 
 2671 	return 0;
 2672 }
 2673 
 2674 /* Transmit packet */
 2675 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 2676 {
 2677 	struct macb *lp = netdev_priv(dev);
 2678 
 2679 	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
 2680 		netif_stop_queue(dev);
 2681 
 2682 		/* Store packet information (to free when Tx completed) */
 2683 		lp->skb = skb;
 2684 		lp->skb_length = skb->len;
 2685 		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
 2686 						  skb->len, DMA_TO_DEVICE);
 2687 
 2688 		/* Set address of the data in the Transmit Address register */
 2689 		macb_writel(lp, TAR, lp->skb_physaddr);
 2690 		/* Set length of the packet in the Transmit Control register */
 2691 		macb_writel(lp, TCR, skb->len);
 2692 
 2693 	} else {
 2694 		netdev_err(dev, "%s called, but device is busy!\n", __func__);
 2695 		return NETDEV_TX_BUSY;
 2696 	}
 2697 
 2698 	return NETDEV_TX_OK;
 2699 }
 2700 
 2701 /* Extract received frames from the buffer descriptors and send them to the
 2702  * upper layers.  (Called from interrupt context.)
 2703  */
 2704 static void at91ether_rx(struct net_device *dev)
 2705 {
 2706 	struct macb *lp = netdev_priv(dev);
 2707 	unsigned char *p_recv;
 2708 	struct sk_buff *skb;
 2709 	unsigned int pktlen;
 2710 
 2711 	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
 2712 		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
 2713 		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
 2714 		skb = netdev_alloc_skb(dev, pktlen + 2);
 2715 		if (skb) {
 2716 			skb_reserve(skb, 2);
 2717 			memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 2718 
 2719 			skb->protocol = eth_type_trans(skb, dev);
 2720 			lp->stats.rx_packets++;
 2721 			lp->stats.rx_bytes += pktlen;
 2722 			netif_rx(skb);
 2723 		} else {
 2724 			lp->stats.rx_dropped++;
 2725 		}
 2726 
 2727 		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
 2728 			lp->stats.multicast++;
 2729 
 2730 		/* reset ownership bit */
 2731 		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
 2732 
 2733 		/* wrap after last buffer */
 2734 		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
 2735 			lp->rx_tail = 0;
 2736 		else
 2737 			lp->rx_tail++;
 2738 	}
 2739 }
 2740 
 2741 /* MAC interrupt handler */
 2742 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
 2743 {
 2744 	struct net_device *dev = dev_id;
 2745 	struct macb *lp = netdev_priv(dev);
 2746 	u32 intstatus, ctl;
 2747 
 2748 	/* MAC Interrupt Status register indicates what interrupts are pending.
 2749 	 * It is automatically cleared once read.
 2750 	 */
 2751 	intstatus = macb_readl(lp, ISR);
 2752 
 2753 	/* Receive complete */
 2754 	if (intstatus & MACB_BIT(RCOMP))
 2755 		at91ether_rx(dev);
 2756 
 2757 	/* Transmit complete */
 2758 	if (intstatus & MACB_BIT(TCOMP)) {
 2759 		/* The TCOM bit is set even if the transmission failed */
 2760 		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
 2761 			lp->stats.tx_errors++;
 2762 
 2763 		if (lp->skb) {
 2764 			dev_kfree_skb_irq(lp->skb);
 2765 			lp->skb = NULL;
 2766 			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
 2767 					 lp->skb_length, DMA_TO_DEVICE);
 2768 			lp->stats.tx_packets++;
 2769 			lp->stats.tx_bytes += lp->skb_length;
 2770 		}
 2771 		netif_wake_queue(dev);
 2772 	}
 2773 
 2774 	/* Work-around for EMAC Errata section 41.3.1 */
 2775 	if (intstatus & MACB_BIT(RXUBR)) {
 2776 		ctl = macb_readl(lp, NCR);
 2777 		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
 2778 		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
 2779 	}
 2780 
 2781 	if (intstatus & MACB_BIT(ISR_ROVR))
 2782 		netdev_err(dev, "ROVR error\n");
 2783 
 2784 	return IRQ_HANDLED;
 2785 }
 2786 
 2787 #ifdef CONFIG_NET_POLL_CONTROLLER
 2788 static void at91ether_poll_controller(struct net_device *dev)
 2789 {
 2790 	unsigned long flags;
 2791 
 2792 	local_irq_save(flags);
 2793 	at91ether_interrupt(dev->irq, dev);
 2794 	local_irq_restore(flags);
 2795 }
 2796 #endif
 2797 
 2798 static const struct net_device_ops at91ether_netdev_ops = {
 2799 	.ndo_open		= at91ether_open,
 2800 	.ndo_stop		= at91ether_close,
 2801 	.ndo_start_xmit		= at91ether_start_xmit,
 2802 	.ndo_get_stats		= macb_get_stats,
 2803 	.ndo_set_rx_mode	= macb_set_rx_mode,
 2804 	.ndo_set_mac_address	= eth_mac_addr,
 2805 	.ndo_do_ioctl		= macb_ioctl,
 2806 	.ndo_validate_addr	= eth_validate_addr,
 2807 	.ndo_change_mtu		= eth_change_mtu,
 2808 #ifdef CONFIG_NET_POLL_CONTROLLER
 2809 	.ndo_poll_controller	= at91ether_poll_controller,
 2810 #endif
 2811 };
 2812 
 2813 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
 2814 			      struct clk **hclk, struct clk **tx_clk,
 2815 			      struct clk **rx_clk)
 2816 {
 2817 	int err;
 2818 
 2819 	*hclk = NULL;
 2820 	*tx_clk = NULL;
 2821 	*rx_clk = NULL;
 2822 
 2823 	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
 2824 	if (IS_ERR(*pclk))
 2825 		return PTR_ERR(*pclk);
 2826 
 2827 	err = clk_prepare_enable(*pclk);
 2828 	if (err) {
 2829 		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
 2830 		return err;
 2831 	}
 2832 
 2833 	return 0;
 2834 }
 2835 
 2836 static int at91ether_init(struct platform_device *pdev)
 2837 {
 2838 	struct net_device *dev = platform_get_drvdata(pdev);
 2839 	struct macb *bp = netdev_priv(dev);
 2840 	int err;
 2841 	u32 reg;
 2842 
 2843 	dev->netdev_ops = &at91ether_netdev_ops;
 2844 	dev->ethtool_ops = &macb_ethtool_ops;
 2845 
 2846 	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
 2847 			       0, dev->name, dev);
 2848 	if (err)
 2849 		return err;
 2850 
 2851 	macb_writel(bp, NCR, 0);
 2852 
 2853 	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
 2854 	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
 2855 		reg |= MACB_BIT(RM9200_RMII);
 2856 
 2857 	macb_writel(bp, NCFGR, reg);
 2858 
 2859 	return 0;
 2860 }
 2861 
 2862 static const struct macb_config at91sam9260_config = {
 2863 	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2864 	.clk_init = macb_clk_init,
 2865 	.init = macb_init,
 2866 };
 2867 
 2868 static const struct macb_config pc302gem_config = {
 2869 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 2870 	.dma_burst_length = 16,
 2871 	.clk_init = macb_clk_init,
 2872 	.init = macb_init,
 2873 };
 2874 
 2875 static const struct macb_config sama5d2_config = {
 2876 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2877 	.dma_burst_length = 16,
 2878 	.clk_init = macb_clk_init,
 2879 	.init = macb_init,
 2880 };
 2881 
 2882 static const struct macb_config sama5d3_config = {
 2883 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
 2884 	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2885 	.dma_burst_length = 16,
 2886 	.clk_init = macb_clk_init,
 2887 	.init = macb_init,
 2888 };
 2889 
 2890 static const struct macb_config sama5d4_config = {
 2891 	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 2892 	.dma_burst_length = 4,
 2893 	.clk_init = macb_clk_init,
 2894 	.init = macb_init,
 2895 };
 2896 
 2897 static const struct macb_config emac_config = {
 2898 	.clk_init = at91ether_clk_init,
 2899 	.init = at91ether_init,
 2900 };
 2901 
 2902 static const struct macb_config np4_config = {
 2903 	.caps = MACB_CAPS_USRIO_DISABLED,
 2904 	.clk_init = macb_clk_init,
 2905 	.init = macb_init,
 2906 };
 2907 
 2908 static const struct macb_config zynqmp_config = {
 2909 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
 2910 	.dma_burst_length = 16,
 2911 	.clk_init = macb_clk_init,
 2912 	.init = macb_init,
 2913 	.jumbo_max_len = 10240,
 2914 };
 2915 
 2916 static const struct macb_config zynq_config = {
 2917 	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
 2918 	.dma_burst_length = 16,
 2919 	.clk_init = macb_clk_init,
 2920 	.init = macb_init,
 2921 };
 2922 
 2923 static const struct of_device_id macb_dt_ids[] = {
 2924 	{ .compatible = "cdns,at32ap7000-macb" },
 2925 	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
 2926 	{ .compatible = "cdns,macb" },
 2927 	{ .compatible = "cdns,np4-macb", .data = &np4_config },
 2928 	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
 2929 	{ .compatible = "cdns,gem", .data = &pc302gem_config },
 2930 	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
 2931 	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
 2932 	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
 2933 	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
 2934 	{ .compatible = "cdns,emac", .data = &emac_config },
 2935 	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
 2936 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
 2937 	{ /* sentinel */ }
 2938 };
 2939 MODULE_DEVICE_TABLE(of, macb_dt_ids);
 2940 #endif /* CONFIG_OF */
 2941 
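/* of_match_node() picks the best-matching entry of macb_dt_ids for the device
 * node being probed, so a node such as the purely illustrative fragment below
 * would bind to sama5d3_config through its "atmel,sama5d3-gem" compatible,
 * while a bare "cdns,macb" node falls back to the built-in
 * macb_clk_init/macb_init defaults:
 *
 *	ethernet@f0028000 {
 *		compatible = "atmel,sama5d3-gem";
 *		reg = <0xf0028000 0x100>;
 *	};
 */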
 2942 static int macb_probe(struct platform_device *pdev)
 2943 {
 2944 	int (*clk_init)(struct platform_device *, struct clk **,
 2945 			struct clk **, struct clk **,  struct clk **)
 2946 					      = macb_clk_init;
 2947 	int (*init)(struct platform_device *) = macb_init;
 2948 	struct device_node *np = pdev->dev.of_node;
 2949 	struct device_node *phy_node;
 2950 	const struct macb_config *macb_config = NULL;
 2951 	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
 2952 	unsigned int queue_mask, num_queues;
 2953 	struct macb_platform_data *pdata;
 2954 	bool native_io;
 2955 	struct phy_device *phydev;
 2956 	struct net_device *dev;
 2957 	struct resource *regs;
 2958 	void __iomem *mem;
 2959 	const char *mac;
 2960 	struct macb *bp;
 2961 	int err;
 2962 
 2963 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 2964 	mem = devm_ioremap_resource(&pdev->dev, regs);
 2965 	if (IS_ERR(mem))
 2966 		return PTR_ERR(mem);
 2967 
 2968 	if (np) {
 2969 		const struct of_device_id *match;
 2970 
 2971 		match = of_match_node(macb_dt_ids, np);
 2972 		if (match && match->data) {
 2973 			macb_config = match->data;
 2974 			clk_init = macb_config->clk_init;
 2975 			init = macb_config->init;
 2976 		}
 2977 	}
 2978 
 2979 	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
 2980 	if (err)
 2981 		return err;
 2982 
 2983 	native_io = hw_is_native_io(mem);
 2984 
 2985 	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
 2986 	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
 2987 	if (!dev) {
 2988 		err = -ENOMEM;
 2989 		goto err_disable_clocks;
 2990 	}
 2991 
 2992 	dev->base_addr = regs->start;
 2993 
 2994 	SET_NETDEV_DEV(dev, &pdev->dev);
 2995 
 2996 	bp = netdev_priv(dev);
 2997 	bp->pdev = pdev;
 2998 	bp->dev = dev;
 2999 	bp->regs = mem;
 3000 	bp->native_io = native_io;
 3001 	if (native_io) {
 3002 		bp->macb_reg_readl = hw_readl_native;
 3003 		bp->macb_reg_writel = hw_writel_native;
 3004 	} else {
 3005 		bp->macb_reg_readl = hw_readl;
 3006 		bp->macb_reg_writel = hw_writel;
 3007 	}
 3008 	bp->num_queues = num_queues;
 3009 	bp->queue_mask = queue_mask;
 3010 	if (macb_config)
 3011 		bp->dma_burst_length = macb_config->dma_burst_length;
 3012 	bp->pclk = pclk;
 3013 	bp->hclk = hclk;
 3014 	bp->tx_clk = tx_clk;
 3015 	bp->rx_clk = rx_clk;
 3016 	if (macb_config)
 3017 		bp->jumbo_max_len = macb_config->jumbo_max_len;
 3018 
 3019 	bp->wol = 0;
 3020 	if (of_get_property(np, "magic-packet", NULL))
 3021 		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
 3022 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
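	/* The "magic-packet" DT property only records that the hardware can wake
	 * the system on a magic packet; device_init_wakeup() then tells the PM
	 * core whether this device may act as a wakeup source. Actually arming
	 * WoL is left to ethtool (macb_set_wol setting MACB_WOL_ENABLED) and to
	 * macb_suspend() below. */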
 3023 
 3024 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3025 	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
 3026 		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
 3027 #endif
 3028 
 3029 	spin_lock_init(&bp->lock);
 3030 
 3031 	/* setup capabilities */
 3032 	macb_configure_caps(bp, macb_config);
 3033 
 3034 	platform_set_drvdata(pdev, dev);
 3035 
 3036 	dev->irq = platform_get_irq(pdev, 0);
 3037 	if (dev->irq < 0) {
 3038 		err = dev->irq;
 3039 		goto err_out_free_netdev;
 3040 	}
 3041 
 3042 	mac = of_get_mac_address(np);
 3043 	if (mac)
 3044 		ether_addr_copy(bp->dev->dev_addr, mac);
 3045 	else
 3046 		macb_get_hwaddr(bp);
 3047 
 3048 	/* Power up the PHY if there is a GPIO reset */
 3049 	phy_node = of_get_next_available_child(np, NULL);
 3050 	if (phy_node) {
 3051 		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
 3052 
 3053 		if (gpio_is_valid(gpio)) {
 3054 			bp->reset_gpio = gpio_to_desc(gpio);
 3055 			gpiod_direction_output(bp->reset_gpio, 1);
 3056 		}
 3057 	}
 3058 	of_node_put(phy_node);
 3059 
 3060 	err = of_get_phy_mode(np);
 3061 	if (err < 0) {
 3062 		pdata = dev_get_platdata(&pdev->dev);
 3063 		if (pdata && pdata->is_rmii)
 3064 			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
 3065 		else
 3066 			bp->phy_interface = PHY_INTERFACE_MODE_MII;
 3067 	} else {
 3068 		bp->phy_interface = err;
 3069 	}
 3070 
 3071 	/* IP specific init */
 3072 	err = init(pdev);
 3073 	if (err)
 3074 		goto err_out_free_netdev;
 3075 
 3076 	err = macb_mii_init(bp);
 3077 	if (err)
 3078 		goto err_out_free_netdev;
 3079 
 3080 	phydev = dev->phydev;
 3081 
 3082 	netif_carrier_off(dev);
 3083 
 3084 	err = register_netdev(dev);
 3085 	if (err) {
 3086 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
 3087 		goto err_out_unregister_mdio;
 3088 	}
 3089 
 3090 	phy_attached_info(phydev);
 3091 
 3092 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
 3093 		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
 3094 		    dev->base_addr, dev->irq, dev->dev_addr);
 3095 
 3096 	return 0;
 3097 
 3098 err_out_unregister_mdio:
 3099 	phy_disconnect(dev->phydev);
 3100 	mdiobus_unregister(bp->mii_bus);
 3101 	mdiobus_free(bp->mii_bus);
 3102 
 3103 	/* Shutdown the PHY if there is a GPIO reset */
 3104 	if (bp->reset_gpio)
 3105 		gpiod_set_value(bp->reset_gpio, 0);
 3106 
 3107 err_out_free_netdev:
 3108 	free_netdev(dev);
 3109 
 3110 err_disable_clocks:
 3111 	clk_disable_unprepare(tx_clk);
 3112 	clk_disable_unprepare(hclk);
 3113 	clk_disable_unprepare(pclk);
 3114 	clk_disable_unprepare(rx_clk);
 3115 
 3116 	return err;
 3117 }
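/* The error labels above unwind in reverse order of acquisition: a
 * register_netdev() failure disconnects the PHY, tears down the MDIO bus and
 * drives the PHY reset GPIO low again; earlier failures only free the
 * net_device; and in every case the clocks taken by clk_init() are disabled.
 * The register mapping and the IRQ were requested with devm_* helpers and are
 * released by the driver core, so they need no explicit label. */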
 3118 
 3119 static int macb_remove(struct platform_device *pdev)
 3120 {
 3121 	struct net_device *dev;
 3122 	struct macb *bp;
 3123 
 3124 	dev = platform_get_drvdata(pdev);
 3125 
 3126 	if (dev) {
 3127 		bp = netdev_priv(dev);
 3128 		if (dev->phydev)
 3129 			phy_disconnect(dev->phydev);
 3130 		mdiobus_unregister(bp->mii_bus);
 3131 		dev->phydev = NULL;
 3132 		mdiobus_free(bp->mii_bus);
 3133 
 3134 		/* Shutdown the PHY if there is a GPIO reset */
 3135 		if (bp->reset_gpio)
 3136 			gpiod_set_value(bp->reset_gpio, 0);
 3137 
 3138 		unregister_netdev(dev);
 3139 		clk_disable_unprepare(bp->tx_clk);
 3140 		clk_disable_unprepare(bp->hclk);
 3141 		clk_disable_unprepare(bp->pclk);
 3142 		clk_disable_unprepare(bp->rx_clk);
 3143 		free_netdev(dev);
 3144 	}
 3145 
 3146 	return 0;
 3147 }
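/* macb_remove() mirrors the tail of probe: disconnect the PHY and tear down
 * the MDIO bus first, drive the PHY reset GPIO low, unregister the net_device,
 * then release the clocks and finally free the net_device itself. */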
 3148 
 3149 static int __maybe_unused macb_suspend(struct device *dev)
 3150 {
 3151 	struct platform_device *pdev = to_platform_device(dev);
 3152 	struct net_device *netdev = platform_get_drvdata(pdev);
 3153 	struct macb *bp = netdev_priv(netdev);
 3154 
 3155 	netif_carrier_off(netdev);
 3156 	netif_device_detach(netdev);
 3157 
 3158 	if (bp->wol & MACB_WOL_ENABLED) {
 3159 		macb_writel(bp, IER, MACB_BIT(WOL));
 3160 		macb_writel(bp, WOL, MACB_BIT(MAG));
 3161 		enable_irq_wake(bp->queues[0].irq);
 3162 	} else {
 3163 		clk_disable_unprepare(bp->tx_clk);
 3164 		clk_disable_unprepare(bp->hclk);
 3165 		clk_disable_unprepare(bp->pclk);
 3166 		clk_disable_unprepare(bp->rx_clk);
 3167 	}
 3168 
 3169 	return 0;
 3170 }
 3171 
 3172 static int __maybe_unused macb_resume(struct device *dev)
 3173 {
 3174 	struct platform_device *pdev = to_platform_device(dev);
 3175 	struct net_device *netdev = platform_get_drvdata(pdev);
 3176 	struct macb *bp = netdev_priv(netdev);
 3177 
 3178 	if (bp->wol & MACB_WOL_ENABLED) {
 3179 		macb_writel(bp, IDR, MACB_BIT(WOL));
 3180 		macb_writel(bp, WOL, 0);
 3181 		disable_irq_wake(bp->queues[0].irq);
 3182 	} else {
 3183 		clk_prepare_enable(bp->pclk);
 3184 		clk_prepare_enable(bp->hclk);
 3185 		clk_prepare_enable(bp->tx_clk);
 3186 		clk_prepare_enable(bp->rx_clk);
 3187 	}
 3188 
 3189 	netif_device_attach(netdev);
 3190 
 3191 	return 0;
 3192 }
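/* Suspend and resume are symmetric around the WoL decision: with WoL armed the
 * MAC stays clocked, the WOL interrupt is unmasked (IER, magic-packet match in
 * the WOL register) and the queue-0 IRQ becomes a wakeup source; otherwise all
 * clocks are gated. Resume undoes exactly that, or re-enables the clocks with
 * pclk and hclk first, since the bus clocks must run before the TX/RX clocks
 * are of any use. */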
 3193 
 3194 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
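/* SIMPLE_DEV_PM_OPS() only wires macb_suspend/macb_resume into the
 * system-sleep callbacks when CONFIG_PM_SLEEP is enabled; the __maybe_unused
 * annotations on the two functions keep the build free of "defined but not
 * used" warnings when it is not. */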
 3195 
 3196 static struct platform_driver macb_driver = {
 3197 	.probe		= macb_probe,
 3198 	.remove		= macb_remove,
 3199 	.driver		= {
 3200 		.name		= "macb",
 3201 		.of_match_table	= of_match_ptr(macb_dt_ids),
 3202 		.pm	= &macb_pm_ops,
 3203 	},
 3204 };
 3205 
 3206 module_platform_driver(macb_driver);
 3207 
 3208 MODULE_LICENSE("GPL");
 3209 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
 3210 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
 3211 MODULE_ALIAS("platform:macb");
 3212 
 3213 
 3214 
 3215 
 3216 
 3217 /* LDV_COMMENT_BEGIN_MAIN */
 3218 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 3219 
 3220 /*###########################################################################*/
 3221 
 3222 /*############## Driver Environment Generator 0.2 output ####################*/
 3223 
 3224 /*###########################################################################*/
 3225 
 3226 
 3227 
 3228 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
 3229 void ldv_check_final_state(void);
 3230 
 3231 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Checks the correctness of a return value. */
 3232 void ldv_check_return_value(int res);
 3233 
 3234 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Checks the correctness of the probe() function's return value. */
 3235 void ldv_check_return_value_probe(int res);
 3236 
 3237 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 3238 void ldv_initialize(void);
 3239 
 3240 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 3241 void ldv_handler_precall(void);
 3242 
 3243 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
 3244 int nondet_int(void);
 3245 
 3246 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 3247 int LDV_IN_INTERRUPT;
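/* The declarations above are satisfied by the verifier's environment model
 * rather than by kernel code. A minimal sketch of what such stubs could look
 * like (assumed here purely for illustration; the real LDV model may differ):
 *
 *	int nondet_int(void)
 *	{
 *		return __VERIFIER_nondet_int();	// unconstrained value on each call
 *	}
 *
 *	void ldv_handler_precall(void)
 *	{
 *		// reset per-handler model state before the next driver callback
 *	}
 */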
 3248 
 3249 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 3250 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 3251 
 3252 
 3253 
 3254 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 3255 	/*============================= VARIABLE DECLARATION PART   =============================*/
 3256 	/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 3257 	/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 3258 	/* LDV_COMMENT_BEGIN_PREP */
 3259 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3260 	#define MACB_RX_BUFFER_SIZE	128
 3261 	#define RX_BUFFER_MULTIPLE	64  
 3262 	#define RX_RING_SIZE		512 
 3263 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3264 	#define TX_RING_SIZE		128 
 3265 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3266 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3267 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3268 				 | MACB_BIT(ISR_ROVR))
 3269 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3270 					| MACB_BIT(ISR_RLE)		\
 3271 					| MACB_BIT(TXERR))
 3272 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3273 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3274 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3275 	#define GEM_MTU_MIN_SIZE	68
 3276 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3277 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3278 	#define MACB_HALT_TIMEOUT	1230
 3279 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3280 	#endif
 3281 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3282 	#endif
 3283 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3284 	#endif
 3285 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3286 	#endif
 3287 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3288 	#endif
 3289 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3290 	#endif
 3291 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3292 	#endif
 3293 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3294 	#endif
 3295 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3296 	#endif
 3297 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3298 	#endif
 3299 	/* LDV_COMMENT_END_PREP */
 3300 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs_len" */
 3301 	struct net_device * var_group1;
 3302 	/* LDV_COMMENT_BEGIN_PREP */
 3303 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3304 	#endif
 3305 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3306 	#endif
 3307 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3308 	#endif
 3309 	#if defined(CONFIG_OF)
 3310 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3311 	#define AT91ETHER_MAX_RX_DESCR	9
 3312 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3313 	#endif
 3314 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3315 	#endif
 3316 	#endif 
 3317 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3318 	#endif
 3319 	/* LDV_COMMENT_END_PREP */
 3320 	/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 3321 	/* LDV_COMMENT_BEGIN_PREP */
 3322 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3323 	#define MACB_RX_BUFFER_SIZE	128
 3324 	#define RX_BUFFER_MULTIPLE	64  
 3325 	#define RX_RING_SIZE		512 
 3326 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3327 	#define TX_RING_SIZE		128 
 3328 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3329 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3330 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3331 				 | MACB_BIT(ISR_ROVR))
 3332 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3333 					| MACB_BIT(ISR_RLE)		\
 3334 					| MACB_BIT(TXERR))
 3335 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3336 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3337 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3338 	#define GEM_MTU_MIN_SIZE	68
 3339 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3340 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3341 	#define MACB_HALT_TIMEOUT	1230
 3342 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3343 	#endif
 3344 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3345 	#endif
 3346 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3347 	#endif
 3348 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3349 	#endif
 3350 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3351 	#endif
 3352 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3353 	#endif
 3354 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3355 	#endif
 3356 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3357 	#endif
 3358 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3359 	#endif
 3360 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3361 	#endif
 3362 	/* LDV_COMMENT_END_PREP */
 3363 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs" */
 3364 	struct ethtool_regs * var_group2;
 3365 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_regs" */
 3366 	void * var_macb_get_regs_68_p2;
 3367 	/* LDV_COMMENT_BEGIN_PREP */
 3368 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3369 	#endif
 3370 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3371 	#endif
 3372 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3373 	#endif
 3374 	#if defined(CONFIG_OF)
 3375 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3376 	#define AT91ETHER_MAX_RX_DESCR	9
 3377 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3378 	#endif
 3379 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3380 	#endif
 3381 	#endif 
 3382 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3383 	#endif
 3384 	/* LDV_COMMENT_END_PREP */
 3385 	/* content: static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 3386 	/* LDV_COMMENT_BEGIN_PREP */
 3387 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3388 	#define MACB_RX_BUFFER_SIZE	128
 3389 	#define RX_BUFFER_MULTIPLE	64  
 3390 	#define RX_RING_SIZE		512 
 3391 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3392 	#define TX_RING_SIZE		128 
 3393 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3394 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3395 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3396 				 | MACB_BIT(ISR_ROVR))
 3397 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3398 					| MACB_BIT(ISR_RLE)		\
 3399 					| MACB_BIT(TXERR))
 3400 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3401 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3402 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3403 	#define GEM_MTU_MIN_SIZE	68
 3404 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3405 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3406 	#define MACB_HALT_TIMEOUT	1230
 3407 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3408 	#endif
 3409 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3410 	#endif
 3411 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3412 	#endif
 3413 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3414 	#endif
 3415 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3416 	#endif
 3417 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3418 	#endif
 3419 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3420 	#endif
 3421 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3422 	#endif
 3423 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3424 	#endif
 3425 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3426 	#endif
 3427 	/* LDV_COMMENT_END_PREP */
 3428 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_get_wol" */
 3429 	struct ethtool_wolinfo * var_group3;
 3430 	/* LDV_COMMENT_BEGIN_PREP */
 3431 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3432 	#endif
 3433 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3434 	#endif
 3435 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3436 	#endif
 3437 	#if defined(CONFIG_OF)
 3438 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3439 	#define AT91ETHER_MAX_RX_DESCR	9
 3440 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3441 	#endif
 3442 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3443 	#endif
 3444 	#endif 
 3445 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3446 	#endif
 3447 	/* LDV_COMMENT_END_PREP */
 3448 	/* content: static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 3449 	/* LDV_COMMENT_BEGIN_PREP */
 3450 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3451 	#define MACB_RX_BUFFER_SIZE	128
 3452 	#define RX_BUFFER_MULTIPLE	64  
 3453 	#define RX_RING_SIZE		512 
 3454 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3455 	#define TX_RING_SIZE		128 
 3456 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3457 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3458 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3459 				 | MACB_BIT(ISR_ROVR))
 3460 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3461 					| MACB_BIT(ISR_RLE)		\
 3462 					| MACB_BIT(TXERR))
 3463 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3464 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3465 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3466 	#define GEM_MTU_MIN_SIZE	68
 3467 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3468 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3469 	#define MACB_HALT_TIMEOUT	1230
 3470 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3471 	#endif
 3472 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3473 	#endif
 3474 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3475 	#endif
 3476 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3477 	#endif
 3478 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3479 	#endif
 3480 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3481 	#endif
 3482 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3483 	#endif
 3484 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3485 	#endif
 3486 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3487 	#endif
 3488 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3489 	#endif
 3490 	/* LDV_COMMENT_END_PREP */
 3491 	/* LDV_COMMENT_BEGIN_PREP */
 3492 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3493 	#endif
 3494 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3495 	#endif
 3496 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3497 	#endif
 3498 	#if defined(CONFIG_OF)
 3499 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3500 	#define AT91ETHER_MAX_RX_DESCR	9
 3501 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3502 	#endif
 3503 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3504 	#endif
 3505 	#endif 
 3506 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3507 	#endif
 3508 	/* LDV_COMMENT_END_PREP */
 3509 
 3510 	/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 3511 	/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 3512 	/* LDV_COMMENT_BEGIN_PREP */
 3513 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3514 	#define MACB_RX_BUFFER_SIZE	128
 3515 	#define RX_BUFFER_MULTIPLE	64  
 3516 	#define RX_RING_SIZE		512 
 3517 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3518 	#define TX_RING_SIZE		128 
 3519 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3520 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3521 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3522 				 | MACB_BIT(ISR_ROVR))
 3523 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3524 					| MACB_BIT(ISR_RLE)		\
 3525 					| MACB_BIT(TXERR))
 3526 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3527 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3528 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3529 	#define GEM_MTU_MIN_SIZE	68
 3530 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3531 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3532 	#define MACB_HALT_TIMEOUT	1230
 3533 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3534 	#endif
 3535 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3536 	#endif
 3537 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3538 	#endif
 3539 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3540 	#endif
 3541 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3542 	#endif
 3543 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3544 	#endif
 3545 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3546 	#endif
 3547 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3548 	#endif
 3549 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3550 	#endif
 3551 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3552 	#endif
 3553 	/* LDV_COMMENT_END_PREP */
 3554 	/* LDV_COMMENT_BEGIN_PREP */
 3555 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3556 	#endif
 3557 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3558 	#endif
 3559 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3560 	#endif
 3561 	#if defined(CONFIG_OF)
 3562 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3563 	#define AT91ETHER_MAX_RX_DESCR	9
 3564 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3565 	#endif
 3566 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3567 	#endif
 3568 	#endif 
 3569 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3570 	#endif
 3571 	/* LDV_COMMENT_END_PREP */
 3572 	/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 3573 	/* LDV_COMMENT_BEGIN_PREP */
 3574 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3575 	#define MACB_RX_BUFFER_SIZE	128
 3576 	#define RX_BUFFER_MULTIPLE	64  
 3577 	#define RX_RING_SIZE		512 
 3578 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3579 	#define TX_RING_SIZE		128 
 3580 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3581 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3582 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3583 				 | MACB_BIT(ISR_ROVR))
 3584 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3585 					| MACB_BIT(ISR_RLE)		\
 3586 					| MACB_BIT(TXERR))
 3587 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3588 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3589 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3590 	#define GEM_MTU_MIN_SIZE	68
 3591 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3592 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3593 	#define MACB_HALT_TIMEOUT	1230
 3594 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3595 	#endif
 3596 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3597 	#endif
 3598 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3599 	#endif
 3600 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3601 	#endif
 3602 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3603 	#endif
 3604 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3605 	#endif
 3606 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3607 	#endif
 3608 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3609 	#endif
 3610 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3611 	#endif
 3612 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3613 	#endif
 3614 	/* LDV_COMMENT_END_PREP */
 3615 	/* LDV_COMMENT_BEGIN_PREP */
 3616 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3617 	#endif
 3618 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3619 	#endif
 3620 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3621 	#endif
 3622 	#if defined(CONFIG_OF)
 3623 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3624 	#define AT91ETHER_MAX_RX_DESCR	9
 3625 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3626 	#endif
 3627 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3628 	#endif
 3629 	#endif 
 3630 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3631 	#endif
 3632 	/* LDV_COMMENT_END_PREP */
 3633 	/* content: static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)*/
 3634 	/* LDV_COMMENT_BEGIN_PREP */
 3635 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3636 	#define MACB_RX_BUFFER_SIZE	128
 3637 	#define RX_BUFFER_MULTIPLE	64  
 3638 	#define RX_RING_SIZE		512 
 3639 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3640 	#define TX_RING_SIZE		128 
 3641 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3642 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3643 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3644 				 | MACB_BIT(ISR_ROVR))
 3645 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3646 					| MACB_BIT(ISR_RLE)		\
 3647 					| MACB_BIT(TXERR))
 3648 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3649 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3650 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3651 	#define GEM_MTU_MIN_SIZE	68
 3652 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3653 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3654 	#define MACB_HALT_TIMEOUT	1230
 3655 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3656 	#endif
 3657 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3658 	#endif
 3659 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3660 	#endif
 3661 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3662 	#endif
 3663 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3664 	#endif
 3665 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3666 	#endif
 3667 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3668 	#endif
 3669 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3670 	#endif
 3671 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3672 	#endif
 3673 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3674 	#endif
 3675 	/* LDV_COMMENT_END_PREP */
 3676 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_stats" */
 3677 	struct ethtool_stats * var_group4;
 3678 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_stats" */
 3679 	u64 * var_gem_get_ethtool_stats_63_p2;
 3680 	/* LDV_COMMENT_BEGIN_PREP */
 3681 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3682 	#endif
 3683 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3684 	#endif
 3685 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3686 	#endif
 3687 	#if defined(CONFIG_OF)
 3688 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3689 	#define AT91ETHER_MAX_RX_DESCR	9
 3690 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3691 	#endif
 3692 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3693 	#endif
 3694 	#endif 
 3695 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3696 	#endif
 3697 	/* LDV_COMMENT_END_PREP */
 3698 	/* content: static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)*/
 3699 	/* LDV_COMMENT_BEGIN_PREP */
 3700 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3701 	#define MACB_RX_BUFFER_SIZE	128
 3702 	#define RX_BUFFER_MULTIPLE	64  
 3703 	#define RX_RING_SIZE		512 
 3704 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3705 	#define TX_RING_SIZE		128 
 3706 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3707 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3708 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3709 				 | MACB_BIT(ISR_ROVR))
 3710 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3711 					| MACB_BIT(ISR_RLE)		\
 3712 					| MACB_BIT(TXERR))
 3713 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3714 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3715 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3716 	#define GEM_MTU_MIN_SIZE	68
 3717 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3718 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3719 	#define MACB_HALT_TIMEOUT	1230
 3720 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3721 	#endif
 3722 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3723 	#endif
 3724 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3725 	#endif
 3726 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3727 	#endif
 3728 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3729 	#endif
 3730 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3731 	#endif
 3732 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3733 	#endif
 3734 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3735 	#endif
 3736 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3737 	#endif
 3738 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3739 	#endif
 3740 	/* LDV_COMMENT_END_PREP */
 3741 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_strings" */
 3742 	u32  var_gem_get_ethtool_strings_65_p1;
 3743 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_ethtool_strings" */
 3744 	u8 * var_gem_get_ethtool_strings_65_p2;
 3745 	/* LDV_COMMENT_BEGIN_PREP */
 3746 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3747 	#endif
 3748 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3749 	#endif
 3750 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3751 	#endif
 3752 	#if defined(CONFIG_OF)
 3753 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3754 	#define AT91ETHER_MAX_RX_DESCR	9
 3755 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3756 	#endif
 3757 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3758 	#endif
 3759 	#endif 
 3760 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3761 	#endif
 3762 	/* LDV_COMMENT_END_PREP */
 3763 	/* content: static int gem_get_sset_count(struct net_device *dev, int sset)*/
 3764 	/* LDV_COMMENT_BEGIN_PREP */
 3765 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3766 	#define MACB_RX_BUFFER_SIZE	128
 3767 	#define RX_BUFFER_MULTIPLE	64  
 3768 	#define RX_RING_SIZE		512 
 3769 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3770 	#define TX_RING_SIZE		128 
 3771 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3772 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3773 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3774 				 | MACB_BIT(ISR_ROVR))
 3775 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3776 					| MACB_BIT(ISR_RLE)		\
 3777 					| MACB_BIT(TXERR))
 3778 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3779 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3780 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3781 	#define GEM_MTU_MIN_SIZE	68
 3782 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3783 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3784 	#define MACB_HALT_TIMEOUT	1230
 3785 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3786 	#endif
 3787 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3788 	#endif
 3789 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3790 	#endif
 3791 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3792 	#endif
 3793 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3794 	#endif
 3795 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3796 	#endif
 3797 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3798 	#endif
 3799 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3800 	#endif
 3801 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3802 	#endif
 3803 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3804 	#endif
 3805 	/* LDV_COMMENT_END_PREP */
 3806 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "gem_get_sset_count" */
 3807 	int  var_gem_get_sset_count_64_p1;
 3808 	/* LDV_COMMENT_BEGIN_PREP */
 3809 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3810 	#endif
 3811 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3812 	#endif
 3813 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3814 	#endif
 3815 	#if defined(CONFIG_OF)
 3816 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3817 	#define AT91ETHER_MAX_RX_DESCR	9
 3818 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3819 	#endif
 3820 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3821 	#endif
 3822 	#endif 
 3823 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3824 	#endif
 3825 	/* LDV_COMMENT_END_PREP */
 3826 
 3827 	/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 3828 	/* content: static int macb_open(struct net_device *dev)*/
 3829 	/* LDV_COMMENT_BEGIN_PREP */
 3830 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3831 	#define MACB_RX_BUFFER_SIZE	128
 3832 	#define RX_BUFFER_MULTIPLE	64  
 3833 	#define RX_RING_SIZE		512 
 3834 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3835 	#define TX_RING_SIZE		128 
 3836 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3837 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3838 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3839 				 | MACB_BIT(ISR_ROVR))
 3840 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3841 					| MACB_BIT(ISR_RLE)		\
 3842 					| MACB_BIT(TXERR))
 3843 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3844 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3845 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3846 	#define GEM_MTU_MIN_SIZE	68
 3847 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3848 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3849 	#define MACB_HALT_TIMEOUT	1230
 3850 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3851 	#endif
 3852 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3853 	#endif
 3854 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3855 	#endif
 3856 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3857 	#endif
 3858 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3859 	#endif
 3860 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3861 	#endif
 3862 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3863 	#endif
 3864 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3865 	#endif
 3866 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3867 	#endif
 3868 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3869 	#endif
 3870 	/* LDV_COMMENT_END_PREP */
 3871 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_open" */
 3872 	static int res_macb_open_58;
 3873 	/* LDV_COMMENT_BEGIN_PREP */
 3874 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3875 	#endif
 3876 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3877 	#endif
 3878 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3879 	#endif
 3880 	#if defined(CONFIG_OF)
 3881 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3882 	#define AT91ETHER_MAX_RX_DESCR	9
 3883 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3884 	#endif
 3885 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3886 	#endif
 3887 	#endif 
 3888 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3889 	#endif
 3890 	/* LDV_COMMENT_END_PREP */
 3891 	/* content: static int macb_close(struct net_device *dev)*/
 3892 	/* LDV_COMMENT_BEGIN_PREP */
 3893 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3894 	#define MACB_RX_BUFFER_SIZE	128
 3895 	#define RX_BUFFER_MULTIPLE	64  
 3896 	#define RX_RING_SIZE		512 
 3897 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3898 	#define TX_RING_SIZE		128 
 3899 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3900 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3901 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3902 				 | MACB_BIT(ISR_ROVR))
 3903 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3904 					| MACB_BIT(ISR_RLE)		\
 3905 					| MACB_BIT(TXERR))
 3906 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3907 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3908 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3909 	#define GEM_MTU_MIN_SIZE	68
 3910 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3911 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3912 	#define MACB_HALT_TIMEOUT	1230
 3913 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3914 	#endif
 3915 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3916 	#endif
 3917 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3918 	#endif
 3919 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3920 	#endif
 3921 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3922 	#endif
 3923 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3924 	#endif
 3925 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3926 	#endif
 3927 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3928 	#endif
 3929 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3930 	#endif
 3931 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3932 	#endif
 3933 	/* LDV_COMMENT_END_PREP */
 3934 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_close" */
 3935 	static int res_macb_close_59;
 3936 	/* LDV_COMMENT_BEGIN_PREP */
 3937 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3938 	#endif
 3939 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3940 	#endif
 3941 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3942 	#endif
 3943 	#if defined(CONFIG_OF)
 3944 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 3945 	#define AT91ETHER_MAX_RX_DESCR	9
 3946 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3947 	#endif
 3948 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3949 	#endif
 3950 	#endif 
 3951 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3952 	#endif
 3953 	/* LDV_COMMENT_END_PREP */
 3954 	/* content: static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
 3955 	/* LDV_COMMENT_BEGIN_PREP */
 3956 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 3957 	#define MACB_RX_BUFFER_SIZE	128
 3958 	#define RX_BUFFER_MULTIPLE	64  
 3959 	#define RX_RING_SIZE		512 
 3960 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 3961 	#define TX_RING_SIZE		128 
 3962 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 3963 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 3964 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 3965 				 | MACB_BIT(ISR_ROVR))
 3966 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 3967 					| MACB_BIT(ISR_RLE)		\
 3968 					| MACB_BIT(TXERR))
 3969 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 3970 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 3971 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 3972 	#define GEM_MTU_MIN_SIZE	68
 3973 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 3974 	#define MACB_WOL_ENABLED		(0x1 << 1)
 3975 	#define MACB_HALT_TIMEOUT	1230
 3976 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3977 	#endif
 3978 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3979 	#endif
 3980 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3981 	#endif
 3982 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 3983 	#endif
 3984 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3985 	#endif
 3986 	/* LDV_COMMENT_END_PREP */
 3987 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_start_xmit" */
 3988 	struct sk_buff * var_group5;
 3989 	/* LDV_COMMENT_BEGIN_PREP */
 3990 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3991 	#endif
 3992 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3993 	#endif
 3994 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3995 	#endif
 3996 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 3997 	#endif
 3998 	#ifdef CONFIG_NET_POLL_CONTROLLER
 3999 	#endif
 4000 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4001 	#endif
 4002 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4003 	#endif
 4004 	#if defined(CONFIG_OF)
 4005 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4006 	#define AT91ETHER_MAX_RX_DESCR	9
 4007 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4008 	#endif
 4009 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4010 	#endif
 4011 	#endif 
 4012 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4013 	#endif
 4014 	/* LDV_COMMENT_END_PREP */
 4015 	/* content: static void macb_set_rx_mode(struct net_device *dev)*/
 4016 	/* LDV_COMMENT_BEGIN_PREP */
 4017 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4018 	#define MACB_RX_BUFFER_SIZE	128
 4019 	#define RX_BUFFER_MULTIPLE	64  
 4020 	#define RX_RING_SIZE		512 
 4021 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4022 	#define TX_RING_SIZE		128 
 4023 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4024 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4025 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4026 				 | MACB_BIT(ISR_ROVR))
 4027 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4028 					| MACB_BIT(ISR_RLE)		\
 4029 					| MACB_BIT(TXERR))
 4030 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4031 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4032 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4033 	#define GEM_MTU_MIN_SIZE	68
 4034 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4035 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4036 	#define MACB_HALT_TIMEOUT	1230
 4037 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4038 	#endif
 4039 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4040 	#endif
 4041 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4042 	#endif
 4043 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4044 	#endif
 4045 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4046 	#endif
 4047 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4048 	#endif
 4049 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4050 	#endif
 4051 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4052 	#endif
 4053 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4054 	#endif
 4055 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4056 	#endif
 4057 	/* LDV_COMMENT_END_PREP */
 4058 	/* LDV_COMMENT_BEGIN_PREP */
 4059 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4060 	#endif
 4061 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4062 	#endif
 4063 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4064 	#endif
 4065 	#if defined(CONFIG_OF)
 4066 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4067 	#define AT91ETHER_MAX_RX_DESCR	9
 4068 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4069 	#endif
 4070 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4071 	#endif
 4072 	#endif 
 4073 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4074 	#endif
 4075 	/* LDV_COMMENT_END_PREP */
 4076 	/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
 4077 	/* LDV_COMMENT_BEGIN_PREP */
 4078 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4079 	#define MACB_RX_BUFFER_SIZE	128
 4080 	#define RX_BUFFER_MULTIPLE	64  
 4081 	#define RX_RING_SIZE		512 
 4082 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4083 	#define TX_RING_SIZE		128 
 4084 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4085 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4086 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4087 				 | MACB_BIT(ISR_ROVR))
 4088 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4089 					| MACB_BIT(ISR_RLE)		\
 4090 					| MACB_BIT(TXERR))
 4091 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4092 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4093 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4094 	#define GEM_MTU_MIN_SIZE	68
 4095 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4096 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4097 	#define MACB_HALT_TIMEOUT	1230
 4098 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4099 	#endif
 4100 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4101 	#endif
 4102 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4103 	#endif
 4104 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4105 	#endif
 4106 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4107 	#endif
 4108 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4109 	#endif
 4110 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4111 	#endif
 4112 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4113 	#endif
 4114 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4115 	#endif
 4116 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4117 	#endif
 4118 	/* LDV_COMMENT_END_PREP */
 4119 	/* LDV_COMMENT_BEGIN_PREP */
 4120 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4121 	#endif
 4122 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4123 	#endif
 4124 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4125 	#endif
 4126 	#if defined(CONFIG_OF)
 4127 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4128 	#define AT91ETHER_MAX_RX_DESCR	9
 4129 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4130 	#endif
 4131 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4132 	#endif
 4133 	#endif 
 4134 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4135 	#endif
 4136 	/* LDV_COMMENT_END_PREP */
 4137 	/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
 4138 	/* LDV_COMMENT_BEGIN_PREP */
 4139 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4140 	#define MACB_RX_BUFFER_SIZE	128
 4141 	#define RX_BUFFER_MULTIPLE	64  
 4142 	#define RX_RING_SIZE		512 
 4143 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4144 	#define TX_RING_SIZE		128 
 4145 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4146 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4147 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4148 				 | MACB_BIT(ISR_ROVR))
 4149 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4150 					| MACB_BIT(ISR_RLE)		\
 4151 					| MACB_BIT(TXERR))
 4152 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4153 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4154 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4155 	#define GEM_MTU_MIN_SIZE	68
 4156 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4157 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4158 	#define MACB_HALT_TIMEOUT	1230
 4159 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4160 	#endif
 4161 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4162 	#endif
 4163 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4164 	#endif
 4165 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4166 	#endif
 4167 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4168 	#endif
 4169 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4170 	#endif
 4171 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4172 	#endif
 4173 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4174 	#endif
 4175 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4176 	#endif
 4177 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4178 	#endif
 4179 	/* LDV_COMMENT_END_PREP */
 4180 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_ioctl" */
 4181 	struct ifreq * var_group6;
 4182 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_ioctl" */
 4183 	int  var_macb_ioctl_71_p2;
 4184 	/* LDV_COMMENT_BEGIN_PREP */
 4185 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4186 	#endif
 4187 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4188 	#endif
 4189 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4190 	#endif
 4191 	#if defined(CONFIG_OF)
 4192 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4193 	#define AT91ETHER_MAX_RX_DESCR	9
 4194 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4195 	#endif
 4196 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4197 	#endif
 4198 	#endif 
 4199 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4200 	#endif
 4201 	/* LDV_COMMENT_END_PREP */
 4202 	/* content: static int macb_change_mtu(struct net_device *dev, int new_mtu)*/
 4203 	/* LDV_COMMENT_BEGIN_PREP */
 4204 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4205 	#define MACB_RX_BUFFER_SIZE	128
 4206 	#define RX_BUFFER_MULTIPLE	64  
 4207 	#define RX_RING_SIZE		512 
 4208 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4209 	#define TX_RING_SIZE		128 
 4210 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4211 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4212 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4213 				 | MACB_BIT(ISR_ROVR))
 4214 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4215 					| MACB_BIT(ISR_RLE)		\
 4216 					| MACB_BIT(TXERR))
 4217 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4218 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4219 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4220 	#define GEM_MTU_MIN_SIZE	68
 4221 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4222 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4223 	#define MACB_HALT_TIMEOUT	1230
 4224 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4225 	#endif
 4226 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4227 	#endif
 4228 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4229 	#endif
 4230 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4231 	#endif
 4232 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4233 	#endif
 4234 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4235 	#endif
 4236 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4237 	#endif
 4238 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4239 	#endif
 4240 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4241 	#endif
 4242 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4243 	#endif
 4244 	/* LDV_COMMENT_END_PREP */
 4245 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_change_mtu" */
 4246 	int  var_macb_change_mtu_60_p1;
 4247 	/* LDV_COMMENT_BEGIN_PREP */
 4248 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4249 	#endif
 4250 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4251 	#endif
 4252 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4253 	#endif
 4254 	#if defined(CONFIG_OF)
 4255 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4256 	#define AT91ETHER_MAX_RX_DESCR	9
 4257 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4258 	#endif
 4259 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4260 	#endif
 4261 	#endif 
 4262 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4263 	#endif
 4264 	/* LDV_COMMENT_END_PREP */
 4265 	/* content: static void macb_poll_controller(struct net_device *dev)*/
 4266 	/* LDV_COMMENT_BEGIN_PREP */
 4267 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4268 	#define MACB_RX_BUFFER_SIZE	128
 4269 	#define RX_BUFFER_MULTIPLE	64  
 4270 	#define RX_RING_SIZE		512 
 4271 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4272 	#define TX_RING_SIZE		128 
 4273 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4274 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4275 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4276 				 | MACB_BIT(ISR_ROVR))
 4277 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4278 					| MACB_BIT(ISR_RLE)		\
 4279 					| MACB_BIT(TXERR))
 4280 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4281 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4282 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4283 	#define GEM_MTU_MIN_SIZE	68
 4284 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4285 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4286 	#define MACB_HALT_TIMEOUT	1230
 4287 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4288 	#endif
 4289 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4290 	#endif
 4291 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4292 	#endif
 4293 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4294 	#endif
 4295 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4296 	/* LDV_COMMENT_END_PREP */
 4297 	/* LDV_COMMENT_BEGIN_PREP */
 4298 	#endif
 4299 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4300 	#endif
 4301 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4302 	#endif
 4303 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4304 	#endif
 4305 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4306 	#endif
 4307 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4308 	#endif
 4309 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4310 	#endif
 4311 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4312 	#endif
 4313 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4314 	#endif
 4315 	#if defined(CONFIG_OF)
 4316 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4317 	#define AT91ETHER_MAX_RX_DESCR	9
 4318 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4319 	#endif
 4320 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4321 	#endif
 4322 	#endif 
 4323 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4324 	#endif
 4325 	/* LDV_COMMENT_END_PREP */
 4326 	/* content: static int macb_set_features(struct net_device *netdev, netdev_features_t features)*/
 4327 	/* LDV_COMMENT_BEGIN_PREP */
 4328 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4329 	#define MACB_RX_BUFFER_SIZE	128
 4330 	#define RX_BUFFER_MULTIPLE	64  
 4331 	#define RX_RING_SIZE		512 
 4332 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4333 	#define TX_RING_SIZE		128 
 4334 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4335 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4336 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4337 				 | MACB_BIT(ISR_ROVR))
 4338 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4339 					| MACB_BIT(ISR_RLE)		\
 4340 					| MACB_BIT(TXERR))
 4341 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4342 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4343 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4344 	#define GEM_MTU_MIN_SIZE	68
 4345 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4346 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4347 	#define MACB_HALT_TIMEOUT	1230
 4348 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4349 	#endif
 4350 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4351 	#endif
 4352 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4353 	#endif
 4354 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4355 	#endif
 4356 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4357 	#endif
 4358 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4359 	#endif
 4360 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4361 	#endif
 4362 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4363 	#endif
 4364 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4365 	#endif
 4366 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4367 	#endif
 4368 	/* LDV_COMMENT_END_PREP */
 4369 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_set_features" */
 4370 	netdev_features_t  var_macb_set_features_72_p1;
 4371 	/* LDV_COMMENT_BEGIN_PREP */
 4372 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4373 	#endif
 4374 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4375 	#endif
 4376 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4377 	#endif
 4378 	#if defined(CONFIG_OF)
 4379 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4380 	#define AT91ETHER_MAX_RX_DESCR	9
 4381 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4382 	#endif
 4383 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4384 	#endif
 4385 	#endif 
 4386 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4387 	#endif
 4388 	/* LDV_COMMENT_END_PREP */
 4389 
 4390 	/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 4391 	/* content: static int at91ether_open(struct net_device *dev)*/
 4392 	/* LDV_COMMENT_BEGIN_PREP */
 4393 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 4394 	#define MACB_RX_BUFFER_SIZE	128
 4395 	#define RX_BUFFER_MULTIPLE	64  
 4396 	#define RX_RING_SIZE		512 
 4397 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 4398 	#define TX_RING_SIZE		128 
 4399 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 4400 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 4401 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 4402 				 | MACB_BIT(ISR_ROVR))
 4403 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 4404 					| MACB_BIT(ISR_RLE)		\
 4405 					| MACB_BIT(TXERR))
 4406 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 4407 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 4408 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 4409 	#define GEM_MTU_MIN_SIZE	68
 4410 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 4411 	#define MACB_WOL_ENABLED		(0x1 << 1)
 4412 	#define MACB_HALT_TIMEOUT	1230
 4413 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4414 	#endif
 4415 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4416 	#endif
 4417 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4418 	#endif
 4419 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4420 	#endif
 4421 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4422 	#endif
 4423 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 4424 	#endif
 4425 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4426 	#endif
 4427 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4428 	#endif
 4429 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4430 	#endif
 4431 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4432 	#endif
 4433 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4434 	#endif
 4435 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4436 	#endif
 4437 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4438 	#endif
 4439 	#if defined(CONFIG_OF)
 4440 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4441 	#define AT91ETHER_MAX_RX_DESCR	9
 4442 	/* LDV_COMMENT_END_PREP */
 4443 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "at91ether_open" */
 4444 	static int res_at91ether_open_78;
 4445 	/* LDV_COMMENT_BEGIN_PREP */
 4446 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4447 	#endif
 4448 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4449 	#endif
 4450 	#endif 
 4451 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4452 	#endif
 4453 	/* LDV_COMMENT_END_PREP */
 4454 	/* content: static int at91ether_close(struct net_device *dev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	#if defined(CONFIG_OF)
	#define AT91ETHER_MAX_RBUFF_SZ	0x600
	#define AT91ETHER_MAX_RX_DESCR	9
	/* LDV_COMMENT_END_PREP */
 4506 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "at91ether_close" */
 4507 	static int res_at91ether_close_79;
 4508 	/* LDV_COMMENT_BEGIN_PREP */
 4509 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4510 	#endif
 4511 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4512 	#endif
 4513 	#endif 
 4514 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4515 	#endif
 4516 	/* LDV_COMMENT_END_PREP */
 4517 	/* content: static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	#if defined(CONFIG_OF)
	#define AT91ETHER_MAX_RBUFF_SZ	0x600
	#define AT91ETHER_MAX_RX_DESCR	9
	/* LDV_COMMENT_END_PREP */
 4569 	/* LDV_COMMENT_BEGIN_PREP */
 4570 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4571 	#endif
 4572 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4573 	#endif
 4574 	#endif 
 4575 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4576 	#endif
 4577 	/* LDV_COMMENT_END_PREP */
 4578 	/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 4621 	/* LDV_COMMENT_BEGIN_PREP */
 4622 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4623 	#endif
 4624 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4625 	#endif
 4626 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4627 	#endif
 4628 	#if defined(CONFIG_OF)
 4629 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4630 	#define AT91ETHER_MAX_RX_DESCR	9
 4631 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4632 	#endif
 4633 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4634 	#endif
 4635 	#endif 
 4636 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4637 	#endif
 4638 	/* LDV_COMMENT_END_PREP */
 4639 	/* content: static void macb_set_rx_mode(struct net_device *dev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 4682 	/* LDV_COMMENT_BEGIN_PREP */
 4683 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4684 	#endif
 4685 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4686 	#endif
 4687 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4688 	#endif
 4689 	#if defined(CONFIG_OF)
 4690 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4691 	#define AT91ETHER_MAX_RX_DESCR	9
 4692 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4693 	#endif
 4694 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4695 	#endif
 4696 	#endif 
 4697 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4698 	#endif
 4699 	/* LDV_COMMENT_END_PREP */
 4700 	/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 4743 	/* LDV_COMMENT_BEGIN_PREP */
 4744 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4745 	#endif
 4746 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4747 	#endif
 4748 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4749 	#endif
 4750 	#if defined(CONFIG_OF)
 4751 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4752 	#define AT91ETHER_MAX_RX_DESCR	9
 4753 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4754 	#endif
 4755 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4756 	#endif
 4757 	#endif 
 4758 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4759 	#endif
 4760 	/* LDV_COMMENT_END_PREP */
 4761 	/* content: static void at91ether_poll_controller(struct net_device *dev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	#if defined(CONFIG_OF)
	#define AT91ETHER_MAX_RBUFF_SZ	0x600
	#define AT91ETHER_MAX_RX_DESCR	9
	#ifdef CONFIG_NET_POLL_CONTROLLER
	/* LDV_COMMENT_END_PREP */
 4814 	/* LDV_COMMENT_BEGIN_PREP */
 4815 	#endif
 4816 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4817 	#endif
 4818 	#endif 
 4819 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4820 	#endif
 4821 	/* LDV_COMMENT_END_PREP */
 4822 
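	/* A minimal illustrative sketch (an assumption, not part of the generated
	 * trace): LDV environment models typically call the ndo callbacks bound in
	 * at91ether_netdev_ops and store their return codes in the res_* variables
	 * declared above, so the close() path is only explored after a successful
	 * open(). The helper name and the net_device argument are hypothetical. */
	static void ldv_sketch_exercise_at91ether_netdev_ops(struct net_device *ldv_dev)
	{
		/* try to open the interface and remember the outcome */
		res_at91ether_open_78 = at91ether_open(ldv_dev);
		if (res_at91ether_open_78 == 0) {
			/* only a successfully opened device is closed again */
			res_at91ether_close_79 = at91ether_close(ldv_dev);
		}
	}
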
 4823 	/** STRUCT: struct type: macb_config, struct name: at91sam9260_config **/
 4824 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 4869 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4870 	struct platform_device * var_group7;
 4871 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4872 	struct clk ** var_group8;
 4873 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4874 	struct clk ** var_macb_clk_init_75_p2;
 4875 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4876 	struct clk ** var_macb_clk_init_75_p3;
 4877 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_clk_init" */
 4878 	struct clk ** var_macb_clk_init_75_p4;
 4879 	/* LDV_COMMENT_BEGIN_PREP */
 4880 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4881 	#endif
 4882 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4883 	#endif
 4884 	#if defined(CONFIG_OF)
 4885 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4886 	#define AT91ETHER_MAX_RX_DESCR	9
 4887 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4888 	#endif
 4889 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4890 	#endif
 4891 	#endif 
 4892 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4893 	#endif
 4894 	/* LDV_COMMENT_END_PREP */
 4895 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 4940 	/* LDV_COMMENT_BEGIN_PREP */
 4941 	#if defined(CONFIG_OF)
 4942 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 4943 	#define AT91ETHER_MAX_RX_DESCR	9
 4944 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4945 	#endif
 4946 	#ifdef CONFIG_NET_POLL_CONTROLLER
 4947 	#endif
 4948 	#endif 
 4949 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 4950 	#endif
 4951 	/* LDV_COMMENT_END_PREP */
 4952 
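	/* Illustrative note (an assumption about the driver's design, not generated
	 * code): each macb_config entry listed in this trace, such as
	 * at91sam9260_config, pairs a clock-setup callback with a hardware-setup
	 * callback that macb_probe() invokes in that order. A sketch of the shape
	 * of such an entry, with only the two callbacks filled in: */
	static const struct macb_config ldv_sketch_macb_config = {
		.clk_init = macb_clk_init,	/* acquire pclk/hclk/tx_clk/rx_clk */
		.init = macb_init,		/* MII, netdev ops and feature setup */
	};
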
 4953 	/** STRUCT: struct type: macb_config, struct name: pc302gem_config **/
 4954 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 4999 	/* LDV_COMMENT_BEGIN_PREP */
 5000 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5001 	#endif
 5002 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5003 	#endif
 5004 	#if defined(CONFIG_OF)
 5005 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5006 	#define AT91ETHER_MAX_RX_DESCR	9
 5007 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5008 	#endif
 5009 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5010 	#endif
 5011 	#endif 
 5012 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5013 	#endif
 5014 	/* LDV_COMMENT_END_PREP */
 5015 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5060 	/* LDV_COMMENT_BEGIN_PREP */
 5061 	#if defined(CONFIG_OF)
 5062 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5063 	#define AT91ETHER_MAX_RX_DESCR	9
 5064 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5065 	#endif
 5066 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5067 	#endif
 5068 	#endif 
 5069 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5070 	#endif
 5071 	/* LDV_COMMENT_END_PREP */
 5072 
 5073 	/** STRUCT: struct type: macb_config, struct name: sama5d2_config **/
 5074 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5119 	/* LDV_COMMENT_BEGIN_PREP */
 5120 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5121 	#endif
 5122 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5123 	#endif
 5124 	#if defined(CONFIG_OF)
 5125 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5126 	#define AT91ETHER_MAX_RX_DESCR	9
 5127 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5128 	#endif
 5129 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5130 	#endif
 5131 	#endif 
 5132 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5133 	#endif
 5134 	/* LDV_COMMENT_END_PREP */
 5135 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5180 	/* LDV_COMMENT_BEGIN_PREP */
 5181 	#if defined(CONFIG_OF)
 5182 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5183 	#define AT91ETHER_MAX_RX_DESCR	9
 5184 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5185 	#endif
 5186 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5187 	#endif
 5188 	#endif 
 5189 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5190 	#endif
 5191 	/* LDV_COMMENT_END_PREP */
 5192 
 5193 	/** STRUCT: struct type: macb_config, struct name: sama5d3_config **/
 5194 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5239 	/* LDV_COMMENT_BEGIN_PREP */
 5240 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5241 	#endif
 5242 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5243 	#endif
 5244 	#if defined(CONFIG_OF)
 5245 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5246 	#define AT91ETHER_MAX_RX_DESCR	9
 5247 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5248 	#endif
 5249 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5250 	#endif
 5251 	#endif 
 5252 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5253 	#endif
 5254 	/* LDV_COMMENT_END_PREP */
 5255 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5300 	/* LDV_COMMENT_BEGIN_PREP */
 5301 	#if defined(CONFIG_OF)
 5302 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5303 	#define AT91ETHER_MAX_RX_DESCR	9
 5304 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5305 	#endif
 5306 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5307 	#endif
 5308 	#endif 
 5309 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5310 	#endif
 5311 	/* LDV_COMMENT_END_PREP */
 5312 
 5313 	/** STRUCT: struct type: macb_config, struct name: sama5d4_config **/
 5314 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5359 	/* LDV_COMMENT_BEGIN_PREP */
 5360 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5361 	#endif
 5362 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5363 	#endif
 5364 	#if defined(CONFIG_OF)
 5365 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5366 	#define AT91ETHER_MAX_RX_DESCR	9
 5367 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5368 	#endif
 5369 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5370 	#endif
 5371 	#endif 
 5372 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5373 	#endif
 5374 	/* LDV_COMMENT_END_PREP */
 5375 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5420 	/* LDV_COMMENT_BEGIN_PREP */
 5421 	#if defined(CONFIG_OF)
 5422 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5423 	#define AT91ETHER_MAX_RX_DESCR	9
 5424 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5425 	#endif
 5426 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5427 	#endif
 5428 	#endif 
 5429 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5430 	#endif
 5431 	/* LDV_COMMENT_END_PREP */
 5432 
 5433 	/** STRUCT: struct type: macb_config, struct name: emac_config **/
 5434 	/* content: static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	#if defined(CONFIG_OF)
	#define AT91ETHER_MAX_RBUFF_SZ	0x600
	#define AT91ETHER_MAX_RX_DESCR	9
	/* LDV_COMMENT_END_PREP */
 5490 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_clk_init" */
 5491 	struct clk ** var_at91ether_clk_init_84_p2;
 5492 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_clk_init" */
 5493 	struct clk ** var_at91ether_clk_init_84_p3;
 5494 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_clk_init" */
 5495 	struct clk ** var_at91ether_clk_init_84_p4;
 5496 	/* LDV_COMMENT_BEGIN_PREP */
 5497 	#endif 
 5498 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5499 	#endif
 5500 	/* LDV_COMMENT_END_PREP */
 5501 	/* content: static int at91ether_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	#if defined(CONFIG_OF)
	#define AT91ETHER_MAX_RBUFF_SZ	0x600
	#define AT91ETHER_MAX_RX_DESCR	9
	/* LDV_COMMENT_END_PREP */
 5557 	/* LDV_COMMENT_BEGIN_PREP */
 5558 	#endif 
 5559 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5560 	#endif
 5561 	/* LDV_COMMENT_END_PREP */
 5562 
 5563 	/** STRUCT: struct type: macb_config, struct name: np4_config **/
 5564 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5609 	/* LDV_COMMENT_BEGIN_PREP */
 5610 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5611 	#endif
 5612 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5613 	#endif
 5614 	#if defined(CONFIG_OF)
 5615 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5616 	#define AT91ETHER_MAX_RX_DESCR	9
 5617 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5618 	#endif
 5619 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5620 	#endif
 5621 	#endif 
 5622 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5623 	#endif
 5624 	/* LDV_COMMENT_END_PREP */
 5625 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5670 	/* LDV_COMMENT_BEGIN_PREP */
 5671 	#if defined(CONFIG_OF)
 5672 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5673 	#define AT91ETHER_MAX_RX_DESCR	9
 5674 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5675 	#endif
 5676 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5677 	#endif
 5678 	#endif 
 5679 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5680 	#endif
 5681 	/* LDV_COMMENT_END_PREP */
 5682 
 5683 	/** STRUCT: struct type: macb_config, struct name: zynqmp_config **/
 5684 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5729 	/* LDV_COMMENT_BEGIN_PREP */
 5730 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5731 	#endif
 5732 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5733 	#endif
 5734 	#if defined(CONFIG_OF)
 5735 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5736 	#define AT91ETHER_MAX_RX_DESCR	9
 5737 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5738 	#endif
 5739 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5740 	#endif
 5741 	#endif 
 5742 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5743 	#endif
 5744 	/* LDV_COMMENT_END_PREP */
 5745 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5790 	/* LDV_COMMENT_BEGIN_PREP */
 5791 	#if defined(CONFIG_OF)
 5792 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5793 	#define AT91ETHER_MAX_RX_DESCR	9
 5794 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5795 	#endif
 5796 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5797 	#endif
 5798 	#endif 
 5799 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5800 	#endif
 5801 	/* LDV_COMMENT_END_PREP */
 5802 
 5803 	/** STRUCT: struct type: macb_config, struct name: zynq_config **/
 5804 	/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5849 	/* LDV_COMMENT_BEGIN_PREP */
 5850 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5851 	#endif
 5852 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5853 	#endif
 5854 	#if defined(CONFIG_OF)
 5855 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5856 	#define AT91ETHER_MAX_RX_DESCR	9
 5857 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5858 	#endif
 5859 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5860 	#endif
 5861 	#endif 
 5862 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5863 	#endif
 5864 	/* LDV_COMMENT_END_PREP */
 5865 	/* content: static int macb_init(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5910 	/* LDV_COMMENT_BEGIN_PREP */
 5911 	#if defined(CONFIG_OF)
 5912 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 5913 	#define AT91ETHER_MAX_RX_DESCR	9
 5914 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5915 	#endif
 5916 	#ifdef CONFIG_NET_POLL_CONTROLLER
 5917 	#endif
 5918 	#endif 
 5919 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 5920 	#endif
 5921 	/* LDV_COMMENT_END_PREP */
 5922 
 5923 	/** STRUCT: struct type: platform_driver, struct name: macb_driver **/
 5924 	/* content: static int macb_probe(struct platform_device *pdev)*/
	/* LDV_COMMENT_BEGIN_PREP */
	/* (same macb driver macro definitions as in the first full block above, plus the usual empty CONFIG_ARCH_DMA_ADDR_T_64BIT / DEBUG / CONFIG_NET_POLL_CONTROLLER guards) */
	/* LDV_COMMENT_END_PREP */
 5981 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "macb_probe" */
 5982 	static int res_macb_probe_86;
 5983 	/* content: static int macb_remove(struct platform_device *pdev)*/
 5984 	/* LDV_COMMENT_BEGIN_PREP */
 5985 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 5986 	#define MACB_RX_BUFFER_SIZE	128
 5987 	#define RX_BUFFER_MULTIPLE	64  
 5988 	#define RX_RING_SIZE		512 
 5989 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 5990 	#define TX_RING_SIZE		128 
 5991 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 5992 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 5993 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 5994 				 | MACB_BIT(ISR_ROVR))
 5995 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 5996 					| MACB_BIT(ISR_RLE)		\
 5997 					| MACB_BIT(TXERR))
 5998 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 5999 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6000 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6001 	#define GEM_MTU_MIN_SIZE	68
 6002 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6003 	#define MACB_WOL_ENABLED		(0x1 << 1)
 6004 	#define MACB_HALT_TIMEOUT	1230
 6005 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6006 	#endif
 6007 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6008 	#endif
 6009 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6010 	#endif
 6011 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6012 	#endif
 6013 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6014 	#endif
 6015 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6016 	#endif
 6017 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6018 	#endif
 6019 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6020 	#endif
 6021 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6022 	#endif
 6023 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6024 	#endif
 6025 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6026 	#endif
 6027 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6028 	#endif
 6029 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6030 	#endif
 6031 	#if defined(CONFIG_OF)
 6032 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6033 	#define AT91ETHER_MAX_RX_DESCR	9
 6034 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6035 	#endif
 6036 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6037 	#endif
 6038 	#endif 
 6039 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6040 	#endif
 6041 	/* LDV_COMMENT_END_PREP */
 6042 
 6043 	/** CALLBACK SECTION request_irq **/
 6044 	/* content: static irqreturn_t at91ether_interrupt(int irq, void *dev_id)*/
 6045 	/* LDV_COMMENT_BEGIN_PREP */
 6046 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6047 	#define MACB_RX_BUFFER_SIZE	128
 6048 	#define RX_BUFFER_MULTIPLE	64  
 6049 	#define RX_RING_SIZE		512 
 6050 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6051 	#define TX_RING_SIZE		128 
 6052 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6053 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6054 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6055 				 | MACB_BIT(ISR_ROVR))
 6056 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6057 					| MACB_BIT(ISR_RLE)		\
 6058 					| MACB_BIT(TXERR))
 6059 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6060 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6061 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6062 	#define GEM_MTU_MIN_SIZE	68
 6063 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6064 	#define MACB_WOL_ENABLED		(0x1 << 1)
 6065 	#define MACB_HALT_TIMEOUT	1230
 6066 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6067 	#endif
 6068 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6069 	#endif
 6070 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6071 	#endif
 6072 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6073 	#endif
 6074 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6075 	#endif
 6076 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6077 	#endif
 6078 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6079 	#endif
 6080 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6081 	#endif
 6082 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6083 	#endif
 6084 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6085 	#endif
 6086 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6087 	#endif
 6088 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6089 	#endif
 6090 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6091 	#endif
 6092 	#if defined(CONFIG_OF)
 6093 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6094 	#define AT91ETHER_MAX_RX_DESCR	9
 6095 	/* LDV_COMMENT_END_PREP */
 6096 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_interrupt" */
 6097 	int  var_at91ether_interrupt_82_p0;
 6098 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "at91ether_interrupt" */
 6099 	void * var_at91ether_interrupt_82_p1;
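	/* Annotation (assumption, following the prototype quoted above): var_at91ether_interrupt_82_p0
	   models the "irq" number and var_at91ether_interrupt_82_p1 the "dev_id" cookie that the
	   harness passes to at91ether_interrupt() when it exercises the request_irq callback. */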
 6100 	/* LDV_COMMENT_BEGIN_PREP */
 6101 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6102 	#endif
 6103 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6104 	#endif
 6105 	#endif 
 6106 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6107 	#endif
 6108 	/* LDV_COMMENT_END_PREP */
 6109 	/* content: static irqreturn_t macb_interrupt(int irq, void *dev_id)*/
 6110 	/* LDV_COMMENT_BEGIN_PREP */
 6111 	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6112 	#define MACB_RX_BUFFER_SIZE	128
 6113 	#define RX_BUFFER_MULTIPLE	64  
 6114 	#define RX_RING_SIZE		512 
 6115 	#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6116 	#define TX_RING_SIZE		128 
 6117 	#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6118 	#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6119 	#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6120 				 | MACB_BIT(ISR_ROVR))
 6121 	#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6122 					| MACB_BIT(ISR_RLE)		\
 6123 					| MACB_BIT(TXERR))
 6124 	#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6125 	#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6126 	#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6127 	#define GEM_MTU_MIN_SIZE	68
 6128 	#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6129 	#define MACB_WOL_ENABLED		(0x1 << 1)
 6130 	#define MACB_HALT_TIMEOUT	1230
 6131 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6132 	#endif
 6133 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6134 	#endif
 6135 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6136 	#endif
 6137 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6138 	#endif
 6139 	/* LDV_COMMENT_END_PREP */
 6140 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_interrupt" */
 6141 	int  var_macb_interrupt_34_p0;
 6142 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "macb_interrupt" */
 6143 	void * var_macb_interrupt_34_p1;
 6144 	/* LDV_COMMENT_BEGIN_PREP */
 6145 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6146 	#endif
 6147 	#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6148 	#endif
 6149 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6150 	#endif
 6151 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6152 	#endif
 6153 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6154 	#endif
 6155 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6156 	#endif
 6157 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6158 	#endif
 6159 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6160 	#endif
 6161 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6162 	#endif
 6163 	#if defined(CONFIG_OF)
 6164 	#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6165 	#define AT91ETHER_MAX_RX_DESCR	9
 6166 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6167 	#endif
 6168 	#ifdef CONFIG_NET_POLL_CONTROLLER
 6169 	#endif
 6170 	#endif 
 6171 	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6172 	#endif
 6173 	/* LDV_COMMENT_END_PREP */
 6174 
 6175 
 6176 
 6177 
 6178 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 6179 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 6180 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 6181 	LDV_IN_INTERRUPT=1;
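	/* Annotation (assumption): setting LDV_IN_INTERRUPT to 1 appears to initialize the
	   interrupt-context flag of the LDV environment model, so that rules which depend on
	   whether code runs in IRQ context can be checked when the interrupt handlers declared
	   above are later invoked. */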
 6182 
 6183 
 6184 
 6185 
 6186 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 6187 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 6188 	/*============================= FUNCTION CALL SECTION       =============================*/
 6189 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 6190 	ldv_initialize();
 6191 	
 6192 
 6193 	
 6194 
 6195 	int ldv_s_macb_netdev_ops_net_device_ops = 0;
 6196 	
 6197 
 6198 	int ldv_s_at91ether_netdev_ops_net_device_ops = 0;
 6199 	
 6200 
 6201 	
 6202 
 6203 	
 6204 
 6205 	
 6206 
 6207 	
 6208 
 6209 	
 6210 
 6211 	
 6212 
 6213 	
 6214 
 6215 	
 6216 
 6217 	
 6218 
 6219 	int ldv_s_macb_driver_platform_driver = 0;
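	/* A simplified, hand-written sketch (assumption: illustration only, mirroring the generated
	 * loop below; "dev" stands for the harness variable var_group1) of how the ldv_s_* counters
	 * and nondet_int() drive the callbacks: on every iteration the verifier picks an arbitrary
	 * case, and a counter enforces ordering, e.g. ndo_open must succeed before ndo_stop is tried:
	 *
	 *     int opened = 0;                        // analogue of ldv_s_macb_netdev_ops_net_device_ops
	 *     while (nondet_int() || opened != 0) {  // must not stop while the device is still open
	 *         switch (nondet_int()) {
	 *         case 0:                            // ndo_open
	 *             if (opened == 0 && macb_open(dev) >= 0)
	 *                 opened = 1;
	 *             break;
	 *         case 1:                            // ndo_stop
	 *             if (opened == 1 && macb_close(dev) == 0)
	 *                 opened = 0;
	 *             break;
	 *         default:                           // order-independent callbacks (ethtool ops etc.)
	 *             break;
	 *         }
	 *     }
	 */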
 6220 
 6221 	
 6222 
 6223 
 6224 	while(  nondet_int()
 6225 		|| !(ldv_s_macb_netdev_ops_net_device_ops == 0)
 6226 		|| !(ldv_s_at91ether_netdev_ops_net_device_ops == 0)
 6227 		|| !(ldv_s_macb_driver_platform_driver == 0)
 6228 	) {
 6229 
 6230 		switch(nondet_int()) {
 6231 
 6232 			case 0: {
 6233 
 6234 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6235 				
 6236 
 6237 				/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 6238 				/* LDV_COMMENT_BEGIN_PREP */
 6239 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6240 				#define MACB_RX_BUFFER_SIZE	128
 6241 				#define RX_BUFFER_MULTIPLE	64  
 6242 				#define RX_RING_SIZE		512 
 6243 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6244 				#define TX_RING_SIZE		128 
 6245 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6246 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6247 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6248 				 | MACB_BIT(ISR_ROVR))
 6249 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6250 					| MACB_BIT(ISR_RLE)		\
 6251 					| MACB_BIT(TXERR))
 6252 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6253 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6254 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6255 				#define GEM_MTU_MIN_SIZE	68
 6256 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6257 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6258 				#define MACB_HALT_TIMEOUT	1230
 6259 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6260 				#endif
 6261 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6262 				#endif
 6263 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6264 				#endif
 6265 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6266 				#endif
 6267 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6268 				#endif
 6269 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6270 				#endif
 6271 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6272 				#endif
 6273 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6274 				#endif
 6275 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6276 				#endif
 6277 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6278 				#endif
 6279 				/* LDV_COMMENT_END_PREP */
 6280 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs_len" from driver structure with callbacks "macb_ethtool_ops" */
 6281 				ldv_handler_precall();
 6282 				macb_get_regs_len( var_group1);
 6283 				/* LDV_COMMENT_BEGIN_PREP */
 6284 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6285 				#endif
 6286 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6287 				#endif
 6288 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6289 				#endif
 6290 				#if defined(CONFIG_OF)
 6291 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6292 				#define AT91ETHER_MAX_RX_DESCR	9
 6293 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6294 				#endif
 6295 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6296 				#endif
 6297 				#endif 
 6298 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6299 				#endif
 6300 				/* LDV_COMMENT_END_PREP */
 6301 				
 6302 
 6303 				
 6304 
 6305 			}
 6306 
 6307 			break;
 6308 			case 1: {
 6309 
 6310 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6311 				
 6312 
 6313 				/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 6314 				/* LDV_COMMENT_BEGIN_PREP */
 6315 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6316 				#define MACB_RX_BUFFER_SIZE	128
 6317 				#define RX_BUFFER_MULTIPLE	64  
 6318 				#define RX_RING_SIZE		512 
 6319 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6320 				#define TX_RING_SIZE		128 
 6321 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6322 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6323 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6324 				 | MACB_BIT(ISR_ROVR))
 6325 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6326 					| MACB_BIT(ISR_RLE)		\
 6327 					| MACB_BIT(TXERR))
 6328 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6329 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6330 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6331 				#define GEM_MTU_MIN_SIZE	68
 6332 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6333 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6334 				#define MACB_HALT_TIMEOUT	1230
 6335 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6336 				#endif
 6337 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6338 				#endif
 6339 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6340 				#endif
 6341 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6342 				#endif
 6343 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6344 				#endif
 6345 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6346 				#endif
 6347 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6348 				#endif
 6349 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6350 				#endif
 6351 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6352 				#endif
 6353 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6354 				#endif
 6355 				/* LDV_COMMENT_END_PREP */
 6356 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs" from driver structure with callbacks "macb_ethtool_ops" */
 6357 				ldv_handler_precall();
 6358 				macb_get_regs( var_group1, var_group2, var_macb_get_regs_68_p2);
 6359 				/* LDV_COMMENT_BEGIN_PREP */
 6360 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6361 				#endif
 6362 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6363 				#endif
 6364 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6365 				#endif
 6366 				#if defined(CONFIG_OF)
 6367 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6368 				#define AT91ETHER_MAX_RX_DESCR	9
 6369 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6370 				#endif
 6371 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6372 				#endif
 6373 				#endif 
 6374 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6375 				#endif
 6376 				/* LDV_COMMENT_END_PREP */
 6377 				
 6378 
 6379 				
 6380 
 6381 			}
 6382 
 6383 			break;
 6384 			case 2: {
 6385 
 6386 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6387 				
 6388 
 6389 				/* content: static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 6390 				/* LDV_COMMENT_BEGIN_PREP */
 6391 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6392 				#define MACB_RX_BUFFER_SIZE	128
 6393 				#define RX_BUFFER_MULTIPLE	64  
 6394 				#define RX_RING_SIZE		512 
 6395 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6396 				#define TX_RING_SIZE		128 
 6397 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6398 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6399 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6400 				 | MACB_BIT(ISR_ROVR))
 6401 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6402 					| MACB_BIT(ISR_RLE)		\
 6403 					| MACB_BIT(TXERR))
 6404 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6405 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6406 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6407 				#define GEM_MTU_MIN_SIZE	68
 6408 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6409 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6410 				#define MACB_HALT_TIMEOUT	1230
 6411 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6412 				#endif
 6413 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6414 				#endif
 6415 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6416 				#endif
 6417 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6418 				#endif
 6419 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6420 				#endif
 6421 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6422 				#endif
 6423 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6424 				#endif
 6425 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6426 				#endif
 6427 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6428 				#endif
 6429 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6430 				#endif
 6431 				/* LDV_COMMENT_END_PREP */
 6432 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_wol" from driver structure with callbacks "macb_ethtool_ops" */
 6433 				ldv_handler_precall();
 6434 				macb_get_wol( var_group1, var_group3);
 6435 				/* LDV_COMMENT_BEGIN_PREP */
 6436 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6437 				#endif
 6438 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6439 				#endif
 6440 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6441 				#endif
 6442 				#if defined(CONFIG_OF)
 6443 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6444 				#define AT91ETHER_MAX_RX_DESCR	9
 6445 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6446 				#endif
 6447 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6448 				#endif
 6449 				#endif 
 6450 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6451 				#endif
 6452 				/* LDV_COMMENT_END_PREP */
 6453 				
 6454 
 6455 				
 6456 
 6457 			}
 6458 
 6459 			break;
 6460 			case 3: {
 6461 
 6462 				/** STRUCT: struct type: ethtool_ops, struct name: macb_ethtool_ops **/
 6463 				
 6464 
 6465 				/* content: static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)*/
 6466 				/* LDV_COMMENT_BEGIN_PREP */
 6467 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6468 				#define MACB_RX_BUFFER_SIZE	128
 6469 				#define RX_BUFFER_MULTIPLE	64  
 6470 				#define RX_RING_SIZE		512 
 6471 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6472 				#define TX_RING_SIZE		128 
 6473 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6474 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6475 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6476 				 | MACB_BIT(ISR_ROVR))
 6477 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6478 					| MACB_BIT(ISR_RLE)		\
 6479 					| MACB_BIT(TXERR))
 6480 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6481 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6482 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6483 				#define GEM_MTU_MIN_SIZE	68
 6484 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6485 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6486 				#define MACB_HALT_TIMEOUT	1230
 6487 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6488 				#endif
 6489 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6490 				#endif
 6491 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6492 				#endif
 6493 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6494 				#endif
 6495 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6496 				#endif
 6497 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6498 				#endif
 6499 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6500 				#endif
 6501 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6502 				#endif
 6503 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6504 				#endif
 6505 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6506 				#endif
 6507 				/* LDV_COMMENT_END_PREP */
 6508 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_wol" from driver structure with callbacks "macb_ethtool_ops" */
 6509 				ldv_handler_precall();
 6510 				macb_set_wol( var_group1, var_group3);
 6511 				/* LDV_COMMENT_BEGIN_PREP */
 6512 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6513 				#endif
 6514 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6515 				#endif
 6516 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6517 				#endif
 6518 				#if defined(CONFIG_OF)
 6519 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6520 				#define AT91ETHER_MAX_RX_DESCR	9
 6521 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6522 				#endif
 6523 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6524 				#endif
 6525 				#endif 
 6526 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6527 				#endif
 6528 				/* LDV_COMMENT_END_PREP */
 6529 				
 6530 
 6531 				
 6532 
 6533 			}
 6534 
 6535 			break;
 6536 			case 4: {
 6537 
 6538 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6539 				
 6540 
 6541 				/* content: static int macb_get_regs_len(struct net_device *netdev)*/
 6542 				/* LDV_COMMENT_BEGIN_PREP */
 6543 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6544 				#define MACB_RX_BUFFER_SIZE	128
 6545 				#define RX_BUFFER_MULTIPLE	64  
 6546 				#define RX_RING_SIZE		512 
 6547 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6548 				#define TX_RING_SIZE		128 
 6549 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6550 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6551 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6552 				 | MACB_BIT(ISR_ROVR))
 6553 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6554 					| MACB_BIT(ISR_RLE)		\
 6555 					| MACB_BIT(TXERR))
 6556 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6557 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6558 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6559 				#define GEM_MTU_MIN_SIZE	68
 6560 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6561 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6562 				#define MACB_HALT_TIMEOUT	1230
 6563 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6564 				#endif
 6565 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6566 				#endif
 6567 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6568 				#endif
 6569 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6570 				#endif
 6571 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6572 				#endif
 6573 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6574 				#endif
 6575 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6576 				#endif
 6577 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6578 				#endif
 6579 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6580 				#endif
 6581 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6582 				#endif
 6583 				/* LDV_COMMENT_END_PREP */
 6584 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs_len" from driver structure with callbacks "gem_ethtool_ops" */
 6585 				ldv_handler_precall();
 6586 				macb_get_regs_len( var_group1);
 6587 				/* LDV_COMMENT_BEGIN_PREP */
 6588 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6589 				#endif
 6590 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6591 				#endif
 6592 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6593 				#endif
 6594 				#if defined(CONFIG_OF)
 6595 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6596 				#define AT91ETHER_MAX_RX_DESCR	9
 6597 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6598 				#endif
 6599 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6600 				#endif
 6601 				#endif 
 6602 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6603 				#endif
 6604 				/* LDV_COMMENT_END_PREP */
 6605 				
 6606 
 6607 				
 6608 
 6609 			}
 6610 
 6611 			break;
 6612 			case 5: {
 6613 
 6614 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6615 				
 6616 
 6617 				/* content: static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)*/
 6618 				/* LDV_COMMENT_BEGIN_PREP */
 6619 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6620 				#define MACB_RX_BUFFER_SIZE	128
 6621 				#define RX_BUFFER_MULTIPLE	64  
 6622 				#define RX_RING_SIZE		512 
 6623 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6624 				#define TX_RING_SIZE		128 
 6625 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6626 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6627 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6628 				 | MACB_BIT(ISR_ROVR))
 6629 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6630 					| MACB_BIT(ISR_RLE)		\
 6631 					| MACB_BIT(TXERR))
 6632 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6633 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6634 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6635 				#define GEM_MTU_MIN_SIZE	68
 6636 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6637 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6638 				#define MACB_HALT_TIMEOUT	1230
 6639 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6640 				#endif
 6641 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6642 				#endif
 6643 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6644 				#endif
 6645 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6646 				#endif
 6647 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6648 				#endif
 6649 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6650 				#endif
 6651 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6652 				#endif
 6653 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6654 				#endif
 6655 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6656 				#endif
 6657 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6658 				#endif
 6659 				/* LDV_COMMENT_END_PREP */
 6660 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_regs" from driver structure with callbacks "gem_ethtool_ops" */
 6661 				ldv_handler_precall();
 6662 				macb_get_regs( var_group1, var_group2, var_macb_get_regs_68_p2);
 6663 				/* LDV_COMMENT_BEGIN_PREP */
 6664 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6665 				#endif
 6666 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6667 				#endif
 6668 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6669 				#endif
 6670 				#if defined(CONFIG_OF)
 6671 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6672 				#define AT91ETHER_MAX_RX_DESCR	9
 6673 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6674 				#endif
 6675 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6676 				#endif
 6677 				#endif 
 6678 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6679 				#endif
 6680 				/* LDV_COMMENT_END_PREP */
 6681 				
 6682 
 6683 				
 6684 
 6685 			}
 6686 
 6687 			break;
 6688 			case 6: {
 6689 
 6690 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6691 				
 6692 
 6693 				/* content: static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)*/
 6694 				/* LDV_COMMENT_BEGIN_PREP */
 6695 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6696 				#define MACB_RX_BUFFER_SIZE	128
 6697 				#define RX_BUFFER_MULTIPLE	64  
 6698 				#define RX_RING_SIZE		512 
 6699 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6700 				#define TX_RING_SIZE		128 
 6701 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6702 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6703 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6704 				 | MACB_BIT(ISR_ROVR))
 6705 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6706 					| MACB_BIT(ISR_RLE)		\
 6707 					| MACB_BIT(TXERR))
 6708 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6709 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6710 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6711 				#define GEM_MTU_MIN_SIZE	68
 6712 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6713 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6714 				#define MACB_HALT_TIMEOUT	1230
 6715 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6716 				#endif
 6717 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6718 				#endif
 6719 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6720 				#endif
 6721 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6722 				#endif
 6723 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6724 				#endif
 6725 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6726 				#endif
 6727 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6728 				#endif
 6729 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6730 				#endif
 6731 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6732 				#endif
 6733 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6734 				#endif
 6735 				/* LDV_COMMENT_END_PREP */
 6736 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_ethtool_stats" from driver structure with callbacks "gem_ethtool_ops" */
 6737 				ldv_handler_precall();
 6738 				gem_get_ethtool_stats( var_group1, var_group4, var_gem_get_ethtool_stats_63_p2);
 6739 				/* LDV_COMMENT_BEGIN_PREP */
 6740 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6741 				#endif
 6742 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6743 				#endif
 6744 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6745 				#endif
 6746 				#if defined(CONFIG_OF)
 6747 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6748 				#define AT91ETHER_MAX_RX_DESCR	9
 6749 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6750 				#endif
 6751 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6752 				#endif
 6753 				#endif 
 6754 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6755 				#endif
 6756 				/* LDV_COMMENT_END_PREP */
 6757 				
 6758 
 6759 				
 6760 
 6761 			}
 6762 
 6763 			break;
 6764 			case 7: {
 6765 
 6766 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6767 				
 6768 
 6769 				/* content: static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)*/
 6770 				/* LDV_COMMENT_BEGIN_PREP */
 6771 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6772 				#define MACB_RX_BUFFER_SIZE	128
 6773 				#define RX_BUFFER_MULTIPLE	64  
 6774 				#define RX_RING_SIZE		512 
 6775 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6776 				#define TX_RING_SIZE		128 
 6777 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6778 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6779 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6780 				 | MACB_BIT(ISR_ROVR))
 6781 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6782 					| MACB_BIT(ISR_RLE)		\
 6783 					| MACB_BIT(TXERR))
 6784 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6785 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6786 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6787 				#define GEM_MTU_MIN_SIZE	68
 6788 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6789 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6790 				#define MACB_HALT_TIMEOUT	1230
 6791 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6792 				#endif
 6793 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6794 				#endif
 6795 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6796 				#endif
 6797 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6798 				#endif
 6799 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6800 				#endif
 6801 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6802 				#endif
 6803 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6804 				#endif
 6805 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6806 				#endif
 6807 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6808 				#endif
 6809 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6810 				#endif
 6811 				/* LDV_COMMENT_END_PREP */
 6812 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_strings" from driver structure with callbacks "gem_ethtool_ops" */
 6813 				ldv_handler_precall();
 6814 				gem_get_ethtool_strings( var_group1, var_gem_get_ethtool_strings_65_p1, var_gem_get_ethtool_strings_65_p2);
 6815 				/* LDV_COMMENT_BEGIN_PREP */
 6816 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6817 				#endif
 6818 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6819 				#endif
 6820 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6821 				#endif
 6822 				#if defined(CONFIG_OF)
 6823 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6824 				#define AT91ETHER_MAX_RX_DESCR	9
 6825 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6826 				#endif
 6827 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6828 				#endif
 6829 				#endif 
 6830 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6831 				#endif
 6832 				/* LDV_COMMENT_END_PREP */
 6833 				
 6834 
 6835 				
 6836 
 6837 			}
 6838 
 6839 			break;
 6840 			case 8: {
 6841 
 6842 				/** STRUCT: struct type: ethtool_ops, struct name: gem_ethtool_ops **/
 6843 				
 6844 
 6845 				/* content: static int gem_get_sset_count(struct net_device *dev, int sset)*/
 6846 				/* LDV_COMMENT_BEGIN_PREP */
 6847 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6848 				#define MACB_RX_BUFFER_SIZE	128
 6849 				#define RX_BUFFER_MULTIPLE	64  
 6850 				#define RX_RING_SIZE		512 
 6851 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6852 				#define TX_RING_SIZE		128 
 6853 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6854 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6855 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6856 				 | MACB_BIT(ISR_ROVR))
 6857 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6858 					| MACB_BIT(ISR_RLE)		\
 6859 					| MACB_BIT(TXERR))
 6860 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6861 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6862 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6863 				#define GEM_MTU_MIN_SIZE	68
 6864 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6865 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6866 				#define MACB_HALT_TIMEOUT	1230
 6867 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6868 				#endif
 6869 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6870 				#endif
 6871 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6872 				#endif
 6873 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6874 				#endif
 6875 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6876 				#endif
 6877 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6878 				#endif
 6879 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6880 				#endif
 6881 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6882 				#endif
 6883 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6884 				#endif
 6885 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6886 				#endif
 6887 				/* LDV_COMMENT_END_PREP */
 6888 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_sset_count" from driver structure with callbacks "gem_ethtool_ops" */
 6889 				ldv_handler_precall();
 6890 				gem_get_sset_count( var_group1, var_gem_get_sset_count_64_p1);
 6891 				/* LDV_COMMENT_BEGIN_PREP */
 6892 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6893 				#endif
 6894 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6895 				#endif
 6896 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6897 				#endif
 6898 				#if defined(CONFIG_OF)
 6899 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6900 				#define AT91ETHER_MAX_RX_DESCR	9
 6901 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6902 				#endif
 6903 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6904 				#endif
 6905 				#endif 
 6906 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6907 				#endif
 6908 				/* LDV_COMMENT_END_PREP */
 6909 				
 6910 
 6911 				
 6912 
 6913 			}
 6914 
 6915 			break;
 6916 			case 9: {
 6917 
 6918 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 6919 				if(ldv_s_macb_netdev_ops_net_device_ops==0) {
 6920 
 6921 				/* content: static int macb_open(struct net_device *dev)*/
 6922 				/* LDV_COMMENT_BEGIN_PREP */
 6923 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 6924 				#define MACB_RX_BUFFER_SIZE	128
 6925 				#define RX_BUFFER_MULTIPLE	64  
 6926 				#define RX_RING_SIZE		512 
 6927 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 6928 				#define TX_RING_SIZE		128 
 6929 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 6930 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 6931 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 6932 				 | MACB_BIT(ISR_ROVR))
 6933 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 6934 					| MACB_BIT(ISR_RLE)		\
 6935 					| MACB_BIT(TXERR))
 6936 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 6937 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 6938 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 6939 				#define GEM_MTU_MIN_SIZE	68
 6940 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 6941 				#define MACB_WOL_ENABLED		(0x1 << 1)
 6942 				#define MACB_HALT_TIMEOUT	1230
 6943 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6944 				#endif
 6945 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6946 				#endif
 6947 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6948 				#endif
 6949 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6950 				#endif
 6951 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6952 				#endif
 6953 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 6954 				#endif
 6955 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6956 				#endif
 6957 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6958 				#endif
 6959 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6960 				#endif
 6961 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6962 				#endif
 6963 				/* LDV_COMMENT_END_PREP */
 6964 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "macb_netdev_ops". Standard function test for correct return result. */
 6965 				ldv_handler_precall();
 6966 				res_macb_open_58 = macb_open( var_group1);
 6967 				 ldv_check_return_value(res_macb_open_58);
 6968 				 if(res_macb_open_58 < 0) 
 6969 					goto ldv_module_exit;
 6970 				/* LDV_COMMENT_BEGIN_PREP */
 6971 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6972 				#endif
 6973 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6974 				#endif
 6975 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6976 				#endif
 6977 				#if defined(CONFIG_OF)
 6978 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 6979 				#define AT91ETHER_MAX_RX_DESCR	9
 6980 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6981 				#endif
 6982 				#ifdef CONFIG_NET_POLL_CONTROLLER
 6983 				#endif
 6984 				#endif 
 6985 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 6986 				#endif
 6987 				/* LDV_COMMENT_END_PREP */
 6988 				ldv_s_macb_netdev_ops_net_device_ops++;
 6989 
 6990 				}
 6991 
 6992 			}
 6993 
 6994 			break;
 6995 			case 10: {
 6996 
 6997 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 6998 				if(ldv_s_macb_netdev_ops_net_device_ops==1) {
 6999 
 7000 				/* content: static int macb_close(struct net_device *dev)*/
 7001 				/* LDV_COMMENT_BEGIN_PREP */
 7002 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7003 				#define MACB_RX_BUFFER_SIZE	128
 7004 				#define RX_BUFFER_MULTIPLE	64  
 7005 				#define RX_RING_SIZE		512 
 7006 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7007 				#define TX_RING_SIZE		128 
 7008 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7009 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7010 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7011 				 | MACB_BIT(ISR_ROVR))
 7012 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7013 					| MACB_BIT(ISR_RLE)		\
 7014 					| MACB_BIT(TXERR))
 7015 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7016 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7017 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7018 				#define GEM_MTU_MIN_SIZE	68
 7019 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7020 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7021 				#define MACB_HALT_TIMEOUT	1230
 7022 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7023 				#endif
 7024 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7025 				#endif
 7026 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7027 				#endif
 7028 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7029 				#endif
 7030 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7031 				#endif
 7032 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7033 				#endif
 7034 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7035 				#endif
 7036 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7037 				#endif
 7038 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7039 				#endif
 7040 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7041 				#endif
 7042 				/* LDV_COMMENT_END_PREP */
 7043 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "macb_netdev_ops". Standard function test for correct return result. */
 7044 				ldv_handler_precall();
 7045 				res_macb_close_59 = macb_close( var_group1);
 7046 				 ldv_check_return_value(res_macb_close_59);
 7047 				 if(res_macb_close_59) 
 7048 					goto ldv_module_exit;
 7049 				/* LDV_COMMENT_BEGIN_PREP */
 7050 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7051 				#endif
 7052 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7053 				#endif
 7054 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7055 				#endif
 7056 				#if defined(CONFIG_OF)
 7057 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7058 				#define AT91ETHER_MAX_RX_DESCR	9
 7059 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7060 				#endif
 7061 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7062 				#endif
 7063 				#endif 
 7064 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7065 				#endif
 7066 				/* LDV_COMMENT_END_PREP */
 7067 				ldv_s_macb_netdev_ops_net_device_ops=0;
 7068 
 7069 				}
 7070 
 7071 			}
 7072 
 7073 			break;
 7074 			case 11: {
 7075 
 7076 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7077 				
 7078 
 7079 				/* content: static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
 7080 				/* LDV_COMMENT_BEGIN_PREP */
 7081 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7082 				#define MACB_RX_BUFFER_SIZE	128
 7083 				#define RX_BUFFER_MULTIPLE	64  
 7084 				#define RX_RING_SIZE		512 
 7085 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7086 				#define TX_RING_SIZE		128 
 7087 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7088 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7089 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7090 				 | MACB_BIT(ISR_ROVR))
 7091 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7092 					| MACB_BIT(ISR_RLE)		\
 7093 					| MACB_BIT(TXERR))
 7094 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7095 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7096 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7097 				#define GEM_MTU_MIN_SIZE	68
 7098 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7099 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7100 				#define MACB_HALT_TIMEOUT	1230
 7101 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7102 				#endif
 7103 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7104 				#endif
 7105 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7106 				#endif
 7107 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7108 				#endif
 7109 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7110 				#endif
 7111 				/* LDV_COMMENT_END_PREP */
 7112 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "macb_netdev_ops" */
 7113 				ldv_handler_precall();
 7114 				macb_start_xmit( var_group5, var_group1);
 7115 				/* LDV_COMMENT_BEGIN_PREP */
 7116 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7117 				#endif
 7118 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7119 				#endif
 7120 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7121 				#endif
 7122 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7123 				#endif
 7124 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7125 				#endif
 7126 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7127 				#endif
 7128 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7129 				#endif
 7130 				#if defined(CONFIG_OF)
 7131 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7132 				#define AT91ETHER_MAX_RX_DESCR	9
 7133 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7134 				#endif
 7135 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7136 				#endif
 7137 				#endif 
 7138 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7139 				#endif
 7140 				/* LDV_COMMENT_END_PREP */
 7141 				
 7142 
 7143 				
 7144 
 7145 			}
 7146 
 7147 			break;
 7148 			case 12: {
 7149 
 7150 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7151 				
 7152 
 7153 				/* content: static void macb_set_rx_mode(struct net_device *dev)*/
 7154 				/* LDV_COMMENT_BEGIN_PREP */
 7155 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7156 				#define MACB_RX_BUFFER_SIZE	128
 7157 				#define RX_BUFFER_MULTIPLE	64  
 7158 				#define RX_RING_SIZE		512 
 7159 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7160 				#define TX_RING_SIZE		128 
 7161 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7162 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7163 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7164 				 | MACB_BIT(ISR_ROVR))
 7165 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7166 					| MACB_BIT(ISR_RLE)		\
 7167 					| MACB_BIT(TXERR))
 7168 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7169 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7170 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7171 				#define GEM_MTU_MIN_SIZE	68
 7172 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7173 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7174 				#define MACB_HALT_TIMEOUT	1230
 7175 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7176 				#endif
 7177 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7178 				#endif
 7179 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7180 				#endif
 7181 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7182 				#endif
 7183 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7184 				#endif
 7185 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7186 				#endif
 7187 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7188 				#endif
 7189 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7190 				#endif
 7191 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7192 				#endif
 7193 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7194 				#endif
 7195 				/* LDV_COMMENT_END_PREP */
 7196 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "macb_netdev_ops" */
 7197 				ldv_handler_precall();
 7198 				macb_set_rx_mode( var_group1);
 7199 				/* LDV_COMMENT_BEGIN_PREP */
 7200 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7201 				#endif
 7202 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7203 				#endif
 7204 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7205 				#endif
 7206 				#if defined(CONFIG_OF)
 7207 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7208 				#define AT91ETHER_MAX_RX_DESCR	9
 7209 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7210 				#endif
 7211 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7212 				#endif
 7213 				#endif 
 7214 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7215 				#endif
 7216 				/* LDV_COMMENT_END_PREP */
 7217 				
 7218 
 7219 				
 7220 
 7221 			}
 7222 
 7223 			break;
 7224 			case 13: {
 7225 
 7226 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7227 				
 7228 
 7229 				/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
 7230 				/* LDV_COMMENT_BEGIN_PREP */
 7231 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7232 				#define MACB_RX_BUFFER_SIZE	128
 7233 				#define RX_BUFFER_MULTIPLE	64  
 7234 				#define RX_RING_SIZE		512 
 7235 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7236 				#define TX_RING_SIZE		128 
 7237 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7238 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7239 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7240 				 | MACB_BIT(ISR_ROVR))
 7241 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7242 					| MACB_BIT(ISR_RLE)		\
 7243 					| MACB_BIT(TXERR))
 7244 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7245 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7246 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7247 				#define GEM_MTU_MIN_SIZE	68
 7248 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7249 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7250 				#define MACB_HALT_TIMEOUT	1230
 7251 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7252 				#endif
 7253 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7254 				#endif
 7255 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7256 				#endif
 7257 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7258 				#endif
 7259 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7260 				#endif
 7261 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7262 				#endif
 7263 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7264 				#endif
 7265 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7266 				#endif
 7267 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7268 				#endif
 7269 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7270 				#endif
 7271 				/* LDV_COMMENT_END_PREP */
 7272 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_get_stats" from driver structure with callbacks "macb_netdev_ops" */
 7273 				ldv_handler_precall();
 7274 				macb_get_stats( var_group1);
 7275 				/* LDV_COMMENT_BEGIN_PREP */
 7276 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7277 				#endif
 7278 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7279 				#endif
 7280 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7281 				#endif
 7282 				#if defined(CONFIG_OF)
 7283 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7284 				#define AT91ETHER_MAX_RX_DESCR	9
 7285 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7286 				#endif
 7287 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7288 				#endif
 7289 				#endif 
 7290 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7291 				#endif
 7292 				/* LDV_COMMENT_END_PREP */
 7293 				
 7294 
 7295 				
 7296 
 7297 			}
 7298 
 7299 			break;
 7300 			case 14: {
 7301 
 7302 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7303 				
 7304 
 7305 				/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
 7306 				/* LDV_COMMENT_BEGIN_PREP */
 7307 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7308 				#define MACB_RX_BUFFER_SIZE	128
 7309 				#define RX_BUFFER_MULTIPLE	64  
 7310 				#define RX_RING_SIZE		512 
 7311 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7312 				#define TX_RING_SIZE		128 
 7313 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7314 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7315 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7316 				 | MACB_BIT(ISR_ROVR))
 7317 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7318 					| MACB_BIT(ISR_RLE)		\
 7319 					| MACB_BIT(TXERR))
 7320 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7321 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7322 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7323 				#define GEM_MTU_MIN_SIZE	68
 7324 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7325 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7326 				#define MACB_HALT_TIMEOUT	1230
 7327 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7328 				#endif
 7329 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7330 				#endif
 7331 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7332 				#endif
 7333 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7334 				#endif
 7335 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7336 				#endif
 7337 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7338 				#endif
 7339 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7340 				#endif
 7341 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7342 				#endif
 7343 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7344 				#endif
 7345 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7346 				#endif
 7347 				/* LDV_COMMENT_END_PREP */
 7348 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "macb_netdev_ops" */
 7349 				ldv_handler_precall();
 7350 				macb_ioctl( var_group1, var_group6, var_macb_ioctl_71_p2);
 7351 				/* LDV_COMMENT_BEGIN_PREP */
 7352 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7353 				#endif
 7354 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7355 				#endif
 7356 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7357 				#endif
 7358 				#if defined(CONFIG_OF)
 7359 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7360 				#define AT91ETHER_MAX_RX_DESCR	9
 7361 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7362 				#endif
 7363 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7364 				#endif
 7365 				#endif 
 7366 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7367 				#endif
 7368 				/* LDV_COMMENT_END_PREP */
 7369 				
 7370 
 7371 				
 7372 
 7373 			}
 7374 
 7375 			break;
 7376 			case 15: {
 7377 
 7378 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7379 				
 7380 
 7381 				/* content: static int macb_change_mtu(struct net_device *dev, int new_mtu)*/
 7382 				/* LDV_COMMENT_BEGIN_PREP */
 7383 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7384 				#define MACB_RX_BUFFER_SIZE	128
 7385 				#define RX_BUFFER_MULTIPLE	64  
 7386 				#define RX_RING_SIZE		512 
 7387 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7388 				#define TX_RING_SIZE		128 
 7389 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7390 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7391 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7392 				 | MACB_BIT(ISR_ROVR))
 7393 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7394 					| MACB_BIT(ISR_RLE)		\
 7395 					| MACB_BIT(TXERR))
 7396 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7397 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7398 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7399 				#define GEM_MTU_MIN_SIZE	68
 7400 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7401 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7402 				#define MACB_HALT_TIMEOUT	1230
 7403 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7404 				#endif
 7405 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7406 				#endif
 7407 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7408 				#endif
 7409 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7410 				#endif
 7411 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7412 				#endif
 7413 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7414 				#endif
 7415 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7416 				#endif
 7417 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7418 				#endif
 7419 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7420 				#endif
 7421 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7422 				#endif
 7423 				/* LDV_COMMENT_END_PREP */
 7424 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_change_mtu" from driver structure with callbacks "macb_netdev_ops" */
 7425 				ldv_handler_precall();
 7426 				macb_change_mtu( var_group1, var_macb_change_mtu_60_p1);
 7427 				/* LDV_COMMENT_BEGIN_PREP */
 7428 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7429 				#endif
 7430 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7431 				#endif
 7432 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7433 				#endif
 7434 				#if defined(CONFIG_OF)
 7435 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7436 				#define AT91ETHER_MAX_RX_DESCR	9
 7437 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7438 				#endif
 7439 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7440 				#endif
 7441 				#endif 
 7442 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7443 				#endif
 7444 				/* LDV_COMMENT_END_PREP */
 7445 				
 7446 
 7447 				
 7448 
 7449 			}
 7450 
 7451 			break;
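/* Case 15 feeds an unconstrained int into "ndo_change_mtu", so the handler is
 * expected to validate it. A minimal sketch of that contract, assuming the
 * GEM_MTU_MIN_SIZE bound from the preprocessor block above; the upper-bound
 * helper example_max_mtu() is purely illustrative and not from the trace. */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < GEM_MTU_MIN_SIZE || new_mtu > example_max_mtu(dev))
		return -EINVAL;        /* reject out-of-range requests */

	dev->mtu = new_mtu;            /* commit only after validation */
	return 0;
}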
 7452 			case 16: {
 7453 
 7454 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7455 				
 7456 
 7457 				/* content: static void macb_poll_controller(struct net_device *dev)*/
 7458 				/* LDV_COMMENT_BEGIN_PREP */
 7459 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7460 				#define MACB_RX_BUFFER_SIZE	128
 7461 				#define RX_BUFFER_MULTIPLE	64  
 7462 				#define RX_RING_SIZE		512 
 7463 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7464 				#define TX_RING_SIZE		128 
 7465 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7466 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7467 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7468 				 | MACB_BIT(ISR_ROVR))
 7469 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7470 					| MACB_BIT(ISR_RLE)		\
 7471 					| MACB_BIT(TXERR))
 7472 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7473 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7474 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7475 				#define GEM_MTU_MIN_SIZE	68
 7476 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7477 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7478 				#define MACB_HALT_TIMEOUT	1230
 7479 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7480 				#endif
 7481 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7482 				#endif
 7483 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7484 				#endif
 7485 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7486 				#endif
 7487 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7488 				/* LDV_COMMENT_END_PREP */
 7489 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_poll_controller" from driver structure with callbacks "macb_netdev_ops" */
 7490 				ldv_handler_precall();
 7491 				macb_poll_controller( var_group1);
 7492 				/* LDV_COMMENT_BEGIN_PREP */
 7493 				#endif
 7494 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7495 				#endif
 7496 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7497 				#endif
 7498 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7499 				#endif
 7500 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7501 				#endif
 7502 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7503 				#endif
 7504 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7505 				#endif
 7506 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7507 				#endif
 7508 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7509 				#endif
 7510 				#if defined(CONFIG_OF)
 7511 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7512 				#define AT91ETHER_MAX_RX_DESCR	9
 7513 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7514 				#endif
 7515 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7516 				#endif
 7517 				#endif 
 7518 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7519 				#endif
 7520 				/* LDV_COMMENT_END_PREP */
 7521 				
 7522 
 7523 				
 7524 
 7525 			}
 7526 
 7527 			break;
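/* Note that in case 16 the call site itself sits inside the
 * CONFIG_NET_POLL_CONTROLLER block of the preprocessor context, so the hook
 * only exists when netpoll support is compiled in. A hedged sketch of the
 * conventional implementation pattern (example_interrupt is a hypothetical
 * stand-in for the driver's real IRQ handler): */
#ifdef CONFIG_NET_POLL_CONTROLLER
static void example_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);              /* mask the device interrupt   */
	example_interrupt(dev->irq, dev);   /* poll by running the handler */
	enable_irq(dev->irq);               /* unmask it again             */
}
#endif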
 7528 			case 17: {
 7529 
 7530 				/** STRUCT: struct type: net_device_ops, struct name: macb_netdev_ops **/
 7531 				
 7532 
 7533 				/* content: static int macb_set_features(struct net_device *netdev, netdev_features_t features)*/
 7534 				/* LDV_COMMENT_BEGIN_PREP */
 7535 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7536 				#define MACB_RX_BUFFER_SIZE	128
 7537 				#define RX_BUFFER_MULTIPLE	64  
 7538 				#define RX_RING_SIZE		512 
 7539 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7540 				#define TX_RING_SIZE		128 
 7541 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7542 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7543 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7544 				 | MACB_BIT(ISR_ROVR))
 7545 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7546 					| MACB_BIT(ISR_RLE)		\
 7547 					| MACB_BIT(TXERR))
 7548 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7549 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7550 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7551 				#define GEM_MTU_MIN_SIZE	68
 7552 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7553 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7554 				#define MACB_HALT_TIMEOUT	1230
 7555 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7556 				#endif
 7557 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7558 				#endif
 7559 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7560 				#endif
 7561 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7562 				#endif
 7563 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7564 				#endif
 7565 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7566 				#endif
 7567 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7568 				#endif
 7569 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7570 				#endif
 7571 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7572 				#endif
 7573 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7574 				#endif
 7575 				/* LDV_COMMENT_END_PREP */
 7576 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_features" from driver structure with callbacks "macb_netdev_ops" */
 7577 				ldv_handler_precall();
 7578 				macb_set_features( var_group1, var_macb_set_features_72_p1);
 7579 				/* LDV_COMMENT_BEGIN_PREP */
 7580 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7581 				#endif
 7582 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7583 				#endif
 7584 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7585 				#endif
 7586 				#if defined(CONFIG_OF)
 7587 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7588 				#define AT91ETHER_MAX_RX_DESCR	9
 7589 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7590 				#endif
 7591 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7592 				#endif
 7593 				#endif 
 7594 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7595 				#endif
 7596 				/* LDV_COMMENT_END_PREP */
 7597 				
 7598 
 7599 				
 7600 
 7601 			}
 7602 
 7603 			break;
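/* Case 17 passes an arbitrary netdev_features_t mask to "ndo_set_features".
 * Sketch of the usual pattern, not the traced driver body: diff the requested
 * mask against the current one and reprogram only what changed. */
static int example_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_RXCSUM) {
		/* toggle hardware RX checksum offload in the MAC config here */
	}
	return 0;
}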
 7604 			case 18: {
 7605 
 7606 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7607 				if(ldv_s_at91ether_netdev_ops_net_device_ops==0) {
 7608 
 7609 				/* content: static int at91ether_open(struct net_device *dev)*/
 7610 				/* LDV_COMMENT_BEGIN_PREP */
 7611 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7612 				#define MACB_RX_BUFFER_SIZE	128
 7613 				#define RX_BUFFER_MULTIPLE	64  
 7614 				#define RX_RING_SIZE		512 
 7615 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7616 				#define TX_RING_SIZE		128 
 7617 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7618 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7619 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7620 				 | MACB_BIT(ISR_ROVR))
 7621 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7622 					| MACB_BIT(ISR_RLE)		\
 7623 					| MACB_BIT(TXERR))
 7624 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7625 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7626 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7627 				#define GEM_MTU_MIN_SIZE	68
 7628 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7629 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7630 				#define MACB_HALT_TIMEOUT	1230
 7631 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7632 				#endif
 7633 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7634 				#endif
 7635 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7636 				#endif
 7637 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7638 				#endif
 7639 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7640 				#endif
 7641 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7642 				#endif
 7643 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7644 				#endif
 7645 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7646 				#endif
 7647 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7648 				#endif
 7649 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7650 				#endif
 7651 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7652 				#endif
 7653 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7654 				#endif
 7655 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7656 				#endif
 7657 				#if defined(CONFIG_OF)
 7658 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7659 				#define AT91ETHER_MAX_RX_DESCR	9
 7660 				/* LDV_COMMENT_END_PREP */
 7661 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "at91ether_netdev_ops". Standard function test for correct return result. */
 7662 				ldv_handler_precall();
 7663 				res_at91ether_open_78 = at91ether_open( var_group1);
 7664 				ldv_check_return_value(res_at91ether_open_78);
 7665 				if (res_at91ether_open_78 < 0)
 7666 					goto ldv_module_exit;
 7667 				/* LDV_COMMENT_BEGIN_PREP */
 7668 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7669 				#endif
 7670 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7671 				#endif
 7672 				#endif 
 7673 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7674 				#endif
 7675 				/* LDV_COMMENT_END_PREP */
 7676 				ldv_s_at91ether_netdev_ops_net_device_ops++;
 7677 
 7678 				}
 7679 
 7680 			}
 7681 
 7682 			break;
 7683 			case 19: {
 7684 
 7685 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7686 				if(ldv_s_at91ether_netdev_ops_net_device_ops==1) {
 7687 
 7688 				/* content: static int at91ether_close(struct net_device *dev)*/
 7689 				/* LDV_COMMENT_BEGIN_PREP */
 7690 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7691 				#define MACB_RX_BUFFER_SIZE	128
 7692 				#define RX_BUFFER_MULTIPLE	64  
 7693 				#define RX_RING_SIZE		512 
 7694 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7695 				#define TX_RING_SIZE		128 
 7696 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7697 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7698 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7699 				 | MACB_BIT(ISR_ROVR))
 7700 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7701 					| MACB_BIT(ISR_RLE)		\
 7702 					| MACB_BIT(TXERR))
 7703 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7704 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7705 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7706 				#define GEM_MTU_MIN_SIZE	68
 7707 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7708 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7709 				#define MACB_HALT_TIMEOUT	1230
 7710 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7711 				#endif
 7712 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7713 				#endif
 7714 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7715 				#endif
 7716 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7717 				#endif
 7718 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7719 				#endif
 7720 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7721 				#endif
 7722 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7723 				#endif
 7724 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7725 				#endif
 7726 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7727 				#endif
 7728 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7729 				#endif
 7730 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7731 				#endif
 7732 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7733 				#endif
 7734 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7735 				#endif
 7736 				#if defined(CONFIG_OF)
 7737 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7738 				#define AT91ETHER_MAX_RX_DESCR	9
 7739 				/* LDV_COMMENT_END_PREP */
 7740 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "at91ether_netdev_ops". Standard function test for correct return result. */
 7741 				ldv_handler_precall();
 7742 				res_at91ether_close_79 = at91ether_close( var_group1);
 7743 				ldv_check_return_value(res_at91ether_close_79);
 7744 				if (res_at91ether_close_79)
 7745 					goto ldv_module_exit;
 7746 				/* LDV_COMMENT_BEGIN_PREP */
 7747 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7748 				#endif
 7749 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7750 				#endif
 7751 				#endif 
 7752 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7753 				#endif
 7754 				/* LDV_COMMENT_END_PREP */
 7755 				ldv_s_at91ether_netdev_ops_net_device_ops=0;
 7756 
 7757 				}
 7758 
 7759 			}
 7760 
 7761 			break;
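/* Cases 18 and 19 are the only ones gated by the counter
 * ldv_s_at91ether_netdev_ops_net_device_ops: "ndo_stop" may only run after a
 * successful "ndo_open", and a failed open aborts via ldv_module_exit. A
 * self-contained plain-C model of that ordering (fake_open/fake_close and the
 * use of rand() are illustrative stand-ins, not part of the harness): */
#include <stdlib.h>

static int fake_open(void)  { return rand() % 2 ? 0 : -1; }  /* may fail */
static int fake_close(void) { return 0; }

static void drive_callbacks(void)
{
	int state = 0;                       /* mirrors ldv_s_..._net_device_ops */

	for (int i = 0; i < 8; i++) {
		switch (rand() % 2) {        /* nondeterministic case choice */
		case 0:                      /* "case 18": ndo_open          */
			if (state == 0 && fake_open() == 0)
				state = 1;   /* close is now permitted       */
			break;
		case 1:                      /* "case 19": ndo_stop          */
			if (state == 1 && fake_close() == 0)
				state = 0;   /* back to the initial state    */
			break;
		}
	}
}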
 7762 			case 20: {
 7763 
 7764 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7765 				
 7766 
 7767 				/* content: static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)*/
 7768 				/* LDV_COMMENT_BEGIN_PREP */
 7769 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7770 				#define MACB_RX_BUFFER_SIZE	128
 7771 				#define RX_BUFFER_MULTIPLE	64  
 7772 				#define RX_RING_SIZE		512 
 7773 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7774 				#define TX_RING_SIZE		128 
 7775 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7776 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7777 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7778 				 | MACB_BIT(ISR_ROVR))
 7779 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7780 					| MACB_BIT(ISR_RLE)		\
 7781 					| MACB_BIT(TXERR))
 7782 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7783 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7784 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7785 				#define GEM_MTU_MIN_SIZE	68
 7786 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7787 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7788 				#define MACB_HALT_TIMEOUT	1230
 7789 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7790 				#endif
 7791 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7792 				#endif
 7793 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7794 				#endif
 7795 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7796 				#endif
 7797 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7798 				#endif
 7799 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7800 				#endif
 7801 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7802 				#endif
 7803 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7804 				#endif
 7805 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7806 				#endif
 7807 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7808 				#endif
 7809 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7810 				#endif
 7811 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7812 				#endif
 7813 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7814 				#endif
 7815 				#if defined(CONFIG_OF)
 7816 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7817 				#define AT91ETHER_MAX_RX_DESCR	9
 7818 				/* LDV_COMMENT_END_PREP */
 7819 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "at91ether_netdev_ops" */
 7820 				ldv_handler_precall();
 7821 				at91ether_start_xmit( var_group5, var_group1);
 7822 				/* LDV_COMMENT_BEGIN_PREP */
 7823 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7824 				#endif
 7825 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7826 				#endif
 7827 				#endif 
 7828 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7829 				#endif
 7830 				/* LDV_COMMENT_END_PREP */
 7831 				
 7832 
 7833 				
 7834 
 7835 			}
 7836 
 7837 			break;
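/* Case 20 hands a fresh sk_buff to "ndo_start_xmit" and discards the return
 * value, so the model cannot distinguish NETDEV_TX_OK from NETDEV_TX_BUSY.
 * For reference, a hedged sketch of the contract (the descriptor-space test
 * is a placeholder, not the traced driver's logic): */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (0 /* placeholder: no free TX descriptors */) {
		netif_stop_queue(dev);     /* back-pressure the stack         */
		return NETDEV_TX_BUSY;     /* caller keeps ownership of skb   */
	}
	/* ... map and queue the frame here ... */
	return NETDEV_TX_OK;               /* driver now owns (and frees) skb */
}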
 7838 			case 21: {
 7839 
 7840 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7841 				
 7842 
 7843 				/* content: static struct net_device_stats *macb_get_stats(struct net_device *dev)*/
 7844 				/* LDV_COMMENT_BEGIN_PREP */
 7845 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7846 				#define MACB_RX_BUFFER_SIZE	128
 7847 				#define RX_BUFFER_MULTIPLE	64  
 7848 				#define RX_RING_SIZE		512 
 7849 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7850 				#define TX_RING_SIZE		128 
 7851 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7852 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7853 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7854 				 | MACB_BIT(ISR_ROVR))
 7855 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7856 					| MACB_BIT(ISR_RLE)		\
 7857 					| MACB_BIT(TXERR))
 7858 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7859 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7860 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7861 				#define GEM_MTU_MIN_SIZE	68
 7862 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7863 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7864 				#define MACB_HALT_TIMEOUT	1230
 7865 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7866 				#endif
 7867 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7868 				#endif
 7869 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7870 				#endif
 7871 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7872 				#endif
 7873 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7874 				#endif
 7875 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7876 				#endif
 7877 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7878 				#endif
 7879 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7880 				#endif
 7881 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7882 				#endif
 7883 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7884 				#endif
 7885 				/* LDV_COMMENT_END_PREP */
 7886 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_get_stats" from driver structure with callbacks "at91ether_netdev_ops" */
 7887 				ldv_handler_precall();
 7888 				macb_get_stats( var_group1);
 7889 				/* LDV_COMMENT_BEGIN_PREP */
 7890 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7891 				#endif
 7892 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7893 				#endif
 7894 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7895 				#endif
 7896 				#if defined(CONFIG_OF)
 7897 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7898 				#define AT91ETHER_MAX_RX_DESCR	9
 7899 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7900 				#endif
 7901 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7902 				#endif
 7903 				#endif 
 7904 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7905 				#endif
 7906 				/* LDV_COMMENT_END_PREP */
 7907 				
 7908 
 7909 				
 7910 
 7911 			}
 7912 
 7913 			break;
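/* Case 21 only checks that "ndo_get_stats" can be called at any point; the
 * returned pointer is unused. Minimal sketch of the contract (a real handler
 * would also fold the MAC's hardware counters into the totals, omitted here): */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	return &dev->stats;                /* accumulated software statistics */
}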
 7914 			case 22: {
 7915 
 7916 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7917 				
 7918 
 7919 				/* content: static void macb_set_rx_mode(struct net_device *dev)*/
 7920 				/* LDV_COMMENT_BEGIN_PREP */
 7921 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7922 				#define MACB_RX_BUFFER_SIZE	128
 7923 				#define RX_BUFFER_MULTIPLE	64  
 7924 				#define RX_RING_SIZE		512 
 7925 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 7926 				#define TX_RING_SIZE		128 
 7927 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 7928 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 7929 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 7930 				 | MACB_BIT(ISR_ROVR))
 7931 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 7932 					| MACB_BIT(ISR_RLE)		\
 7933 					| MACB_BIT(TXERR))
 7934 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 7935 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 7936 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 7937 				#define GEM_MTU_MIN_SIZE	68
 7938 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 7939 				#define MACB_WOL_ENABLED		(0x1 << 1)
 7940 				#define MACB_HALT_TIMEOUT	1230
 7941 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7942 				#endif
 7943 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7944 				#endif
 7945 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7946 				#endif
 7947 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7948 				#endif
 7949 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7950 				#endif
 7951 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 7952 				#endif
 7953 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7954 				#endif
 7955 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7956 				#endif
 7957 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7958 				#endif
 7959 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7960 				#endif
 7961 				/* LDV_COMMENT_END_PREP */
 7962 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_set_rx_mode" from driver structure with callbacks "at91ether_netdev_ops" */
 7963 				ldv_handler_precall();
 7964 				macb_set_rx_mode( var_group1);
 7965 				/* LDV_COMMENT_BEGIN_PREP */
 7966 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7967 				#endif
 7968 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7969 				#endif
 7970 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7971 				#endif
 7972 				#if defined(CONFIG_OF)
 7973 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 7974 				#define AT91ETHER_MAX_RX_DESCR	9
 7975 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7976 				#endif
 7977 				#ifdef CONFIG_NET_POLL_CONTROLLER
 7978 				#endif
 7979 				#endif 
 7980 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 7981 				#endif
 7982 				/* LDV_COMMENT_END_PREP */
 7983 				
 7984 
 7985 				
 7986 
 7987 			}
 7988 
 7989 			break;
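/* Case 22 exercises "ndo_set_rx_mode", which the stack calls with arbitrary
 * interface flags and multicast lists. A hedged sketch of the usual pattern;
 * the register programming is left as comments and nothing here is the traced
 * driver's body: */
static void example_set_rx_mode(struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC) {
		/* enable "copy all frames" in the MAC configuration register */
	} else if (!netdev_mc_empty(dev)) {
		/* rebuild the multicast hash filter from dev's mc list       */
	} else {
		/* unicast-only reception                                     */
	}
}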
 7990 			case 23: {
 7991 
 7992 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 7993 				
 7994 
 7995 				/* content: static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
 7996 				/* LDV_COMMENT_BEGIN_PREP */
 7997 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 7998 				#define MACB_RX_BUFFER_SIZE	128
 7999 				#define RX_BUFFER_MULTIPLE	64  
 8000 				#define RX_RING_SIZE		512 
 8001 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8002 				#define TX_RING_SIZE		128 
 8003 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8004 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8005 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8006 				 | MACB_BIT(ISR_ROVR))
 8007 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8008 					| MACB_BIT(ISR_RLE)		\
 8009 					| MACB_BIT(TXERR))
 8010 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8011 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8012 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8013 				#define GEM_MTU_MIN_SIZE	68
 8014 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8015 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8016 				#define MACB_HALT_TIMEOUT	1230
 8017 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8018 				#endif
 8019 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8020 				#endif
 8021 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8022 				#endif
 8023 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8024 				#endif
 8025 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8026 				#endif
 8027 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8028 				#endif
 8029 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8030 				#endif
 8031 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8032 				#endif
 8033 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8034 				#endif
 8035 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8036 				#endif
 8037 				/* LDV_COMMENT_END_PREP */
 8038 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "at91ether_netdev_ops" */
 8039 				ldv_handler_precall();
 8040 				macb_ioctl( var_group1, var_group6, var_macb_ioctl_71_p2);
 8041 				/* LDV_COMMENT_BEGIN_PREP */
 8042 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8043 				#endif
 8044 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8045 				#endif
 8046 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8047 				#endif
 8048 				#if defined(CONFIG_OF)
 8049 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8050 				#define AT91ETHER_MAX_RX_DESCR	9
 8051 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8052 				#endif
 8053 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8054 				#endif
 8055 				#endif 
 8056 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8057 				#endif
 8058 				/* LDV_COMMENT_END_PREP */
 8059 				
 8060 
 8061 				
 8062 
 8063 			}
 8064 
 8065 			break;
 8066 			case 24: {
 8067 
 8068 				/** STRUCT: struct type: net_device_ops, struct name: at91ether_netdev_ops **/
 8069 				
 8070 
 8071 				/* content: static void at91ether_poll_controller(struct net_device *dev)*/
 8072 				/* LDV_COMMENT_BEGIN_PREP */
 8073 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8074 				#define MACB_RX_BUFFER_SIZE	128
 8075 				#define RX_BUFFER_MULTIPLE	64  
 8076 				#define RX_RING_SIZE		512 
 8077 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8078 				#define TX_RING_SIZE		128 
 8079 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8080 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8081 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8082 				 | MACB_BIT(ISR_ROVR))
 8083 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8084 					| MACB_BIT(ISR_RLE)		\
 8085 					| MACB_BIT(TXERR))
 8086 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8087 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8088 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8089 				#define GEM_MTU_MIN_SIZE	68
 8090 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8091 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8092 				#define MACB_HALT_TIMEOUT	1230
 8093 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8094 				#endif
 8095 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8096 				#endif
 8097 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8098 				#endif
 8099 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8100 				#endif
 8101 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8102 				#endif
 8103 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8104 				#endif
 8105 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8106 				#endif
 8107 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8108 				#endif
 8109 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8110 				#endif
 8111 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8112 				#endif
 8113 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8114 				#endif
 8115 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8116 				#endif
 8117 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8118 				#endif
 8119 				#if defined(CONFIG_OF)
 8120 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8121 				#define AT91ETHER_MAX_RX_DESCR	9
 8122 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8123 				/* LDV_COMMENT_END_PREP */
 8124 				/* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_poll_controller" from driver structure with callbacks "at91ether_netdev_ops" */
 8125 				ldv_handler_precall();
 8126 				at91ether_poll_controller( var_group1);
 8127 				/* LDV_COMMENT_BEGIN_PREP */
 8128 				#endif
 8129 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8130 				#endif
 8131 				#endif 
 8132 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8133 				#endif
 8134 				/* LDV_COMMENT_END_PREP */
 8135 				
 8136 
 8137 				
 8138 
 8139 			}
 8140 
 8141 			break;
 8142 			case 25: {
 8143 
 8144 				/** STRUCT: struct type: macb_config, struct name: at91sam9260_config **/
 8145 				
 8146 
 8147 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8148 				/* LDV_COMMENT_BEGIN_PREP */
 8149 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8150 				#define MACB_RX_BUFFER_SIZE	128
 8151 				#define RX_BUFFER_MULTIPLE	64  
 8152 				#define RX_RING_SIZE		512 
 8153 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8154 				#define TX_RING_SIZE		128 
 8155 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8156 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8157 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8158 				 | MACB_BIT(ISR_ROVR))
 8159 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8160 					| MACB_BIT(ISR_RLE)		\
 8161 					| MACB_BIT(TXERR))
 8162 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8163 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8164 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8165 				#define GEM_MTU_MIN_SIZE	68
 8166 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8167 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8168 				#define MACB_HALT_TIMEOUT	1230
 8169 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8170 				#endif
 8171 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8172 				#endif
 8173 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8174 				#endif
 8175 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8176 				#endif
 8177 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8178 				#endif
 8179 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8180 				#endif
 8181 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8182 				#endif
 8183 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8184 				#endif
 8185 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8186 				#endif
 8187 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8188 				#endif
 8189 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8190 				#endif
 8191 				/* LDV_COMMENT_END_PREP */
 8192 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "at91sam9260_config" */
 8193 				ldv_handler_precall();
 8194 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8195 				/* LDV_COMMENT_BEGIN_PREP */
 8196 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8197 				#endif
 8198 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8199 				#endif
 8200 				#if defined(CONFIG_OF)
 8201 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8202 				#define AT91ETHER_MAX_RX_DESCR	9
 8203 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8204 				#endif
 8205 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8206 				#endif
 8207 				#endif 
 8208 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8209 				#endif
 8210 				/* LDV_COMMENT_END_PREP */
 8211 				
 8212 
 8213 				
 8214 
 8215 			}
 8216 
 8217 			break;
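/* Case 25 invokes the "clk_init" callback of at91sam9260_config with four
 * clock out-parameters. A hedged sketch of what such a callback typically
 * does (the clock names "pclk" and "hclk" are the conventional ones but are
 * an assumption here; the optional clocks are simply left NULL): */
static int example_clk_init(struct platform_device *pdev, struct clk **pclk,
			    struct clk **hclk, struct clk **tx_clk,
			    struct clk **rx_clk)
{
	*pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	*hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(*hclk))
		return PTR_ERR(*hclk);

	*tx_clk = NULL;                    /* optional clocks may be absent */
	*rx_clk = NULL;

	if (clk_prepare_enable(*pclk))
		return -EIO;               /* illustrative error code */
	return clk_prepare_enable(*hclk);
}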
 8218 			case 26: {
 8219 
 8220 				/** STRUCT: struct type: macb_config, struct name: at91sam9260_config **/
 8221 				
 8222 
 8223 				/* content: static int macb_init(struct platform_device *pdev)*/
 8224 				/* LDV_COMMENT_BEGIN_PREP */
 8225 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8226 				#define MACB_RX_BUFFER_SIZE	128
 8227 				#define RX_BUFFER_MULTIPLE	64  
 8228 				#define RX_RING_SIZE		512 
 8229 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8230 				#define TX_RING_SIZE		128 
 8231 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8232 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8233 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8234 				 | MACB_BIT(ISR_ROVR))
 8235 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8236 					| MACB_BIT(ISR_RLE)		\
 8237 					| MACB_BIT(TXERR))
 8238 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8239 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8240 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8241 				#define GEM_MTU_MIN_SIZE	68
 8242 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8243 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8244 				#define MACB_HALT_TIMEOUT	1230
 8245 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8246 				#endif
 8247 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8248 				#endif
 8249 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8250 				#endif
 8251 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8252 				#endif
 8253 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8254 				#endif
 8255 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8256 				#endif
 8257 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8258 				#endif
 8259 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8260 				#endif
 8261 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8262 				#endif
 8263 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8264 				#endif
 8265 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8266 				#endif
 8267 				/* LDV_COMMENT_END_PREP */
 8268 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "at91sam9260_config" */
 8269 				ldv_handler_precall();
 8270 				macb_init( var_group7);
 8271 				/* LDV_COMMENT_BEGIN_PREP */
 8272 				#if defined(CONFIG_OF)
 8273 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8274 				#define AT91ETHER_MAX_RX_DESCR	9
 8275 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8276 				#endif
 8277 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8278 				#endif
 8279 				#endif 
 8280 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8281 				#endif
 8282 				/* LDV_COMMENT_END_PREP */
 8283 				
 8284 
 8285 				
 8286 
 8287 			}
 8288 
 8289 			break;
 8290 			case 27: {
 8291 
 8292 				/** STRUCT: struct type: macb_config, struct name: pc302gem_config **/
 8293 				
 8294 
 8295 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8296 				/* LDV_COMMENT_BEGIN_PREP */
 8297 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8298 				#define MACB_RX_BUFFER_SIZE	128
 8299 				#define RX_BUFFER_MULTIPLE	64  
 8300 				#define RX_RING_SIZE		512 
 8301 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8302 				#define TX_RING_SIZE		128 
 8303 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8304 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8305 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8306 				 | MACB_BIT(ISR_ROVR))
 8307 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8308 					| MACB_BIT(ISR_RLE)		\
 8309 					| MACB_BIT(TXERR))
 8310 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8311 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8312 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8313 				#define GEM_MTU_MIN_SIZE	68
 8314 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8315 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8316 				#define MACB_HALT_TIMEOUT	1230
 8317 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8318 				#endif
 8319 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8320 				#endif
 8321 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8322 				#endif
 8323 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8324 				#endif
 8325 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8326 				#endif
 8327 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8328 				#endif
 8329 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8330 				#endif
 8331 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8332 				#endif
 8333 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8334 				#endif
 8335 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8336 				#endif
 8337 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8338 				#endif
 8339 				/* LDV_COMMENT_END_PREP */
 8340 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "pc302gem_config" */
 8341 				ldv_handler_precall();
 8342 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8343 				/* LDV_COMMENT_BEGIN_PREP */
 8344 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8345 				#endif
 8346 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8347 				#endif
 8348 				#if defined(CONFIG_OF)
 8349 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8350 				#define AT91ETHER_MAX_RX_DESCR	9
 8351 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8352 				#endif
 8353 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8354 				#endif
 8355 				#endif 
 8356 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8357 				#endif
 8358 				/* LDV_COMMENT_END_PREP */
 8359 				
 8360 
 8361 				
 8362 
 8363 			}
 8364 
 8365 			break;
 8366 			case 28: {
 8367 
 8368 				/** STRUCT: struct type: macb_config, struct name: pc302gem_config **/
 8369 				
 8370 
 8371 				/* content: static int macb_init(struct platform_device *pdev)*/
 8372 				/* LDV_COMMENT_BEGIN_PREP */
 8373 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8374 				#define MACB_RX_BUFFER_SIZE	128
 8375 				#define RX_BUFFER_MULTIPLE	64  
 8376 				#define RX_RING_SIZE		512 
 8377 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8378 				#define TX_RING_SIZE		128 
 8379 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8380 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8381 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8382 				 | MACB_BIT(ISR_ROVR))
 8383 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8384 					| MACB_BIT(ISR_RLE)		\
 8385 					| MACB_BIT(TXERR))
 8386 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8387 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8388 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8389 				#define GEM_MTU_MIN_SIZE	68
 8390 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8391 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8392 				#define MACB_HALT_TIMEOUT	1230
 8393 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8394 				#endif
 8395 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8396 				#endif
 8397 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8398 				#endif
 8399 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8400 				#endif
 8401 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8402 				#endif
 8403 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8404 				#endif
 8405 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8406 				#endif
 8407 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8408 				#endif
 8409 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8410 				#endif
 8411 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8412 				#endif
 8413 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8414 				#endif
 8415 				/* LDV_COMMENT_END_PREP */
 8416 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "pc302gem_config" */
 8417 				ldv_handler_precall();
 8418 				macb_init( var_group7);
 8419 				/* LDV_COMMENT_BEGIN_PREP */
 8420 				#if defined(CONFIG_OF)
 8421 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8422 				#define AT91ETHER_MAX_RX_DESCR	9
 8423 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8424 				#endif
 8425 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8426 				#endif
 8427 				#endif 
 8428 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8429 				#endif
 8430 				/* LDV_COMMENT_END_PREP */
 8431 				
 8432 
 8433 				
 8434 
 8435 			}
 8436 
 8437 			break;
 8438 			case 29: {
 8439 
 8440 				/** STRUCT: struct type: macb_config, struct name: sama5d2_config **/
 8441 				
 8442 
 8443 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8444 				/* LDV_COMMENT_BEGIN_PREP */
 8445 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8446 				#define MACB_RX_BUFFER_SIZE	128
 8447 				#define RX_BUFFER_MULTIPLE	64  
 8448 				#define RX_RING_SIZE		512 
 8449 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8450 				#define TX_RING_SIZE		128 
 8451 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8452 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8453 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8454 				 | MACB_BIT(ISR_ROVR))
 8455 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8456 					| MACB_BIT(ISR_RLE)		\
 8457 					| MACB_BIT(TXERR))
 8458 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8459 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8460 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8461 				#define GEM_MTU_MIN_SIZE	68
 8462 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8463 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8464 				#define MACB_HALT_TIMEOUT	1230
 8465 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8466 				#endif
 8467 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8468 				#endif
 8469 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8470 				#endif
 8471 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8472 				#endif
 8473 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8474 				#endif
 8475 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8476 				#endif
 8477 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8478 				#endif
 8479 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8480 				#endif
 8481 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8482 				#endif
 8483 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8484 				#endif
 8485 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8486 				#endif
 8487 				/* LDV_COMMENT_END_PREP */
 8488 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "sama5d2_config" */
 8489 				ldv_handler_precall();
 8490 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8491 				/* LDV_COMMENT_BEGIN_PREP */
 8492 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8493 				#endif
 8494 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8495 				#endif
 8496 				#if defined(CONFIG_OF)
 8497 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8498 				#define AT91ETHER_MAX_RX_DESCR	9
 8499 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8500 				#endif
 8501 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8502 				#endif
 8503 				#endif 
 8504 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8505 				#endif
 8506 				/* LDV_COMMENT_END_PREP */
 8507 				
 8508 
 8509 				
 8510 
 8511 			}
 8512 
 8513 			break;
 8514 			case 30: {
 8515 
 8516 				/** STRUCT: struct type: macb_config, struct name: sama5d2_config **/
 8517 				
 8518 
 8519 				/* content: static int macb_init(struct platform_device *pdev)*/
 8520 				/* LDV_COMMENT_BEGIN_PREP */
 8521 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8522 				#define MACB_RX_BUFFER_SIZE	128
 8523 				#define RX_BUFFER_MULTIPLE	64  
 8524 				#define RX_RING_SIZE		512 
 8525 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8526 				#define TX_RING_SIZE		128 
 8527 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8528 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8529 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8530 				 | MACB_BIT(ISR_ROVR))
 8531 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8532 					| MACB_BIT(ISR_RLE)		\
 8533 					| MACB_BIT(TXERR))
 8534 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8535 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8536 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8537 				#define GEM_MTU_MIN_SIZE	68
 8538 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8539 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8540 				#define MACB_HALT_TIMEOUT	1230
 8541 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8542 				#endif
 8543 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8544 				#endif
 8545 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8546 				#endif
 8547 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8548 				#endif
 8549 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8550 				#endif
 8551 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8552 				#endif
 8553 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8554 				#endif
 8555 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8556 				#endif
 8557 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8558 				#endif
 8559 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8560 				#endif
 8561 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8562 				#endif
 8563 				/* LDV_COMMENT_END_PREP */
 8564 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "sama5d2_config" */
 8565 				ldv_handler_precall();
 8566 				macb_init( var_group7);
 8567 				/* LDV_COMMENT_BEGIN_PREP */
 8568 				#if defined(CONFIG_OF)
 8569 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8570 				#define AT91ETHER_MAX_RX_DESCR	9
 8571 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8572 				#endif
 8573 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8574 				#endif
 8575 				#endif 
 8576 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8577 				#endif
 8578 				/* LDV_COMMENT_END_PREP */
 8579 				
 8580 
 8581 				
 8582 
 8583 			}
 8584 
 8585 			break;
 8586 			case 31: {
 8587 
 8588 				/** STRUCT: struct type: macb_config, struct name: sama5d3_config **/
 8589 				
 8590 
 8591 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8592 				/* LDV_COMMENT_BEGIN_PREP */
 8593 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8594 				#define MACB_RX_BUFFER_SIZE	128
 8595 				#define RX_BUFFER_MULTIPLE	64  
 8596 				#define RX_RING_SIZE		512 
 8597 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8598 				#define TX_RING_SIZE		128 
 8599 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8600 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8601 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8602 				 | MACB_BIT(ISR_ROVR))
 8603 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8604 					| MACB_BIT(ISR_RLE)		\
 8605 					| MACB_BIT(TXERR))
 8606 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8607 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8608 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8609 				#define GEM_MTU_MIN_SIZE	68
 8610 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8611 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8612 				#define MACB_HALT_TIMEOUT	1230
 8613 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8614 				#endif
 8615 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8616 				#endif
 8617 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8618 				#endif
 8619 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8620 				#endif
 8621 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8622 				#endif
 8623 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8624 				#endif
 8625 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8626 				#endif
 8627 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8628 				#endif
 8629 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8630 				#endif
 8631 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8632 				#endif
 8633 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8634 				#endif
 8635 				/* LDV_COMMENT_END_PREP */
 8636 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "sama5d3_config" */
 8637 				ldv_handler_precall();
 8638 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8639 				/* LDV_COMMENT_BEGIN_PREP */
 8640 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8641 				#endif
 8642 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8643 				#endif
 8644 				#if defined(CONFIG_OF)
 8645 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8646 				#define AT91ETHER_MAX_RX_DESCR	9
 8647 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8648 				#endif
 8649 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8650 				#endif
 8651 				#endif 
 8652 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8653 				#endif
 8654 				/* LDV_COMMENT_END_PREP */
 8655 				
 8656 
 8657 				
 8658 
 8659 			}
 8660 
 8661 			break;
 8662 			case 32: {
 8663 
 8664 				/** STRUCT: struct type: macb_config, struct name: sama5d3_config **/
 8665 				
 8666 
 8667 				/* content: static int macb_init(struct platform_device *pdev)*/
 8668 				/* LDV_COMMENT_BEGIN_PREP */
 8669 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8670 				#define MACB_RX_BUFFER_SIZE	128
 8671 				#define RX_BUFFER_MULTIPLE	64  
 8672 				#define RX_RING_SIZE		512 
 8673 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8674 				#define TX_RING_SIZE		128 
 8675 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8676 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8677 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8678 				 | MACB_BIT(ISR_ROVR))
 8679 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8680 					| MACB_BIT(ISR_RLE)		\
 8681 					| MACB_BIT(TXERR))
 8682 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8683 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8684 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8685 				#define GEM_MTU_MIN_SIZE	68
 8686 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8687 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8688 				#define MACB_HALT_TIMEOUT	1230
 8689 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8690 				#endif
 8691 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8692 				#endif
 8693 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8694 				#endif
 8695 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8696 				#endif
 8697 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8698 				#endif
 8699 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8700 				#endif
 8701 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8702 				#endif
 8703 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8704 				#endif
 8705 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8706 				#endif
 8707 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8708 				#endif
 8709 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8710 				#endif
 8711 				/* LDV_COMMENT_END_PREP */
 8712 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "sama5d3_config" */
 8713 				ldv_handler_precall();
 8714 				macb_init( var_group7);
 8715 				/* LDV_COMMENT_BEGIN_PREP */
 8716 				#if defined(CONFIG_OF)
 8717 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8718 				#define AT91ETHER_MAX_RX_DESCR	9
 8719 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8720 				#endif
 8721 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8722 				#endif
 8723 				#endif 
 8724 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8725 				#endif
 8726 				/* LDV_COMMENT_END_PREP */
 8727 				
 8728 
 8729 				
 8730 
 8731 			}
 8732 
 8733 			break;
 8734 			case 33: {
 8735 
 8736 				/** STRUCT: struct type: macb_config, struct name: sama5d4_config **/
 8737 				
 8738 
 8739 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8740 				/* LDV_COMMENT_BEGIN_PREP */
 8741 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8742 				#define MACB_RX_BUFFER_SIZE	128
 8743 				#define RX_BUFFER_MULTIPLE	64  
 8744 				#define RX_RING_SIZE		512 
 8745 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8746 				#define TX_RING_SIZE		128 
 8747 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8748 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8749 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8750 				 | MACB_BIT(ISR_ROVR))
 8751 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8752 					| MACB_BIT(ISR_RLE)		\
 8753 					| MACB_BIT(TXERR))
 8754 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8755 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8756 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8757 				#define GEM_MTU_MIN_SIZE	68
 8758 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8759 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8760 				#define MACB_HALT_TIMEOUT	1230
 8761 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8762 				#endif
 8763 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8764 				#endif
 8765 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8766 				#endif
 8767 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8768 				#endif
 8769 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8770 				#endif
 8771 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8772 				#endif
 8773 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8774 				#endif
 8775 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8776 				#endif
 8777 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8778 				#endif
 8779 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8780 				#endif
 8781 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8782 				#endif
 8783 				/* LDV_COMMENT_END_PREP */
 8784 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "sama5d4_config" */
 8785 				ldv_handler_precall();
 8786 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 8787 				/* LDV_COMMENT_BEGIN_PREP */
 8788 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8789 				#endif
 8790 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8791 				#endif
 8792 				#if defined(CONFIG_OF)
 8793 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8794 				#define AT91ETHER_MAX_RX_DESCR	9
 8795 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8796 				#endif
 8797 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8798 				#endif
 8799 				#endif 
 8800 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8801 				#endif
 8802 				/* LDV_COMMENT_END_PREP */
 8803 				
 8804 
 8805 				
 8806 
 8807 			}
 8808 
 8809 			break;
 8810 			case 34: {
 8811 
 8812 				/** STRUCT: struct type: macb_config, struct name: sama5d4_config **/
 8813 				
 8814 
 8815 				/* content: static int macb_init(struct platform_device *pdev)*/
 8816 				/* LDV_COMMENT_BEGIN_PREP */
 8817 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8818 				#define MACB_RX_BUFFER_SIZE	128
 8819 				#define RX_BUFFER_MULTIPLE	64  
 8820 				#define RX_RING_SIZE		512 
 8821 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8822 				#define TX_RING_SIZE		128 
 8823 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8824 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8825 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8826 				 | MACB_BIT(ISR_ROVR))
 8827 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8828 					| MACB_BIT(ISR_RLE)		\
 8829 					| MACB_BIT(TXERR))
 8830 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8831 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8832 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8833 				#define GEM_MTU_MIN_SIZE	68
 8834 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8835 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8836 				#define MACB_HALT_TIMEOUT	1230
 8837 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8838 				#endif
 8839 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8840 				#endif
 8841 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8842 				#endif
 8843 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8844 				#endif
 8845 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8846 				#endif
 8847 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8848 				#endif
 8849 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8850 				#endif
 8851 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8852 				#endif
 8853 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8854 				#endif
 8855 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8856 				#endif
 8857 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8858 				#endif
 8859 				/* LDV_COMMENT_END_PREP */
 8860 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "sama5d4_config" */
 8861 				ldv_handler_precall();
 8862 				macb_init( var_group7);
 8863 				/* LDV_COMMENT_BEGIN_PREP */
 8864 				#if defined(CONFIG_OF)
 8865 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8866 				#define AT91ETHER_MAX_RX_DESCR	9
 8867 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8868 				#endif
 8869 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8870 				#endif
 8871 				#endif 
 8872 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8873 				#endif
 8874 				/* LDV_COMMENT_END_PREP */
 8875 				
 8876 
 8877 				
 8878 
 8879 			}
 8880 
 8881 			break;
 8882 			case 35: {
 8883 
 8884 				/** STRUCT: struct type: macb_config, struct name: emac_config **/
 8885 				
 8886 
 8887 				/* content: static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 8888 				/* LDV_COMMENT_BEGIN_PREP */
 8889 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8890 				#define MACB_RX_BUFFER_SIZE	128
 8891 				#define RX_BUFFER_MULTIPLE	64  
 8892 				#define RX_RING_SIZE		512 
 8893 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8894 				#define TX_RING_SIZE		128 
 8895 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8896 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8897 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8898 				 | MACB_BIT(ISR_ROVR))
 8899 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8900 					| MACB_BIT(ISR_RLE)		\
 8901 					| MACB_BIT(TXERR))
 8902 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8903 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8904 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8905 				#define GEM_MTU_MIN_SIZE	68
 8906 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8907 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8908 				#define MACB_HALT_TIMEOUT	1230
 8909 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8910 				#endif
 8911 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8912 				#endif
 8913 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8914 				#endif
 8915 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8916 				#endif
 8917 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8918 				#endif
 8919 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8920 				#endif
 8921 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8922 				#endif
 8923 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8924 				#endif
 8925 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8926 				#endif
 8927 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8928 				#endif
 8929 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8930 				#endif
 8931 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8932 				#endif
 8933 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8934 				#endif
 8935 				#if defined(CONFIG_OF)
 8936 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 8937 				#define AT91ETHER_MAX_RX_DESCR	9
 8938 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8939 				#endif
 8940 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8941 				#endif
 8942 				/* LDV_COMMENT_END_PREP */
 8943 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "emac_config" */
 8944 				ldv_handler_precall();
 8945 				at91ether_clk_init( var_group7, var_group8, var_at91ether_clk_init_84_p2, var_at91ether_clk_init_84_p3, var_at91ether_clk_init_84_p4);
 8946 				/* LDV_COMMENT_BEGIN_PREP */
 8947 				#endif 
 8948 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8949 				#endif
 8950 				/* LDV_COMMENT_END_PREP */
 8951 				
 8952 
 8953 				
 8954 
 8955 			}
 8956 
 8957 			break;
 8958 			case 36: {
 8959 
 8960 				/** STRUCT: struct type: macb_config, struct name: emac_config **/
 8961 				
 8962 
 8963 				/* content: static int at91ether_init(struct platform_device *pdev)*/
 8964 				/* LDV_COMMENT_BEGIN_PREP */
 8965 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 8966 				#define MACB_RX_BUFFER_SIZE	128
 8967 				#define RX_BUFFER_MULTIPLE	64  
 8968 				#define RX_RING_SIZE		512 
 8969 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 8970 				#define TX_RING_SIZE		128 
 8971 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 8972 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 8973 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 8974 				 | MACB_BIT(ISR_ROVR))
 8975 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 8976 					| MACB_BIT(ISR_RLE)		\
 8977 					| MACB_BIT(TXERR))
 8978 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 8979 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 8980 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 8981 				#define GEM_MTU_MIN_SIZE	68
 8982 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 8983 				#define MACB_WOL_ENABLED		(0x1 << 1)
 8984 				#define MACB_HALT_TIMEOUT	1230
 8985 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8986 				#endif
 8987 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8988 				#endif
 8989 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8990 				#endif
 8991 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8992 				#endif
 8993 				#ifdef CONFIG_NET_POLL_CONTROLLER
 8994 				#endif
 8995 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 8996 				#endif
 8997 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 8998 				#endif
 8999 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9000 				#endif
 9001 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9002 				#endif
 9003 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9004 				#endif
 9005 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9006 				#endif
 9007 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9008 				#endif
 9009 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9010 				#endif
 9011 				#if defined(CONFIG_OF)
 9012 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9013 				#define AT91ETHER_MAX_RX_DESCR	9
 9014 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9015 				#endif
 9016 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9017 				#endif
 9018 				/* LDV_COMMENT_END_PREP */
 9019 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "emac_config" */
 9020 				ldv_handler_precall();
 9021 				at91ether_init( var_group7);
 9022 				/* LDV_COMMENT_BEGIN_PREP */
 9023 				#endif 
 9024 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9025 				#endif
 9026 				/* LDV_COMMENT_END_PREP */
 9027 				
 9028 
 9029 				
 9030 
 9031 			}
 9032 
 9033 			break;
 9034 			case 37: {
 9035 
 9036 				/** STRUCT: struct type: macb_config, struct name: np4_config **/
 9037 				
 9038 
 9039 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 9040 				/* LDV_COMMENT_BEGIN_PREP */
 9041 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9042 				#define MACB_RX_BUFFER_SIZE	128
 9043 				#define RX_BUFFER_MULTIPLE	64  
 9044 				#define RX_RING_SIZE		512 
 9045 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9046 				#define TX_RING_SIZE		128 
 9047 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9048 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9049 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9050 				 | MACB_BIT(ISR_ROVR))
 9051 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9052 					| MACB_BIT(ISR_RLE)		\
 9053 					| MACB_BIT(TXERR))
 9054 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9055 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9056 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9057 				#define GEM_MTU_MIN_SIZE	68
 9058 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9059 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9060 				#define MACB_HALT_TIMEOUT	1230
 9061 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9062 				#endif
 9063 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9064 				#endif
 9065 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9066 				#endif
 9067 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9068 				#endif
 9069 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9070 				#endif
 9071 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9072 				#endif
 9073 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9074 				#endif
 9075 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9076 				#endif
 9077 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9078 				#endif
 9079 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9080 				#endif
 9081 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9082 				#endif
 9083 				/* LDV_COMMENT_END_PREP */
 9084 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "np4_config" */
 9085 				ldv_handler_precall();
 9086 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 9087 				/* LDV_COMMENT_BEGIN_PREP */
 9088 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9089 				#endif
 9090 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9091 				#endif
 9092 				#if defined(CONFIG_OF)
 9093 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9094 				#define AT91ETHER_MAX_RX_DESCR	9
 9095 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9096 				#endif
 9097 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9098 				#endif
 9099 				#endif 
 9100 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9101 				#endif
 9102 				/* LDV_COMMENT_END_PREP */
 9103 				
 9104 
 9105 				
 9106 
 9107 			}
 9108 
 9109 			break;
 9110 			case 38: {
 9111 
 9112 				/** STRUCT: struct type: macb_config, struct name: np4_config **/
 9113 				
 9114 
 9115 				/* content: static int macb_init(struct platform_device *pdev)*/
 9116 				/* LDV_COMMENT_BEGIN_PREP */
 9117 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9118 				#define MACB_RX_BUFFER_SIZE	128
 9119 				#define RX_BUFFER_MULTIPLE	64  
 9120 				#define RX_RING_SIZE		512 
 9121 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9122 				#define TX_RING_SIZE		128 
 9123 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9124 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9125 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9126 				 | MACB_BIT(ISR_ROVR))
 9127 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9128 					| MACB_BIT(ISR_RLE)		\
 9129 					| MACB_BIT(TXERR))
 9130 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9131 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9132 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9133 				#define GEM_MTU_MIN_SIZE	68
 9134 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9135 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9136 				#define MACB_HALT_TIMEOUT	1230
 9137 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9138 				#endif
 9139 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9140 				#endif
 9141 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9142 				#endif
 9143 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9144 				#endif
 9145 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9146 				#endif
 9147 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9148 				#endif
 9149 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9150 				#endif
 9151 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9152 				#endif
 9153 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9154 				#endif
 9155 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9156 				#endif
 9157 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9158 				#endif
 9159 				/* LDV_COMMENT_END_PREP */
 9160 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "np4_config" */
 9161 				ldv_handler_precall();
 9162 				macb_init( var_group7);
 9163 				/* LDV_COMMENT_BEGIN_PREP */
 9164 				#if defined(CONFIG_OF)
 9165 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9166 				#define AT91ETHER_MAX_RX_DESCR	9
 9167 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9168 				#endif
 9169 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9170 				#endif
 9171 				#endif 
 9172 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9173 				#endif
 9174 				/* LDV_COMMENT_END_PREP */
 9175 				
 9176 
 9177 				
 9178 
 9179 			}
 9180 
 9181 			break;
 9182 			case 39: {
 9183 
 9184 				/** STRUCT: struct type: macb_config, struct name: zynqmp_config **/
 9185 				
 9186 
 9187 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 9188 				/* LDV_COMMENT_BEGIN_PREP */
 9189 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9190 				#define MACB_RX_BUFFER_SIZE	128
 9191 				#define RX_BUFFER_MULTIPLE	64  
 9192 				#define RX_RING_SIZE		512 
 9193 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9194 				#define TX_RING_SIZE		128 
 9195 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9196 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9197 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9198 				 | MACB_BIT(ISR_ROVR))
 9199 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9200 					| MACB_BIT(ISR_RLE)		\
 9201 					| MACB_BIT(TXERR))
 9202 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9203 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9204 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9205 				#define GEM_MTU_MIN_SIZE	68
 9206 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9207 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9208 				#define MACB_HALT_TIMEOUT	1230
 9209 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9210 				#endif
 9211 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9212 				#endif
 9213 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9214 				#endif
 9215 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9216 				#endif
 9217 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9218 				#endif
 9219 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9220 				#endif
 9221 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9222 				#endif
 9223 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9224 				#endif
 9225 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9226 				#endif
 9227 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9228 				#endif
 9229 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9230 				#endif
 9231 				/* LDV_COMMENT_END_PREP */
 9232 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "zynqmp_config" */
 9233 				ldv_handler_precall();
 9234 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 9235 				/* LDV_COMMENT_BEGIN_PREP */
 9236 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9237 				#endif
 9238 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9239 				#endif
 9240 				#if defined(CONFIG_OF)
 9241 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9242 				#define AT91ETHER_MAX_RX_DESCR	9
 9243 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9244 				#endif
 9245 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9246 				#endif
 9247 				#endif 
 9248 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9249 				#endif
 9250 				/* LDV_COMMENT_END_PREP */
 9251 				
 9252 
 9253 				
 9254 
 9255 			}
 9256 
 9257 			break;
 9258 			case 40: {
 9259 
 9260 				/** STRUCT: struct type: macb_config, struct name: zynqmp_config **/
 9261 				
 9262 
 9263 				/* content: static int macb_init(struct platform_device *pdev)*/
 9264 				/* LDV_COMMENT_BEGIN_PREP */
 9265 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9266 				#define MACB_RX_BUFFER_SIZE	128
 9267 				#define RX_BUFFER_MULTIPLE	64  
 9268 				#define RX_RING_SIZE		512 
 9269 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9270 				#define TX_RING_SIZE		128 
 9271 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9272 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9273 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9274 				 | MACB_BIT(ISR_ROVR))
 9275 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9276 					| MACB_BIT(ISR_RLE)		\
 9277 					| MACB_BIT(TXERR))
 9278 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9279 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9280 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9281 				#define GEM_MTU_MIN_SIZE	68
 9282 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9283 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9284 				#define MACB_HALT_TIMEOUT	1230
 9285 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9286 				#endif
 9287 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9288 				#endif
 9289 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9290 				#endif
 9291 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9292 				#endif
 9293 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9294 				#endif
 9295 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9296 				#endif
 9297 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9298 				#endif
 9299 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9300 				#endif
 9301 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9302 				#endif
 9303 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9304 				#endif
 9305 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9306 				#endif
 9307 				/* LDV_COMMENT_END_PREP */
 9308 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "zynqmp_config" */
 9309 				ldv_handler_precall();
 9310 				macb_init( var_group7);
 9311 				/* LDV_COMMENT_BEGIN_PREP */
 9312 				#if defined(CONFIG_OF)
 9313 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9314 				#define AT91ETHER_MAX_RX_DESCR	9
 9315 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9316 				#endif
 9317 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9318 				#endif
 9319 				#endif 
 9320 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9321 				#endif
 9322 				/* LDV_COMMENT_END_PREP */
 9323 				
 9324 
 9325 				
 9326 
 9327 			}
 9328 
 9329 			break;
 9330 			case 41: {
 9331 
 9332 				/** STRUCT: struct type: macb_config, struct name: zynq_config **/
 9333 				
 9334 
 9335 				/* content: static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk)*/
 9336 				/* LDV_COMMENT_BEGIN_PREP */
 9337 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9338 				#define MACB_RX_BUFFER_SIZE	128
 9339 				#define RX_BUFFER_MULTIPLE	64  
 9340 				#define RX_RING_SIZE		512 
 9341 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9342 				#define TX_RING_SIZE		128 
 9343 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9344 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9345 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9346 				 | MACB_BIT(ISR_ROVR))
 9347 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9348 					| MACB_BIT(ISR_RLE)		\
 9349 					| MACB_BIT(TXERR))
 9350 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9351 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9352 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9353 				#define GEM_MTU_MIN_SIZE	68
 9354 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9355 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9356 				#define MACB_HALT_TIMEOUT	1230
 9357 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9358 				#endif
 9359 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9360 				#endif
 9361 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9362 				#endif
 9363 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9364 				#endif
 9365 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9366 				#endif
 9367 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9368 				#endif
 9369 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9370 				#endif
 9371 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9372 				#endif
 9373 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9374 				#endif
 9375 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9376 				#endif
 9377 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9378 				#endif
 9379 				/* LDV_COMMENT_END_PREP */
 9380 				/* LDV_COMMENT_FUNCTION_CALL Function from field "clk_init" from driver structure with callbacks "zynq_config" */
 9381 				ldv_handler_precall();
 9382 				macb_clk_init( var_group7, var_group8, var_macb_clk_init_75_p2, var_macb_clk_init_75_p3, var_macb_clk_init_75_p4);
 9383 				/* LDV_COMMENT_BEGIN_PREP */
 9384 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9385 				#endif
 9386 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9387 				#endif
 9388 				#if defined(CONFIG_OF)
 9389 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9390 				#define AT91ETHER_MAX_RX_DESCR	9
 9391 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9392 				#endif
 9393 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9394 				#endif
 9395 				#endif 
 9396 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9397 				#endif
 9398 				/* LDV_COMMENT_END_PREP */
 9399 				
 9400 
 9401 				
 9402 
 9403 			}
 9404 
 9405 			break;
 9406 			case 42: {
 9407 
 9408 				/** STRUCT: struct type: macb_config, struct name: zynq_config **/
 9409 				
 9410 
 9411 				/* content: static int macb_init(struct platform_device *pdev)*/
 9412 				/* LDV_COMMENT_BEGIN_PREP */
 9413 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9414 				#define MACB_RX_BUFFER_SIZE	128
 9415 				#define RX_BUFFER_MULTIPLE	64  
 9416 				#define RX_RING_SIZE		512 
 9417 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9418 				#define TX_RING_SIZE		128 
 9419 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9420 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9421 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9422 				 | MACB_BIT(ISR_ROVR))
 9423 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9424 					| MACB_BIT(ISR_RLE)		\
 9425 					| MACB_BIT(TXERR))
 9426 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9427 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9428 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9429 				#define GEM_MTU_MIN_SIZE	68
 9430 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9431 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9432 				#define MACB_HALT_TIMEOUT	1230
 9433 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9434 				#endif
 9435 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9436 				#endif
 9437 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9438 				#endif
 9439 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9440 				#endif
 9441 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9442 				#endif
 9443 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9444 				#endif
 9445 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9446 				#endif
 9447 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9448 				#endif
 9449 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9450 				#endif
 9451 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9452 				#endif
 9453 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9454 				#endif
 9455 				/* LDV_COMMENT_END_PREP */
 9456 				/* LDV_COMMENT_FUNCTION_CALL Function from field "init" from driver structure with callbacks "zynq_config" */
 9457 				ldv_handler_precall();
 9458 				macb_init( var_group7);
 9459 				/* LDV_COMMENT_BEGIN_PREP */
 9460 				#if defined(CONFIG_OF)
 9461 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9462 				#define AT91ETHER_MAX_RX_DESCR	9
 9463 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9464 				#endif
 9465 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9466 				#endif
 9467 				#endif 
 9468 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9469 				#endif
 9470 				/* LDV_COMMENT_END_PREP */
 9471 				
 9472 
 9473 				
 9474 
 9475 			}
 9476 
 9477 			break;
 9478 			case 43: {
 9479 
 9480 				/** STRUCT: struct type: platform_driver, struct name: macb_driver **/
 9481 				if(ldv_s_macb_driver_platform_driver==0) {
 9482 
 9483 				/* content: static int macb_probe(struct platform_device *pdev)*/
 9484 				/* LDV_COMMENT_BEGIN_PREP */
 9485 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9486 				#define MACB_RX_BUFFER_SIZE	128
 9487 				#define RX_BUFFER_MULTIPLE	64  
 9488 				#define RX_RING_SIZE		512 
 9489 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9490 				#define TX_RING_SIZE		128 
 9491 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9492 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9493 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9494 				 | MACB_BIT(ISR_ROVR))
 9495 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9496 					| MACB_BIT(ISR_RLE)		\
 9497 					| MACB_BIT(TXERR))
 9498 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9499 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9500 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9501 				#define GEM_MTU_MIN_SIZE	68
 9502 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9503 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9504 				#define MACB_HALT_TIMEOUT	1230
 9505 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9506 				#endif
 9507 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9508 				#endif
 9509 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9510 				#endif
 9511 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9512 				#endif
 9513 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9514 				#endif
 9515 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9516 				#endif
 9517 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9518 				#endif
 9519 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9520 				#endif
 9521 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9522 				#endif
 9523 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9524 				#endif
 9525 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9526 				#endif
 9527 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9528 				#endif
 9529 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9530 				#endif
 9531 				#if defined(CONFIG_OF)
 9532 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9533 				#define AT91ETHER_MAX_RX_DESCR	9
 9534 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9535 				#endif
 9536 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9537 				#endif
 9538 				#endif 
 9539 				/* LDV_COMMENT_END_PREP */
 9540 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "macb_driver". Standard test that the function returns a correct result. */
 9541 				res_macb_probe_86 = macb_probe( var_group7);
 9542 				 ldv_check_return_value(res_macb_probe_86);
 9543 				 ldv_check_return_value_probe(res_macb_probe_86);
 9544 				 if(res_macb_probe_86) 
 9545 					goto ldv_module_exit;
 9546 				ldv_s_macb_driver_platform_driver++;
 9547 
 9548 				}
 9549 
 9550 			}
 9551 
 9552 			break;
 9553 			case 44: {
 9554 
 9555 				/** STRUCT: struct type: platform_driver, struct name: macb_driver **/
 9556 				if(ldv_s_macb_driver_platform_driver==1) {
 9557 
 9558 				/* content: static int macb_remove(struct platform_device *pdev)*/
 9559 				/* LDV_COMMENT_BEGIN_PREP */
 9560 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9561 				#define MACB_RX_BUFFER_SIZE	128
 9562 				#define RX_BUFFER_MULTIPLE	64  
 9563 				#define RX_RING_SIZE		512 
 9564 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9565 				#define TX_RING_SIZE		128 
 9566 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9567 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9568 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9569 				 | MACB_BIT(ISR_ROVR))
 9570 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9571 					| MACB_BIT(ISR_RLE)		\
 9572 					| MACB_BIT(TXERR))
 9573 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9574 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9575 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9576 				#define GEM_MTU_MIN_SIZE	68
 9577 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9578 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9579 				#define MACB_HALT_TIMEOUT	1230
 9580 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9581 				#endif
 9582 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9583 				#endif
 9584 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9585 				#endif
 9586 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9587 				#endif
 9588 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9589 				#endif
 9590 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9591 				#endif
 9592 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9593 				#endif
 9594 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9595 				#endif
 9596 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9597 				#endif
 9598 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9599 				#endif
 9600 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9601 				#endif
 9602 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9603 				#endif
 9604 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9605 				#endif
 9606 				#if defined(CONFIG_OF)
 9607 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9608 				#define AT91ETHER_MAX_RX_DESCR	9
 9609 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9610 				#endif
 9611 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9612 				#endif
 9613 				#endif 
 9614 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9615 				#endif
 9616 				/* LDV_COMMENT_END_PREP */
 9617 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "macb_driver" */
 9618 				ldv_handler_precall();
 9619 				macb_remove( var_group7);
 9620 				ldv_s_macb_driver_platform_driver=0;
 9621 
 9622 				}
 9623 
 9624 			}
 9625 
 9626 			break;
 9627 			case 45: {
 9628 
 9629 				/** CALLBACK SECTION request_irq **/
 9630 				LDV_IN_INTERRUPT=2;
 9631 
 9632 				/* content: static irqreturn_t at91ether_interrupt(int irq, void *dev_id)*/
 9633 				/* LDV_COMMENT_BEGIN_PREP */
 9634 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9635 				#define MACB_RX_BUFFER_SIZE	128
 9636 				#define RX_BUFFER_MULTIPLE	64  
 9637 				#define RX_RING_SIZE		512 
 9638 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9639 				#define TX_RING_SIZE		128 
 9640 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9641 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9642 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9643 				 | MACB_BIT(ISR_ROVR))
 9644 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9645 					| MACB_BIT(ISR_RLE)		\
 9646 					| MACB_BIT(TXERR))
 9647 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9648 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9649 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9650 				#define GEM_MTU_MIN_SIZE	68
 9651 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9652 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9653 				#define MACB_HALT_TIMEOUT	1230
 9654 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9655 				#endif
 9656 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9657 				#endif
 9658 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9659 				#endif
 9660 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9661 				#endif
 9662 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9663 				#endif
 9664 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9665 				#endif
 9666 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9667 				#endif
 9668 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9669 				#endif
 9670 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9671 				#endif
 9672 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9673 				#endif
 9674 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9675 				#endif
 9676 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9677 				#endif
 9678 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9679 				#endif
 9680 				#if defined(CONFIG_OF)
 9681 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9682 				#define AT91ETHER_MAX_RX_DESCR	9
 9683 				/* LDV_COMMENT_END_PREP */
 9684 				/* LDV_COMMENT_FUNCTION_CALL */
 9685 				ldv_handler_precall();
 9686 				at91ether_interrupt( var_at91ether_interrupt_82_p0, var_at91ether_interrupt_82_p1);
 9687 				/* LDV_COMMENT_BEGIN_PREP */
 9688 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9689 				#endif
 9690 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9691 				#endif
 9692 				#endif 
 9693 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9694 				#endif
 9695 				/* LDV_COMMENT_END_PREP */
 9696 				LDV_IN_INTERRUPT=1;
 9697 
 9698 				
 9699 
 9700 			}
 9701 
 9702 			break;
 9703 			case 46: {
 9704 
 9705 				/** CALLBACK SECTION request_irq **/
 9706 				LDV_IN_INTERRUPT=2;
 9707 
 9708 				/* content: static irqreturn_t macb_interrupt(int irq, void *dev_id)*/
 9709 				/* LDV_COMMENT_BEGIN_PREP */
 9710 				#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 9711 				#define MACB_RX_BUFFER_SIZE	128
 9712 				#define RX_BUFFER_MULTIPLE	64  
 9713 				#define RX_RING_SIZE		512 
 9714 				#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 9715 				#define TX_RING_SIZE		128 
 9716 				#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 9717 				#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 9718 				#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 9719 				 | MACB_BIT(ISR_ROVR))
 9720 				#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 9721 					| MACB_BIT(ISR_RLE)		\
 9722 					| MACB_BIT(TXERR))
 9723 				#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 9724 				#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 9725 				#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 9726 				#define GEM_MTU_MIN_SIZE	68
 9727 				#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 9728 				#define MACB_WOL_ENABLED		(0x1 << 1)
 9729 				#define MACB_HALT_TIMEOUT	1230
 9730 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9731 				#endif
 9732 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9733 				#endif
 9734 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9735 				#endif
 9736 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9737 				#endif
 9738 				/* LDV_COMMENT_END_PREP */
 9739 				/* LDV_COMMENT_FUNCTION_CALL */
 9740 				ldv_handler_precall();
 9741 				macb_interrupt( var_macb_interrupt_34_p0, var_macb_interrupt_34_p1);
 9742 				/* LDV_COMMENT_BEGIN_PREP */
 9743 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9744 				#endif
 9745 				#if defined(DEBUG) && defined(VERBOSE_DEBUG)
 9746 				#endif
 9747 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9748 				#endif
 9749 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9750 				#endif
 9751 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9752 				#endif
 9753 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9754 				#endif
 9755 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9756 				#endif
 9757 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9758 				#endif
 9759 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9760 				#endif
 9761 				#if defined(CONFIG_OF)
 9762 				#define AT91ETHER_MAX_RBUFF_SZ	0x600
 9763 				#define AT91ETHER_MAX_RX_DESCR	9
 9764 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9765 				#endif
 9766 				#ifdef CONFIG_NET_POLL_CONTROLLER
 9767 				#endif
 9768 				#endif 
 9769 				#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 9770 				#endif
 9771 				/* LDV_COMMENT_END_PREP */
 9772 				LDV_IN_INTERRUPT=1;
 9773 
 9774 				
 9775 
 9776 			}
 9777 
 9778 			break;
 9779 			default: break;
 9780 
 9781 		}
 9782 
 9783 	}
 9784 
 9785 	ldv_module_exit: 
 9786 
 9787 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 9788 	ldv_final: ldv_check_final_state();
 9789 
 9790 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 9791 	return;
 9792 
 9793 }
 9794 #endif
 9795 
 9796 /* LDV_COMMENT_END_MAIN */
 9797 
 9798 				#line 10 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/5529/dscv_tempdir/dscv/ri/331_1a/drivers/net/ethernet/cadence/macb.o.c.prepared"
    1 
    2 #include <verifier/rcv.h>
    3 #include <kernel-model/ERR.inc>
    4 
    5 int LDV_DMA_MAP_CALLS = 0;
    6 
    7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
    8 void ldv_dma_map_page(void) {
    9  /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
   10  ldv_assert(LDV_DMA_MAP_CALLS == 0);
   11  /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
   12  LDV_DMA_MAP_CALLS++;
   13 }
   14 
   15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') checks the result of a previous dma mapping */
   16 void ldv_dma_mapping_error(void) {
   17  /* LDV_COMMENT_ASSERT No dma_mapping calls to verify */
   18  ldv_assert(LDV_DMA_MAP_CALLS != 0);
   19  /* LDV_COMMENT_CHANGE_STATE Mark the previous dma_mapping call as checked */
   20  LDV_DMA_MAP_CALLS--;
   21 }
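/*
 * Editorial sketch (not part of the generated model): the driver-side pattern
 * that these model functions enforce.  ldv_dma_map_page() is bound to the
 * kernel's dma_map_*() calls and ldv_dma_mapping_error() to dma_mapping_error(),
 * so every mapping must be checked before another mapping is made.  The function
 * and variable names below are hypothetical and the block is not compiled.
 */
#if 0	/* illustration only */
static int example_map_and_check(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* model: ldv_dma_map_page() fires here, LDV_DMA_MAP_CALLS becomes 1 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* model: ldv_dma_mapping_error() fires here, the counter drops back to 0 */
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif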
   22 
   23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
   24 void ldv_check_final_state(void) {
   25  /* LDV_COMMENT_ASSERT All dma_mapping calls should have been checked before the module is unloaded */
   26  ldv_assert(LDV_DMA_MAP_CALLS == 0);
   27 }
    1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because some static verifiers (like
    9    BLAST) don't accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   19 /* Infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
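/*
 * Editorial sketch (not part of the original header): how the nondeterministic
 * helpers above are typically combined with ldv_assume() in an environment
 * model, letting the verifier explore both a success and a failure outcome.
 * The function name ldv_example_nondet_result() is hypothetical.
 */
#if 0	/* illustration only */
static inline int ldv_example_nondet_result(void)
{
	if (ldv_undef_int())			/* nondeterministic branch choice */
		return 0;			/* model the success path */
	return ldv_undef_int_negative();	/* or an arbitrary negative errno */
}
#endif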
   47 
   48 /* Add an explicit model for the __builtin_expect GCC built-in. Without the model,
   49    its return value would be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
   65 /* The constant is used to simulate an error return from ldv_undef_ptr(). */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */
    1 #ifndef _LINUX_DMA_MAPPING_H
    2 #define _LINUX_DMA_MAPPING_H
    3 
    4 #include <linux/sizes.h>
    5 #include <linux/string.h>
    6 #include <linux/device.h>
    7 #include <linux/err.h>
    8 #include <linux/dma-debug.h>
    9 #include <linux/dma-direction.h>
   10 #include <linux/scatterlist.h>
   11 #include <linux/kmemcheck.h>
   12 #include <linux/bug.h>
   13 
   14 /**
   15  * List of possible attributes associated with a DMA mapping. The semantics
   16  * of each attribute should be defined in Documentation/DMA-attributes.txt.
   17  *
   18  * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
   19  * forces all pending DMA writes to complete.
   20  */
   21 #define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
   22 /*
   23  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
   24  * may be weakly ordered, that is that reads and writes may pass each other.
   25  */
   26 #define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
   27 /*
   28  * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
   29  * buffered to improve performance.
   30  */
   31 #define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
   32 /*
   33  * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
   34  * consistent or non-consistent memory as it sees fit.
   35  */
   36 #define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
   37 /*
   38  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
   39  * virtual mapping for the allocated buffer.
   40  */
   41 #define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
   42 /*
   43  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
   44  * the CPU cache for the given buffer assuming that it has been already
   45  * transferred to 'device' domain.
   46  */
   47 #define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
   48 /*
   49  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
   50  * in physical memory.
   51  */
   52 #define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
   53 /*
   54  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
   55  * that it's probably not worth the time to try to allocate memory in a way
   56  * that gives better TLB efficiency.
   57  */
   58 #define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
   59 /*
   60  * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
   61  * allocation failure reports (similarly to __GFP_NOWARN).
   62  */
   63 #define DMA_ATTR_NO_WARN	(1UL << 8)
   64 
   65 /*
   66  * A dma_addr_t can hold any valid DMA or bus address for the platform.
   67  * It can be given to a device to use as a DMA source or target.  A CPU cannot
   68  * reference a dma_addr_t directly because there may be translation between
   69  * its physical address space and the bus address space.
   70  */
   71 struct dma_map_ops {
   72 	void* (*alloc)(struct device *dev, size_t size,
   73 				dma_addr_t *dma_handle, gfp_t gfp,
   74 				unsigned long attrs);
   75 	void (*free)(struct device *dev, size_t size,
   76 			      void *vaddr, dma_addr_t dma_handle,
   77 			      unsigned long attrs);
   78 	int (*mmap)(struct device *, struct vm_area_struct *,
   79 			  void *, dma_addr_t, size_t,
   80 			  unsigned long attrs);
   81 
   82 	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
   83 			   dma_addr_t, size_t, unsigned long attrs);
   84 
   85 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
   86 			       unsigned long offset, size_t size,
   87 			       enum dma_data_direction dir,
   88 			       unsigned long attrs);
   89 	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
   90 			   size_t size, enum dma_data_direction dir,
   91 			   unsigned long attrs);
   92 	/*
   93 	 * map_sg returns 0 on error and a value > 0 on success.
   94 	 * It should never return a value < 0.
   95 	 */
   96 	int (*map_sg)(struct device *dev, struct scatterlist *sg,
   97 		      int nents, enum dma_data_direction dir,
   98 		      unsigned long attrs);
   99 	void (*unmap_sg)(struct device *dev,
  100 			 struct scatterlist *sg, int nents,
  101 			 enum dma_data_direction dir,
  102 			 unsigned long attrs);
  103 	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
  104 			       size_t size, enum dma_data_direction dir,
  105 			       unsigned long attrs);
  106 	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
  107 			   size_t size, enum dma_data_direction dir,
  108 			   unsigned long attrs);
  109 	void (*sync_single_for_cpu)(struct device *dev,
  110 				    dma_addr_t dma_handle, size_t size,
  111 				    enum dma_data_direction dir);
  112 	void (*sync_single_for_device)(struct device *dev,
  113 				       dma_addr_t dma_handle, size_t size,
  114 				       enum dma_data_direction dir);
  115 	void (*sync_sg_for_cpu)(struct device *dev,
  116 				struct scatterlist *sg, int nents,
  117 				enum dma_data_direction dir);
  118 	void (*sync_sg_for_device)(struct device *dev,
  119 				   struct scatterlist *sg, int nents,
  120 				   enum dma_data_direction dir);
  121 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
  122 	int (*dma_supported)(struct device *dev, u64 mask);
  123 	int (*set_dma_mask)(struct device *dev, u64 mask);
  124 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
  125 	u64 (*get_required_mask)(struct device *dev);
  126 #endif
  127 	int is_phys;
  128 };
  129 
  130 extern struct dma_map_ops dma_noop_ops;
  131 
  132 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
  133 
  134 #define DMA_MASK_NONE	0x0ULL
  135 
  136 static inline int valid_dma_direction(int dma_direction)
  137 {
  138 	return ((dma_direction == DMA_BIDIRECTIONAL) ||
  139 		(dma_direction == DMA_TO_DEVICE) ||
  140 		(dma_direction == DMA_FROM_DEVICE));
  141 }
  142 
  143 static inline int is_device_dma_capable(struct device *dev)
  144 {
  145 	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
  146 }
  147 
  148 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  149 /*
  150  * These three functions are only for dma allocator.
  151  * Don't use them in device drivers.
  152  */
  153 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
  154 				       dma_addr_t *dma_handle, void **ret);
  155 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
  156 
  157 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
  158 			    void *cpu_addr, size_t size, int *ret);
  159 #else
  160 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
  161 #define dma_release_from_coherent(dev, order, vaddr) (0)
  162 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
  163 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  164 
  165 #ifdef CONFIG_HAS_DMA
  166 #include <asm/dma-mapping.h>
  167 #else
  168 /*
  169  * Define the dma api to allow compilation but not linking of
  170  * dma dependent code.  Code that depends on the dma-mapping
  171  * API needs to set 'depends on HAS_DMA' in its Kconfig
  172  */
  173 extern struct dma_map_ops bad_dma_ops;
  174 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  175 {
  176 	return &bad_dma_ops;
  177 }
  178 #endif
  179 
  180 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
  181 					      size_t size,
  182 					      enum dma_data_direction dir,
  183 					      unsigned long attrs)
  184 {
  185 	struct dma_map_ops *ops = get_dma_ops(dev);
  186 	dma_addr_t addr;
  187 
  188 	kmemcheck_mark_initialized(ptr, size);
  189 	BUG_ON(!valid_dma_direction(dir));
  190 	addr = ops->map_page(dev, virt_to_page(ptr),
  191 			     offset_in_page(ptr), size,
  192 			     dir, attrs);
  193 	debug_dma_map_page(dev, virt_to_page(ptr),
  194 			   offset_in_page(ptr), size,
  195 			   dir, addr, true);
  196 	return addr;
  197 }
  198 
  199 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
  200 					  size_t size,
  201 					  enum dma_data_direction dir,
  202 					  unsigned long attrs)
  203 {
  204 	struct dma_map_ops *ops = get_dma_ops(dev);
  205 
  206 	BUG_ON(!valid_dma_direction(dir));
  207 	if (ops->unmap_page)
  208 		ops->unmap_page(dev, addr, size, dir, attrs);
  209 	debug_dma_unmap_page(dev, addr, size, dir, true);
  210 }
  211 
  212 /*
  213  * dma_map_sg_attrs returns 0 on error and > 0 on success.
  214  * It should never return a value < 0.
  215  */
  216 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
  217 				   int nents, enum dma_data_direction dir,
  218 				   unsigned long attrs)
  219 {
  220 	struct dma_map_ops *ops = get_dma_ops(dev);
  221 	int i, ents;
  222 	struct scatterlist *s;
  223 
  224 	for_each_sg(sg, s, nents, i)
  225 		kmemcheck_mark_initialized(sg_virt(s), s->length);
  226 	BUG_ON(!valid_dma_direction(dir));
  227 	ents = ops->map_sg(dev, sg, nents, dir, attrs);
  228 	BUG_ON(ents < 0);
  229 	debug_dma_map_sg(dev, sg, nents, ents, dir);
  230 
  231 	return ents;
  232 }
  233 
  234 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
  235 				      int nents, enum dma_data_direction dir,
  236 				      unsigned long attrs)
  237 {
  238 	struct dma_map_ops *ops = get_dma_ops(dev);
  239 
  240 	BUG_ON(!valid_dma_direction(dir));
  241 	debug_dma_unmap_sg(dev, sg, nents, dir);
  242 	if (ops->unmap_sg)
  243 		ops->unmap_sg(dev, sg, nents, dir, attrs);
  244 }
  245 
  246 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
  247 				      size_t offset, size_t size,
  248 				      enum dma_data_direction dir)
  249 {
  250 	struct dma_map_ops *ops = get_dma_ops(dev);
  251 	dma_addr_t addr;
  252 
  253 	kmemcheck_mark_initialized(page_address(page) + offset, size);
  254 	BUG_ON(!valid_dma_direction(dir));
  255 	addr = ops->map_page(dev, page, offset, size, dir, 0);
  256 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
  257 
  258 	return addr;
  259 }
  260 
  261 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
  262 				  size_t size, enum dma_data_direction dir)
  263 {
  264 	struct dma_map_ops *ops = get_dma_ops(dev);
  265 
  266 	BUG_ON(!valid_dma_direction(dir));
  267 	if (ops->unmap_page)
  268 		ops->unmap_page(dev, addr, size, dir, 0);
  269 	debug_dma_unmap_page(dev, addr, size, dir, false);
  270 }
  271 
  272 static inline dma_addr_t dma_map_resource(struct device *dev,
  273 					  phys_addr_t phys_addr,
  274 					  size_t size,
  275 					  enum dma_data_direction dir,
  276 					  unsigned long attrs)
  277 {
  278 	struct dma_map_ops *ops = get_dma_ops(dev);
  279 	dma_addr_t addr;
  280 
  281 	BUG_ON(!valid_dma_direction(dir));
  282 
  283 	/* Don't allow RAM to be mapped */
  284 	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
  285 
  286 	addr = phys_addr;
  287 	if (ops->map_resource)
  288 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
  289 
  290 	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
  291 
  292 	return addr;
  293 }
  294 
  295 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
  296 				      size_t size, enum dma_data_direction dir,
  297 				      unsigned long attrs)
  298 {
  299 	struct dma_map_ops *ops = get_dma_ops(dev);
  300 
  301 	BUG_ON(!valid_dma_direction(dir));
  302 	if (ops->unmap_resource)
  303 		ops->unmap_resource(dev, addr, size, dir, attrs);
  304 	debug_dma_unmap_resource(dev, addr, size, dir);
  305 }
  306 
  307 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
  308 					   size_t size,
  309 					   enum dma_data_direction dir)
  310 {
  311 	struct dma_map_ops *ops = get_dma_ops(dev);
  312 
  313 	BUG_ON(!valid_dma_direction(dir));
  314 	if (ops->sync_single_for_cpu)
  315 		ops->sync_single_for_cpu(dev, addr, size, dir);
  316 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
  317 }
  318 
  319 static inline void dma_sync_single_for_device(struct device *dev,
  320 					      dma_addr_t addr, size_t size,
  321 					      enum dma_data_direction dir)
  322 {
  323 	struct dma_map_ops *ops = get_dma_ops(dev);
  324 
  325 	BUG_ON(!valid_dma_direction(dir));
  326 	if (ops->sync_single_for_device)
  327 		ops->sync_single_for_device(dev, addr, size, dir);
  328 	debug_dma_sync_single_for_device(dev, addr, size, dir);
  329 }
  330 
  331 static inline void dma_sync_single_range_for_cpu(struct device *dev,
  332 						 dma_addr_t addr,
  333 						 unsigned long offset,
  334 						 size_t size,
  335 						 enum dma_data_direction dir)
  336 {
  337 	const struct dma_map_ops *ops = get_dma_ops(dev);
  338 
  339 	BUG_ON(!valid_dma_direction(dir));
  340 	if (ops->sync_single_for_cpu)
  341 		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
  342 	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
  343 }
  344 
  345 static inline void dma_sync_single_range_for_device(struct device *dev,
  346 						    dma_addr_t addr,
  347 						    unsigned long offset,
  348 						    size_t size,
  349 						    enum dma_data_direction dir)
  350 {
  351 	const struct dma_map_ops *ops = get_dma_ops(dev);
  352 
  353 	BUG_ON(!valid_dma_direction(dir));
  354 	if (ops->sync_single_for_device)
  355 		ops->sync_single_for_device(dev, addr + offset, size, dir);
  356 	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
  357 }
  358 
  359 static inline void
  360 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
  361 		    int nelems, enum dma_data_direction dir)
  362 {
  363 	struct dma_map_ops *ops = get_dma_ops(dev);
  364 
  365 	BUG_ON(!valid_dma_direction(dir));
  366 	if (ops->sync_sg_for_cpu)
  367 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
  368 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
  369 }
  370 
  371 static inline void
  372 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  373 		       int nelems, enum dma_data_direction dir)
  374 {
  375 	struct dma_map_ops *ops = get_dma_ops(dev);
  376 
  377 	BUG_ON(!valid_dma_direction(dir));
  378 	if (ops->sync_sg_for_device)
  379 		ops->sync_sg_for_device(dev, sg, nelems, dir);
  380 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
  381 
  382 }
  383 
  384 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
  385 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
  386 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
  387 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
  388 
  389 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
  390 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
  391 
  392 void *dma_common_contiguous_remap(struct page *page, size_t size,
  393 			unsigned long vm_flags,
  394 			pgprot_t prot, const void *caller);
  395 
  396 void *dma_common_pages_remap(struct page **pages, size_t size,
  397 			unsigned long vm_flags, pgprot_t prot,
  398 			const void *caller);
  399 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
  400 
  401 /**
  402  * dma_mmap_attrs - map a coherent DMA allocation into user space
  403  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  404  * @vma: vm_area_struct describing requested user mapping
  405  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
  406  * @handle: device-view address returned from dma_alloc_attrs
  407  * @size: size of memory originally requested in dma_alloc_attrs
  408  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
  409  *
  410  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
  411  * into user space.  The coherent DMA buffer must not be freed by the
  412  * driver until the user space mapping has been released.
  413  */
  414 static inline int
  415 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
  416 	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
  417 {
  418 	struct dma_map_ops *ops = get_dma_ops(dev);
  419 	BUG_ON(!ops);
  420 	if (ops->mmap)
  421 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
  422 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
  423 }
  424 
  425 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
  426 
  427 int
  428 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
  429 		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
  430 
  431 static inline int
  432 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
  433 		      dma_addr_t dma_addr, size_t size,
  434 		      unsigned long attrs)
  435 {
  436 	struct dma_map_ops *ops = get_dma_ops(dev);
  437 	BUG_ON(!ops);
  438 	if (ops->get_sgtable)
  439 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
  440 					attrs);
  441 	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
  442 }
  443 
  444 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
  445 
  446 #ifndef arch_dma_alloc_attrs
  447 #define arch_dma_alloc_attrs(dev, flag)	(true)
  448 #endif
  449 
  450 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
  451 				       dma_addr_t *dma_handle, gfp_t flag,
  452 				       unsigned long attrs)
  453 {
  454 	struct dma_map_ops *ops = get_dma_ops(dev);
  455 	void *cpu_addr;
  456 
  457 	BUG_ON(!ops);
  458 
  459 	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
  460 		return cpu_addr;
  461 
  462 	if (!arch_dma_alloc_attrs(&dev, &flag))
  463 		return NULL;
  464 	if (!ops->alloc)
  465 		return NULL;
  466 
  467 	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
  468 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
  469 	return cpu_addr;
  470 }
  471 
  472 static inline void dma_free_attrs(struct device *dev, size_t size,
  473 				     void *cpu_addr, dma_addr_t dma_handle,
  474 				     unsigned long attrs)
  475 {
  476 	struct dma_map_ops *ops = get_dma_ops(dev);
  477 
  478 	BUG_ON(!ops);
  479 	WARN_ON(irqs_disabled());
  480 
  481 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
  482 		return;
  483 
  484 	if (!ops->free || !cpu_addr)
  485 		return;
  486 
  487 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
  488 	ops->free(dev, size, cpu_addr, dma_handle, attrs);
  489 }
  490 
  491 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  492 		dma_addr_t *dma_handle, gfp_t flag)
  493 {
  494 	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
  495 }
  496 
  497 static inline void dma_free_coherent(struct device *dev, size_t size,
  498 		void *cpu_addr, dma_addr_t dma_handle)
  499 {
  500 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
  501 }
  502 
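/*
 * Editor's illustrative sketch (not part of the traced source): the usual
 * coherent-allocation pattern built on the wrappers above.  The descriptor
 * ring size and the way the bus address reaches the hardware are
 * hypothetical.
 */
static int my_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);

	if (!ring)
		return -ENOMEM;
	/* ... write ring_dma into a device register, use ring from the CPU ... */
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}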
  503 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
  504 		dma_addr_t *dma_handle, gfp_t gfp)
  505 {
  506 	return dma_alloc_attrs(dev, size, dma_handle, gfp,
  507 			       DMA_ATTR_NON_CONSISTENT);
  508 }
  509 
  510 static inline void dma_free_noncoherent(struct device *dev, size_t size,
  511 		void *cpu_addr, dma_addr_t dma_handle)
  512 {
  513 	dma_free_attrs(dev, size, cpu_addr, dma_handle,
  514 		       DMA_ATTR_NON_CONSISTENT);
  515 }
  516 
  517 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  518 {
  519 	debug_dma_mapping_error(dev, dma_addr);
  520 
  521 	if (get_dma_ops(dev)->mapping_error)
  522 		return get_dma_ops(dev)->mapping_error(dev, dma_addr);
  523 
  524 #ifdef DMA_ERROR_CODE
  525 	return dma_addr == DMA_ERROR_CODE;
  526 #else
  527 	return 0;
  528 #endif
  529 }
  530 
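/*
 * Editor's illustrative sketch (not part of the traced source): every
 * streaming mapping should be checked with dma_mapping_error() before the
 * handle is used.  dma_map_single() is assumed to be declared earlier in
 * this header; the helper below is hypothetical.
 */
static int my_map_for_tx(struct device *dev, void *buf, size_t len,
			 dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	*handle = addr;
	return 0;
}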
  531 #ifndef HAVE_ARCH_DMA_SUPPORTED
  532 static inline int dma_supported(struct device *dev, u64 mask)
  533 {
  534 	struct dma_map_ops *ops = get_dma_ops(dev);
  535 
  536 	if (!ops)
  537 		return 0;
  538 	if (!ops->dma_supported)
  539 		return 1;
  540 	return ops->dma_supported(dev, mask);
  541 }
  542 #endif
  543 
  544 #ifndef HAVE_ARCH_DMA_SET_MASK
  545 static inline int dma_set_mask(struct device *dev, u64 mask)
  546 {
  547 	struct dma_map_ops *ops = get_dma_ops(dev);
  548 
  549 	if (ops->set_dma_mask)
  550 		return ops->set_dma_mask(dev, mask);
  551 
  552 	if (!dev->dma_mask || !dma_supported(dev, mask))
  553 		return -EIO;
  554 	*dev->dma_mask = mask;
  555 	return 0;
  556 }
  557 #endif
  558 
  559 static inline u64 dma_get_mask(struct device *dev)
  560 {
  561 	if (dev && dev->dma_mask && *dev->dma_mask)
  562 		return *dev->dma_mask;
  563 	return DMA_BIT_MASK(32);
  564 }
  565 
  566 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
  567 int dma_set_coherent_mask(struct device *dev, u64 mask);
  568 #else
  569 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
  570 {
  571 	if (!dma_supported(dev, mask))
  572 		return -EIO;
  573 	dev->coherent_dma_mask = mask;
  574 	return 0;
  575 }
  576 #endif
  577 
  578 /*
  579  * Set both the DMA mask and the coherent DMA mask to the same thing.
  580  * Note that we don't check the return value from dma_set_coherent_mask()
  581  * as the DMA API guarantees that the coherent DMA mask can be set to
  582  * the same or smaller than the streaming DMA mask.
  583  */
  584 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
  585 {
  586 	int rc = dma_set_mask(dev, mask);
  587 	if (rc == 0)
  588 		dma_set_coherent_mask(dev, mask);
  589 	return rc;
  590 }
  591 
  592 /*
  593  * Similar to the above, except it deals with the case where the device
  594  * does not have dev->dma_mask appropriately setup.
  595  */
  596 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
  597 {
  598 	dev->dma_mask = &dev->coherent_dma_mask;
  599 	return dma_set_mask_and_coherent(dev, mask);
  600 }
  601 
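/*
 * Editor's illustrative sketch (not part of the traced source): a probe
 * routine typically asks for the widest mask the hardware supports and
 * falls back to 32 bits, relying on the guarantee documented above.
 */
static int my_set_dma_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}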
  602 extern u64 dma_get_required_mask(struct device *dev);
  603 
  604 #ifndef arch_setup_dma_ops
  605 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
  606 				      u64 size, const struct iommu_ops *iommu,
  607 				      bool coherent) { }
  608 #endif
  609 
  610 #ifndef arch_teardown_dma_ops
  611 static inline void arch_teardown_dma_ops(struct device *dev) { }
  612 #endif
  613 
  614 static inline unsigned int dma_get_max_seg_size(struct device *dev)
  615 {
  616 	if (dev->dma_parms && dev->dma_parms->max_segment_size)
  617 		return dev->dma_parms->max_segment_size;
  618 	return SZ_64K;
  619 }
  620 
  621 static inline unsigned int dma_set_max_seg_size(struct device *dev,
  622 						unsigned int size)
  623 {
  624 	if (dev->dma_parms) {
  625 		dev->dma_parms->max_segment_size = size;
  626 		return 0;
  627 	}
  628 	return -EIO;
  629 }
  630 
  631 static inline unsigned long dma_get_seg_boundary(struct device *dev)
  632 {
  633 	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
  634 		return dev->dma_parms->segment_boundary_mask;
  635 	return DMA_BIT_MASK(32);
  636 }
  637 
  638 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
  639 {
  640 	if (dev->dma_parms) {
  641 		dev->dma_parms->segment_boundary_mask = mask;
  642 		return 0;
  643 	}
  644 	return -EIO;
  645 }
  646 
  647 #ifndef dma_max_pfn
  648 static inline unsigned long dma_max_pfn(struct device *dev)
  649 {
  650 	return *dev->dma_mask >> PAGE_SHIFT;
  651 }
  652 #endif
  653 
  654 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
  655 					dma_addr_t *dma_handle, gfp_t flag)
  656 {
  657 	void *ret = dma_alloc_coherent(dev, size, dma_handle,
  658 				       flag | __GFP_ZERO);
  659 	return ret;
  660 }
  661 
  662 #ifdef CONFIG_HAS_DMA
  663 static inline int dma_get_cache_alignment(void)
  664 {
  665 #ifdef ARCH_DMA_MINALIGN
  666 	return ARCH_DMA_MINALIGN;
  667 #endif
  668 	return 1;
  669 }
  670 #endif
  671 
  672 /* flags for the coherent memory api */
  673 #define	DMA_MEMORY_MAP			0x01
  674 #define DMA_MEMORY_IO			0x02
  675 #define DMA_MEMORY_INCLUDES_CHILDREN	0x04
  676 #define DMA_MEMORY_EXCLUSIVE		0x08
  677 
  678 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  679 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  680 				dma_addr_t device_addr, size_t size, int flags);
  681 void dma_release_declared_memory(struct device *dev);
  682 void *dma_mark_declared_memory_occupied(struct device *dev,
  683 					dma_addr_t device_addr, size_t size);
  684 #else
  685 static inline int
  686 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
  687 			    dma_addr_t device_addr, size_t size, int flags)
  688 {
  689 	return 0;
  690 }
  691 
  692 static inline void
  693 dma_release_declared_memory(struct device *dev)
  694 {
  695 }
  696 
  697 static inline void *
  698 dma_mark_declared_memory_occupied(struct device *dev,
  699 				  dma_addr_t device_addr, size_t size)
  700 {
  701 	return ERR_PTR(-EBUSY);
  702 }
  703 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  704 
  705 /*
  706  * Managed DMA API
  707  */
  708 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
  709 				 dma_addr_t *dma_handle, gfp_t gfp);
  710 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
  711 			       dma_addr_t dma_handle);
  712 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
  713 				    dma_addr_t *dma_handle, gfp_t gfp);
  714 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
  715 				  dma_addr_t dma_handle);
  716 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
  717 extern int dmam_declare_coherent_memory(struct device *dev,
  718 					phys_addr_t phys_addr,
  719 					dma_addr_t device_addr, size_t size,
  720 					int flags);
  721 extern void dmam_release_declared_memory(struct device *dev);
  722 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  723 static inline int dmam_declare_coherent_memory(struct device *dev,
  724 				phys_addr_t phys_addr, dma_addr_t device_addr,
  725 				size_t size, gfp_t gfp)
  726 {
  727 	return 0;
  728 }
  729 
  730 static inline void dmam_release_declared_memory(struct device *dev)
  731 {
  732 }
  733 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
  734 
  735 static inline void *dma_alloc_wc(struct device *dev, size_t size,
  736 				 dma_addr_t *dma_addr, gfp_t gfp)
  737 {
  738 	return dma_alloc_attrs(dev, size, dma_addr, gfp,
  739 			       DMA_ATTR_WRITE_COMBINE);
  740 }
  741 #ifndef dma_alloc_writecombine
  742 #define dma_alloc_writecombine dma_alloc_wc
  743 #endif
  744 
  745 static inline void dma_free_wc(struct device *dev, size_t size,
  746 			       void *cpu_addr, dma_addr_t dma_addr)
  747 {
  748 	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
  749 			      DMA_ATTR_WRITE_COMBINE);
  750 }
  751 #ifndef dma_free_writecombine
  752 #define dma_free_writecombine dma_free_wc
  753 #endif
  754 
  755 static inline int dma_mmap_wc(struct device *dev,
  756 			      struct vm_area_struct *vma,
  757 			      void *cpu_addr, dma_addr_t dma_addr,
  758 			      size_t size)
  759 {
  760 	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
  761 			      DMA_ATTR_WRITE_COMBINE);
  762 }
  763 #ifndef dma_mmap_writecombine
  764 #define dma_mmap_writecombine dma_mmap_wc
  765 #endif
  766 
  767 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
  768 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
  769 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
  770 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
  771 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
  772 #define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
  773 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
  774 #else
  775 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
  776 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
  777 #define dma_unmap_addr(PTR, ADDR_NAME)           (0)
  778 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
  779 #define dma_unmap_len(PTR, LEN_NAME)             (0)
  780 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
  781 #endif
  782 
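/*
 * Editor's illustrative sketch (not part of the traced source): the
 * DEFINE_DMA_UNMAP_* macros let a driver keep per-buffer unmap state only
 * on configurations that need it.  The structure and helper are hypothetical.
 */
struct my_tx_buffer {
	void *vaddr;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void my_record_mapping(struct my_tx_buffer *tb, dma_addr_t addr,
			      unsigned int size)
{
	dma_unmap_addr_set(tb, dma, addr);
	dma_unmap_len_set(tb, len, size);
	/* unmap later with dma_unmap_addr(tb, dma) / dma_unmap_len(tb, len) */
}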
  783 #endif
    1 #ifndef LINUX_KMEMCHECK_H
    2 #define LINUX_KMEMCHECK_H
    3 
    4 #include <linux/mm_types.h>
    5 #include <linux/types.h>
    6 
    7 #ifdef CONFIG_KMEMCHECK
    8 extern int kmemcheck_enabled;
    9 
   10 /* The slab-related functions. */
   11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
   12 void kmemcheck_free_shadow(struct page *page, int order);
   13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
   14 			  size_t size);
   15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
   16 
   17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
   18 			       gfp_t gfpflags);
   19 
   20 void kmemcheck_show_pages(struct page *p, unsigned int n);
   21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
   22 
   23 bool kmemcheck_page_is_tracked(struct page *p);
   24 
   25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
   26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
   27 void kmemcheck_mark_initialized(void *address, unsigned int n);
   28 void kmemcheck_mark_freed(void *address, unsigned int n);
   29 
   30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
   31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
   32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
   33 
   34 int kmemcheck_show_addr(unsigned long address);
   35 int kmemcheck_hide_addr(unsigned long address);
   36 
   37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
   38 
   39 /*
   40  * Bitfield annotations
   41  *
   42  * How to use: If you have a struct using bitfields, for example
   43  *
   44  *     struct a {
   45  *             int x:8, y:8;
   46  *     };
   47  *
   48  * then this should be rewritten as
   49  *
   50  *     struct a {
   51  *             kmemcheck_bitfield_begin(flags);
   52  *             int x:8, y:8;
   53  *             kmemcheck_bitfield_end(flags);
   54  *     };
   55  *
   56  * Now the "flags_begin" and "flags_end" members may be used to refer to the
   57  * beginning and end, respectively, of the bitfield (and things like
   58  * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
   59  * fields should be annotated:
   60  *
   61  *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
   62  *     kmemcheck_annotate_bitfield(a, flags);
   63  */
   64 #define kmemcheck_bitfield_begin(name)	\
   65 	int name##_begin[0];
   66 
   67 #define kmemcheck_bitfield_end(name)	\
   68 	int name##_end[0];
   69 
   70 #define kmemcheck_annotate_bitfield(ptr, name)				\
   71 	do {								\
   72 		int _n;							\
   73 									\
   74 		if (!ptr)						\
   75 			break;						\
   76 									\
   77 		_n = (long) &((ptr)->name##_end)			\
   78 			- (long) &((ptr)->name##_begin);		\
   79 		BUILD_BUG_ON(_n < 0);					\
   80 									\
   81 		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
   82 	} while (0)
   83 
   84 #define kmemcheck_annotate_variable(var)				\
   85 	do {								\
   86 		kmemcheck_mark_initialized(&(var), sizeof(var));	\
   87 	} while (0)							\
   88 
   89 #else
   90 #define kmemcheck_enabled 0
   91 
   92 static inline void
   93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
   94 {
   95 }
   96 
   97 static inline void
   98 kmemcheck_free_shadow(struct page *page, int order)
   99 {
  100 }
  101 
  102 static inline void
  103 kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
  104 		     size_t size)
  105 {
  106 }
  107 
  108 static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
  109 				       size_t size)
  110 {
  111 }
  112 
  113 static inline void kmemcheck_pagealloc_alloc(struct page *p,
  114 	unsigned int order, gfp_t gfpflags)
  115 {
  116 }
  117 
  118 static inline bool kmemcheck_page_is_tracked(struct page *p)
  119 {
  120 	return false;
  121 }
  122 
  123 static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
  124 {
  125 }
  126 
  127 static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
  128 {
  129 }
  130 
  131 static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
  132 {
  133 }
  134 
  135 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
  136 {
  137 }
  138 
  139 static inline void kmemcheck_mark_unallocated_pages(struct page *p,
  140 						    unsigned int n)
  141 {
  142 }
  143 
  144 static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
  145 						      unsigned int n)
  146 {
  147 }
  148 
  149 static inline void kmemcheck_mark_initialized_pages(struct page *p,
  150 						    unsigned int n)
  151 {
  152 }
  153 
  154 static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
  155 {
  156 	return true;
  157 }
  158 
  159 #define kmemcheck_bitfield_begin(name)
  160 #define kmemcheck_bitfield_end(name)
  161 #define kmemcheck_annotate_bitfield(ptr, name)	\
  162 	do {					\
  163 	} while (0)
  164 
  165 #define kmemcheck_annotate_variable(var)	\
  166 	do {					\
  167 	} while (0)
  168 
  169 #endif /* CONFIG_KMEMCHECK */
  170 
  171 #endif /* LINUX_KMEMCHECK_H */
    1 /*
    2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
    3  *		operating system.  INET is implemented using the  BSD Socket
    4  *		interface as the means of communication with the user level.
    5  *
    6  *		Definitions for the Interfaces handler.
    7  *
    8  * Version:	@(#)dev.h	1.0.10	08/12/93
    9  *
   10  * Authors:	Ross Biro
   11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   12  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
   13  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
   14  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
   15  *		Bjorn Ekwall. <bj0rn@blox.se>
   16  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
   17  *
   18  *		This program is free software; you can redistribute it and/or
   19  *		modify it under the terms of the GNU General Public License
   20  *		as published by the Free Software Foundation; either version
   21  *		2 of the License, or (at your option) any later version.
   22  *
   23  *		Moved to /usr/include/linux for NET3
   24  */
   25 #ifndef _LINUX_NETDEVICE_H
   26 #define _LINUX_NETDEVICE_H
   27 
   28 #include <linux/timer.h>
   29 #include <linux/bug.h>
   30 #include <linux/delay.h>
   31 #include <linux/atomic.h>
   32 #include <linux/prefetch.h>
   33 #include <asm/cache.h>
   34 #include <asm/byteorder.h>
   35 
   36 #include <linux/percpu.h>
   37 #include <linux/rculist.h>
   38 #include <linux/dmaengine.h>
   39 #include <linux/workqueue.h>
   40 #include <linux/dynamic_queue_limits.h>
   41 
   42 #include <linux/ethtool.h>
   43 #include <net/net_namespace.h>
   44 #include <net/dsa.h>
   45 #ifdef CONFIG_DCB
   46 #include <net/dcbnl.h>
   47 #endif
   48 #include <net/netprio_cgroup.h>
   49 
   50 #include <linux/netdev_features.h>
   51 #include <linux/neighbour.h>
   52 #include <uapi/linux/netdevice.h>
   53 #include <uapi/linux/if_bonding.h>
   54 #include <uapi/linux/pkt_cls.h>
   55 #include <linux/hashtable.h>
   56 
   57 struct netpoll_info;
   58 struct device;
   59 struct phy_device;
   60 /* 802.11 specific */
   61 struct wireless_dev;
   62 /* 802.15.4 specific */
   63 struct wpan_dev;
   64 struct mpls_dev;
   65 /* UDP Tunnel offloads */
   66 struct udp_tunnel_info;
   67 struct bpf_prog;
   68 
   69 void netdev_set_default_ethtool_ops(struct net_device *dev,
   70 				    const struct ethtool_ops *ops);
   71 
   72 /* Backlog congestion levels */
   73 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
   74 #define NET_RX_DROP		1	/* packet dropped */
   75 
   76 /*
   77  * Transmit return codes: transmit return codes originate from three different
   78  * namespaces:
   79  *
   80  * - qdisc return codes
   81  * - driver transmit return codes
   82  * - errno values
   83  *
   84  * Drivers are allowed to return any one of those in their hard_start_xmit()
   85  * function. Real network devices commonly used with qdiscs should only return
   86  * the driver transmit return codes though - when qdiscs are used, the actual
   87  * transmission happens asynchronously, so the value is not propagated to
   88  * higher layers. Virtual network devices transmit synchronously; in this case
   89  * the driver transmit return codes are consumed by dev_queue_xmit(), and all
   90  * others are propagated to higher layers.
   91  */
   92 
   93 /* qdisc ->enqueue() return codes. */
   94 #define NET_XMIT_SUCCESS	0x00
   95 #define NET_XMIT_DROP		0x01	/* skb dropped			*/
   96 #define NET_XMIT_CN		0x02	/* congestion notification	*/
   97 #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
   98 
   99 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  100  * indicates that the device will soon be dropping packets, or already drops
  101  * some packets of the same priority; prompting us to send less aggressively. */
  102 #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
  103 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
  104 
  105 /* Driver transmit return codes */
  106 #define NETDEV_TX_MASK		0xf0
  107 
  108 enum netdev_tx {
  109 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
  110 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
  111 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
  112 };
  113 typedef enum netdev_tx netdev_tx_t;
  114 
  115 /*
  116  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
  117  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
  118  */
  119 static inline bool dev_xmit_complete(int rc)
  120 {
  121 	/*
  122 	 * Positive cases with an skb consumed by a driver:
  123 	 * - successful transmission (rc == NETDEV_TX_OK)
  124 	 * - error while transmitting (rc < 0)
  125 	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
  126 	 */
  127 	if (likely(rc < NET_XMIT_MASK))
  128 		return true;
  129 
  130 	return false;
  131 }
  132 
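/*
 * Editor's illustrative sketch (not part of the traced source): a driver
 * ndo_start_xmit that stops its queue when the ring fills up instead of
 * relying on NETDEV_TX_BUSY, as the comment above recommends.  The my_*
 * helpers and private structure are hypothetical.
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	if (my_tx_ring_full(priv)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* rare if the queue is stopped early */
	}
	my_queue_frame(priv, skb);
	return NETDEV_TX_OK;
}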
  133 /*
  134  *	Compute the worst-case header length according to the protocols
  135  *	used.
  136  */
  137 
  138 #if defined(CONFIG_HYPERV_NET)
  139 # define LL_MAX_HEADER 128
  140 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
  141 # if defined(CONFIG_MAC80211_MESH)
  142 #  define LL_MAX_HEADER 128
  143 # else
  144 #  define LL_MAX_HEADER 96
  145 # endif
  146 #else
  147 # define LL_MAX_HEADER 32
  148 #endif
  149 
  150 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
  151     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
  152 #define MAX_HEADER LL_MAX_HEADER
  153 #else
  154 #define MAX_HEADER (LL_MAX_HEADER + 48)
  155 #endif
  156 
  157 /*
  158  *	Old network device statistics. Fields are native words
  159  *	(unsigned long) so they can be read and written atomically.
  160  */
  161 
  162 struct net_device_stats {
  163 	unsigned long	rx_packets;
  164 	unsigned long	tx_packets;
  165 	unsigned long	rx_bytes;
  166 	unsigned long	tx_bytes;
  167 	unsigned long	rx_errors;
  168 	unsigned long	tx_errors;
  169 	unsigned long	rx_dropped;
  170 	unsigned long	tx_dropped;
  171 	unsigned long	multicast;
  172 	unsigned long	collisions;
  173 	unsigned long	rx_length_errors;
  174 	unsigned long	rx_over_errors;
  175 	unsigned long	rx_crc_errors;
  176 	unsigned long	rx_frame_errors;
  177 	unsigned long	rx_fifo_errors;
  178 	unsigned long	rx_missed_errors;
  179 	unsigned long	tx_aborted_errors;
  180 	unsigned long	tx_carrier_errors;
  181 	unsigned long	tx_fifo_errors;
  182 	unsigned long	tx_heartbeat_errors;
  183 	unsigned long	tx_window_errors;
  184 	unsigned long	rx_compressed;
  185 	unsigned long	tx_compressed;
  186 };
  187 
  188 
  189 #include <linux/cache.h>
  190 #include <linux/skbuff.h>
  191 
  192 #ifdef CONFIG_RPS
  193 #include <linux/static_key.h>
  194 extern struct static_key rps_needed;
  195 #endif
  196 
  197 struct neighbour;
  198 struct neigh_parms;
  199 struct sk_buff;
  200 
  201 struct netdev_hw_addr {
  202 	struct list_head	list;
  203 	unsigned char		addr[MAX_ADDR_LEN];
  204 	unsigned char		type;
  205 #define NETDEV_HW_ADDR_T_LAN		1
  206 #define NETDEV_HW_ADDR_T_SAN		2
  207 #define NETDEV_HW_ADDR_T_SLAVE		3
  208 #define NETDEV_HW_ADDR_T_UNICAST	4
  209 #define NETDEV_HW_ADDR_T_MULTICAST	5
  210 	bool			global_use;
  211 	int			sync_cnt;
  212 	int			refcount;
  213 	int			synced;
  214 	struct rcu_head		rcu_head;
  215 };
  216 
  217 struct netdev_hw_addr_list {
  218 	struct list_head	list;
  219 	int			count;
  220 };
  221 
  222 #define netdev_hw_addr_list_count(l) ((l)->count)
  223 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
  224 #define netdev_hw_addr_list_for_each(ha, l) \
  225 	list_for_each_entry(ha, &(l)->list, list)
  226 
  227 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
  228 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
  229 #define netdev_for_each_uc_addr(ha, dev) \
  230 	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
  231 
  232 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
  233 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
  234 #define netdev_for_each_mc_addr(ha, dev) \
  235 	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
  236 
  237 struct hh_cache {
  238 	u16		hh_len;
  239 	u16		__pad;
  240 	seqlock_t	hh_lock;
  241 
  242 	/* cached hardware header; allow for machine alignment needs.        */
  243 #define HH_DATA_MOD	16
  244 #define HH_DATA_OFF(__len) \
  245 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  246 #define HH_DATA_ALIGN(__len) \
  247 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  248 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  249 };
  250 
  251 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
  252  * Alternative is:
  253  *   dev->hard_header_len ? (dev->hard_header_len +
  254  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  255  *
  256  * We could use other alignment values, but we must maintain the
  257  * relationship HH alignment <= LL alignment.
  258  */
  259 #define LL_RESERVED_SPACE(dev) \
  260 	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  261 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  262 	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  263 
  264 struct header_ops {
  265 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
  266 			   unsigned short type, const void *daddr,
  267 			   const void *saddr, unsigned int len);
  268 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
  269 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
  270 	void	(*cache_update)(struct hh_cache *hh,
  271 				const struct net_device *dev,
  272 				const unsigned char *haddr);
  273 	bool	(*validate)(const char *ll_header, unsigned int len);
  274 };
  275 
  276 /* These flag bits are private to the generic network queueing
  277  * layer; they may not be explicitly referenced by any other
  278  * code.
  279  */
  280 
  281 enum netdev_state_t {
  282 	__LINK_STATE_START,
  283 	__LINK_STATE_PRESENT,
  284 	__LINK_STATE_NOCARRIER,
  285 	__LINK_STATE_LINKWATCH_PENDING,
  286 	__LINK_STATE_DORMANT,
  287 };
  288 
  289 
  290 /*
  291  * This structure holds boot-time configured netdevice settings. They
  292  * are then used in the device probing.
  293  */
  294 struct netdev_boot_setup {
  295 	char name[IFNAMSIZ];
  296 	struct ifmap map;
  297 };
  298 #define NETDEV_BOOT_SETUP_MAX 8
  299 
  300 int __init netdev_boot_setup(char *str);
  301 
  302 /*
  303  * Structure for NAPI scheduling similar to tasklet but with weighting
  304  */
  305 struct napi_struct {
  306 	/* The poll_list must only be managed by the entity which
  307 	 * changes the state of the NAPI_STATE_SCHED bit.  This means
  308 	 * whoever atomically sets that bit can add this napi_struct
  309 	 * to the per-CPU poll_list, and whoever clears that bit
  310 	 * can remove from the list right before clearing the bit.
  311 	 */
  312 	struct list_head	poll_list;
  313 
  314 	unsigned long		state;
  315 	int			weight;
  316 	unsigned int		gro_count;
  317 	int			(*poll)(struct napi_struct *, int);
  318 #ifdef CONFIG_NETPOLL
  319 	spinlock_t		poll_lock;
  320 	int			poll_owner;
  321 #endif
  322 	struct net_device	*dev;
  323 	struct sk_buff		*gro_list;
  324 	struct sk_buff		*skb;
  325 	struct hrtimer		timer;
  326 	struct list_head	dev_list;
  327 	struct hlist_node	napi_hash_node;
  328 	unsigned int		napi_id;
  329 };
  330 
  331 enum {
  332 	NAPI_STATE_SCHED,	/* Poll is scheduled */
  333 	NAPI_STATE_DISABLE,	/* Disable pending */
  334 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
  335 	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
  336 	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
  337 };
  338 
  339 enum gro_result {
  340 	GRO_MERGED,
  341 	GRO_MERGED_FREE,
  342 	GRO_HELD,
  343 	GRO_NORMAL,
  344 	GRO_DROP,
  345 };
  346 typedef enum gro_result gro_result_t;
  347 
  348 /*
  349  * enum rx_handler_result - Possible return values for rx_handlers.
  350  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
  351  * further.
  352  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
  353  * case skb->dev was changed by rx_handler.
  354  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
  355  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
  356  *
  357  * rx_handlers are functions called from inside __netif_receive_skb(), to do
  358  * special processing of the skb, prior to delivery to protocol handlers.
  359  *
  360  * Currently, a net_device can only have a single rx_handler registered. Trying
  361  * to register a second rx_handler will return -EBUSY.
  362  *
  363  * To register a rx_handler on a net_device, use netdev_rx_handler_register().
  364  * To unregister a rx_handler on a net_device, use
  365  * netdev_rx_handler_unregister().
  366  *
  367  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
  368  * do with the skb.
  369  *
  370  * If the rx_handler consumed the skb in some way, it should return
  371  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
  372  * the skb to be delivered in some other way.
  373  *
  374  * If the rx_handler changed skb->dev, to divert the skb to another
  375  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
  376  * new device will be called if it exists.
  377  *
  378  * If the rx_handler decides the skb should be ignored, it should return
  379  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
  380  * are registered on exact device (ptype->dev == skb->dev).
  381  *
  382  * If the rx_handler didn't change skb->dev, but wants the skb to be normally
  383  * delivered, it should return RX_HANDLER_PASS.
  384  *
  385  * A device without a registered rx_handler will behave as if rx_handler
  386  * returned RX_HANDLER_PASS.
  387  */
  388 
  389 enum rx_handler_result {
  390 	RX_HANDLER_CONSUMED,
  391 	RX_HANDLER_ANOTHER,
  392 	RX_HANDLER_EXACT,
  393 	RX_HANDLER_PASS,
  394 };
  395 typedef enum rx_handler_result rx_handler_result_t;
  396 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
  397 
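/*
 * Editor's illustrative sketch (not part of the traced source): a minimal
 * rx_handler that leaves every skb for normal delivery.  A real handler
 * (bridge, bonding, macvlan, ...) would be registered with
 * netdev_rx_handler_register() and may change skb->dev before returning
 * RX_HANDLER_ANOTHER.
 */
static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!skb)
		return RX_HANDLER_PASS;
	/* ... inspect or redirect skb here ... */
	return RX_HANDLER_PASS;
}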
  398 void __napi_schedule(struct napi_struct *n);
  399 void __napi_schedule_irqoff(struct napi_struct *n);
  400 
  401 static inline bool napi_disable_pending(struct napi_struct *n)
  402 {
  403 	return test_bit(NAPI_STATE_DISABLE, &n->state);
  404 }
  405 
  406 /**
  407  *	napi_schedule_prep - check if NAPI can be scheduled
  408  *	@n: NAPI context
  409  *
  410  * Test if NAPI routine is already running, and if not mark
  411  * it as running.  This is used as a condition variable to
  412  * ensure only one NAPI poll instance runs.  We also make
  413  * sure there is no pending NAPI disable.
  414  */
  415 static inline bool napi_schedule_prep(struct napi_struct *n)
  416 {
  417 	return !napi_disable_pending(n) &&
  418 		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  419 }
  420 
  421 /**
  422  *	napi_schedule - schedule NAPI poll
  423  *	@n: NAPI context
  424  *
  425  * Schedule NAPI poll routine to be called if it is not already
  426  * running.
  427  */
  428 static inline void napi_schedule(struct napi_struct *n)
  429 {
  430 	if (napi_schedule_prep(n))
  431 		__napi_schedule(n);
  432 }
  433 
  434 /**
  435  *	napi_schedule_irqoff - schedule NAPI poll
  436  *	@n: NAPI context
  437  *
  438  * Variant of napi_schedule(), assuming hard irqs are masked.
  439  */
  440 static inline void napi_schedule_irqoff(struct napi_struct *n)
  441 {
  442 	if (napi_schedule_prep(n))
  443 		__napi_schedule_irqoff(n);
  444 }
  445 
  446 /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
  447 static inline bool napi_reschedule(struct napi_struct *napi)
  448 {
  449 	if (napi_schedule_prep(napi)) {
  450 		__napi_schedule(napi);
  451 		return true;
  452 	}
  453 	return false;
  454 }
  455 
  456 void __napi_complete(struct napi_struct *n);
  457 void napi_complete_done(struct napi_struct *n, int work_done);
  458 /**
  459  *	napi_complete - NAPI processing complete
  460  *	@n: NAPI context
  461  *
  462  * Mark NAPI processing as complete.
  463  * Consider using napi_complete_done() instead.
  464  */
  465 static inline void napi_complete(struct napi_struct *n)
  466 {
  467 	return napi_complete_done(n, 0);
  468 }
  469 
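/*
 * Editor's illustrative sketch (not part of the traced source): the usual
 * NAPI pattern built on the helpers above -- the interrupt handler masks
 * device interrupts and schedules the poll, and the poll routine completes
 * NAPI and unmasks interrupts once less than the full budget was used.
 * The my_* helpers and private structure are hypothetical.
 */
static irqreturn_t my_isr(int irq, void *data)
{
	struct my_priv *priv = data;

	my_mask_device_irqs(priv);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_process_rx(priv, budget);

	if (done < budget) {
		napi_complete_done(napi, done);
		my_unmask_device_irqs(priv);
	}
	return done;
}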
  470 /**
  471  *	napi_hash_add - add a NAPI to global hashtable
  472  *	@napi: NAPI context
  473  *
  474  * Generate a new napi_id and store a @napi under it in napi_hash.
  475  * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
  476  * Note: This is normally automatically done from netif_napi_add(),
  477  * so might disappear in a future Linux version.
  478  */
  479 void napi_hash_add(struct napi_struct *napi);
  480 
  481 /**
  482  *	napi_hash_del - remove a NAPI from global table
  483  *	@napi: NAPI context
  484  *
  485  * Warning: caller must observe RCU grace period
  486  * before freeing memory containing @napi, if
  487  * this function returns true.
  488  * Note: core networking stack automatically calls it
  489  * from netif_napi_del().
  490  * Drivers might want to call this helper to combine all
  491  * the needed RCU grace periods into a single one.
  492  */
  493 bool napi_hash_del(struct napi_struct *napi);
  494 
  495 /**
  496  *	napi_disable - prevent NAPI from scheduling
  497  *	@n: NAPI context
  498  *
  499  * Stop NAPI from being scheduled on this context.
  500  * Waits till any outstanding processing completes.
  501  */
  502 void napi_disable(struct napi_struct *n);
  503 
  504 /**
  505  *	napi_enable - enable NAPI scheduling
  506  *	@n: NAPI context
  507  *
  508  * Allow NAPI to be scheduled on this context again.
  509  * Must be paired with napi_disable.
  510  */
  511 static inline void napi_enable(struct napi_struct *n)
  512 {
  513 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  514 	smp_mb__before_atomic();
  515 	clear_bit(NAPI_STATE_SCHED, &n->state);
  516 	clear_bit(NAPI_STATE_NPSVC, &n->state);
  517 }
  518 
  519 /**
  520  *	napi_synchronize - wait until NAPI is not running
  521  *	@n: NAPI context
  522  *
  523  * Wait until NAPI is done being scheduled on this context.
  524  * Waits till any outstanding processing completes but
  525  * does not disable future activations.
  526  */
  527 static inline void napi_synchronize(const struct napi_struct *n)
  528 {
  529 	if (IS_ENABLED(CONFIG_SMP))
  530 		while (test_bit(NAPI_STATE_SCHED, &n->state))
  531 			msleep(1);
  532 	else
  533 		barrier();
  534 }
  535 
  536 enum netdev_queue_state_t {
  537 	__QUEUE_STATE_DRV_XOFF,
  538 	__QUEUE_STATE_STACK_XOFF,
  539 	__QUEUE_STATE_FROZEN,
  540 };
  541 
  542 #define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
  543 #define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
  544 #define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)
  545 
  546 #define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
  547 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
  548 					QUEUE_STATE_FROZEN)
  549 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
  550 					QUEUE_STATE_FROZEN)
  551 
  552 /*
  553  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
  554  * netif_tx_* functions below are used to manipulate this flag.  The
  555  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
  556  * queue independently.  The netif_xmit_*stopped functions below are called
  557  * to check if the queue has been stopped by the driver or stack (either
  558  * of the XOFF bits are set in the state).  Drivers should not need to call
  559  * netif_xmit*stopped functions; they should only be using netif_tx_*.
  560  */
  561 
  562 struct netdev_queue {
  563 /*
  564  * read-mostly part
  565  */
  566 	struct net_device	*dev;
  567 	struct Qdisc __rcu	*qdisc;
  568 	struct Qdisc		*qdisc_sleeping;
  569 #ifdef CONFIG_SYSFS
  570 	struct kobject		kobj;
  571 #endif
  572 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  573 	int			numa_node;
  574 #endif
  575 	unsigned long		tx_maxrate;
  576 	/*
  577 	 * Number of TX timeouts for this queue
  578 	 * (/sys/class/net/DEV/Q/trans_timeout)
  579 	 */
  580 	unsigned long		trans_timeout;
  581 /*
  582  * write-mostly part
  583  */
  584 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  585 	int			xmit_lock_owner;
  586 	/*
  587 	 * Time (in jiffies) of last Tx
  588 	 */
  589 	unsigned long		trans_start;
  590 
  591 	unsigned long		state;
  592 
  593 #ifdef CONFIG_BQL
  594 	struct dql		dql;
  595 #endif
  596 } ____cacheline_aligned_in_smp;
  597 
  598 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
  599 {
  600 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  601 	return q->numa_node;
  602 #else
  603 	return NUMA_NO_NODE;
  604 #endif
  605 }
  606 
  607 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
  608 {
  609 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  610 	q->numa_node = node;
  611 #endif
  612 }
  613 
  614 #ifdef CONFIG_RPS
  615 /*
  616  * This structure holds an RPS map which can be of variable length.  The
  617  * map is an array of CPUs.
  618  */
  619 struct rps_map {
  620 	unsigned int len;
  621 	struct rcu_head rcu;
  622 	u16 cpus[0];
  623 };
  624 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
  625 
  626 /*
  627  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
  628  * tail pointer for that CPU's input queue at the time of last enqueue, and
  629  * a hardware filter index.
  630  */
  631 struct rps_dev_flow {
  632 	u16 cpu;
  633 	u16 filter;
  634 	unsigned int last_qtail;
  635 };
  636 #define RPS_NO_FILTER 0xffff
  637 
  638 /*
  639  * The rps_dev_flow_table structure contains a table of flow mappings.
  640  */
  641 struct rps_dev_flow_table {
  642 	unsigned int mask;
  643 	struct rcu_head rcu;
  644 	struct rps_dev_flow flows[0];
  645 };
  646 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
  647     ((_num) * sizeof(struct rps_dev_flow)))
  648 
  649 /*
  650  * The rps_sock_flow_table contains mappings of flows to the last CPU
  651  * on which they were processed by the application (set in recvmsg).
  652  * Each entry is a 32bit value. Upper part is the high-order bits
  653  * of flow hash, lower part is CPU number.
  654  * rps_cpu_mask is used to partition the space, depending on number of
  655  * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
  656  * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
  657  * meaning we use 32-6=26 bits for the hash.
  658  */
  659 struct rps_sock_flow_table {
  660 	u32	mask;
  661 
  662 	u32	ents[0] ____cacheline_aligned_in_smp;
  663 };
  664 #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
  665 
  666 #define RPS_NO_CPU 0xffff
  667 
  668 extern u32 rps_cpu_mask;
  669 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
  670 
  671 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
  672 					u32 hash)
  673 {
  674 	if (table && hash) {
  675 		unsigned int index = hash & table->mask;
  676 		u32 val = hash & ~rps_cpu_mask;
  677 
  678 		/* We only give a hint, preemption can change CPU under us */
  679 		val |= raw_smp_processor_id();
  680 
  681 		if (table->ents[index] != val)
  682 			table->ents[index] = val;
  683 	}
  684 }
  685 
  686 #ifdef CONFIG_RFS_ACCEL
  687 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
  688 			 u16 filter_id);
  689 #endif
  690 #endif /* CONFIG_RPS */
  691 
  692 /* This structure contains an instance of an RX queue. */
  693 struct netdev_rx_queue {
  694 #ifdef CONFIG_RPS
  695 	struct rps_map __rcu		*rps_map;
  696 	struct rps_dev_flow_table __rcu	*rps_flow_table;
  697 #endif
  698 	struct kobject			kobj;
  699 	struct net_device		*dev;
  700 } ____cacheline_aligned_in_smp;
  701 
  702 /*
  703  * RX queue sysfs structures and functions.
  704  */
  705 struct rx_queue_attribute {
  706 	struct attribute attr;
  707 	ssize_t (*show)(struct netdev_rx_queue *queue,
  708 	    struct rx_queue_attribute *attr, char *buf);
  709 	ssize_t (*store)(struct netdev_rx_queue *queue,
  710 	    struct rx_queue_attribute *attr, const char *buf, size_t len);
  711 };
  712 
  713 #ifdef CONFIG_XPS
  714 /*
  715  * This structure holds an XPS map which can be of variable length.  The
  716  * map is an array of queues.
  717  */
  718 struct xps_map {
  719 	unsigned int len;
  720 	unsigned int alloc_len;
  721 	struct rcu_head rcu;
  722 	u16 queues[0];
  723 };
  724 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
  725 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
  726        - sizeof(struct xps_map)) / sizeof(u16))
  727 
  728 /*
  729  * This structure holds all XPS maps for device.  Maps are indexed by CPU.
  730  */
  731 struct xps_dev_maps {
  732 	struct rcu_head rcu;
  733 	struct xps_map __rcu *cpu_map[0];
  734 };
  735 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
  736     (nr_cpu_ids * sizeof(struct xps_map *)))
  737 #endif /* CONFIG_XPS */
  738 
  739 #define TC_MAX_QUEUE	16
  740 #define TC_BITMASK	15
  741 /* HW offloaded queuing disciplines txq count and offset maps */
  742 struct netdev_tc_txq {
  743 	u16 count;
  744 	u16 offset;
  745 };
  746 
  747 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
  748 /*
  749  * This structure is to hold information about the device
  750  * configured to run FCoE protocol stack.
  751  */
  752 struct netdev_fcoe_hbainfo {
  753 	char	manufacturer[64];
  754 	char	serial_number[64];
  755 	char	hardware_version[64];
  756 	char	driver_version[64];
  757 	char	optionrom_version[64];
  758 	char	firmware_version[64];
  759 	char	model[256];
  760 	char	model_description[256];
  761 };
  762 #endif
  763 
  764 #define MAX_PHYS_ITEM_ID_LEN 32
  765 
  766 /* This structure holds a unique identifier to identify some
  767  * physical item (port for example) used by a netdevice.
  768  */
  769 struct netdev_phys_item_id {
  770 	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
  771 	unsigned char id_len;
  772 };
  773 
  774 static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
  775 					    struct netdev_phys_item_id *b)
  776 {
  777 	return a->id_len == b->id_len &&
  778 	       memcmp(a->id, b->id, a->id_len) == 0;
  779 }
  780 
  781 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  782 				       struct sk_buff *skb);
  783 
  784 /* These structures hold the attributes of qdisc and classifiers
  785  * that are being passed to the netdevice through the setup_tc op.
  786  */
  787 enum {
  788 	TC_SETUP_MQPRIO,
  789 	TC_SETUP_CLSU32,
  790 	TC_SETUP_CLSFLOWER,
  791 	TC_SETUP_MATCHALL,
  792 	TC_SETUP_CLSBPF,
  793 };
  794 
  795 struct tc_cls_u32_offload;
  796 
  797 struct tc_to_netdev {
  798 	unsigned int type;
  799 	union {
  800 		u8 tc;
  801 		struct tc_cls_u32_offload *cls_u32;
  802 		struct tc_cls_flower_offload *cls_flower;
  803 		struct tc_cls_matchall_offload *cls_mall;
  804 		struct tc_cls_bpf_offload *cls_bpf;
  805 	};
  806 };
  807 
  808 /* These structures hold the attributes of xdp state that are being passed
  809  * to the netdevice through the xdp op.
  810  */
  811 enum xdp_netdev_command {
  812 	/* Set or clear a bpf program used in the earliest stages of packet
  813 	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
  814 	 * is responsible for calling bpf_prog_put on any old progs that are
  815 	 * stored. In case of error, the callee need not release the new prog
  816 	 * reference, but on success it takes ownership and must bpf_prog_put
  817 	 * when it is no longer used.
  818 	 */
  819 	XDP_SETUP_PROG,
  820 	/* Check if a bpf program is set on the device.  The callee should
  821 	 * return true if a program is currently attached and running.
  822 	 */
  823 	XDP_QUERY_PROG,
  824 };
  825 
  826 struct netdev_xdp {
  827 	enum xdp_netdev_command command;
  828 	union {
  829 		/* XDP_SETUP_PROG */
  830 		struct bpf_prog *prog;
  831 		/* XDP_QUERY_PROG */
  832 		bool prog_attached;
  833 	};
  834 };
  835 
  836 /*
  837  * This structure defines the management hooks for network devices.
  838  * The following hooks can be defined; unless noted otherwise, they are
  839  * optional and can be filled with a null pointer.
  840  *
  841  * int (*ndo_init)(struct net_device *dev);
  842  *     This function is called once when a network device is registered.
  843  *     The network device can use this for any late stage initialization
  844  *     or semantic validation. It can fail with an error code which will
  845  *     be propagated back to register_netdev.
  846  *
  847  * void (*ndo_uninit)(struct net_device *dev);
  848  *     This function is called when device is unregistered or when registration
  849  *     fails. It is not called if init fails.
  850  *
  851  * int (*ndo_open)(struct net_device *dev);
  852  *     This function is called when a network device transitions to the up
  853  *     state.
  854  *
  855  * int (*ndo_stop)(struct net_device *dev);
  856  *     This function is called when a network device transitions to the down
  857  *     state.
  858  *
  859  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  860  *                               struct net_device *dev);
  861  *	Called when a packet needs to be transmitted.
  862  *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
  863  *	the queue before that can happen; it's for obsolete devices and weird
  864  *	corner cases, but the stack really does a non-trivial amount
  865  *	of useless work if you return NETDEV_TX_BUSY.
  866  *	Required; cannot be NULL.
  867  *
  868  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  869  *		netdev_features_t features);
  870  *	Adjusts the requested feature flags according to device-specific
  871  *	constraints, and returns the resulting flags. Must not modify
  872  *	the device state.
  873  *
  874  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  875  *                         void *accel_priv, select_queue_fallback_t fallback);
  876  *	Called to decide which queue to use when device supports multiple
  877  *	transmit queues.
  878  *
  879  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  880  *	This function is called to allow device receiver to make
  881  *	changes to configuration when multicast or promiscuous is enabled.
  882  *
  883  * void (*ndo_set_rx_mode)(struct net_device *dev);
  884  *	This function is called when the device's address list filtering changes.
  885  *	If driver handles unicast address filtering, it should set
  886  *	IFF_UNICAST_FLT in its priv_flags.
  887  *
  888  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
  889  *	This function is called when the Media Access Control address
  890  *	needs to be changed. If this interface is not defined, the
  891  *	MAC address cannot be changed.
  892  *
  893  * int (*ndo_validate_addr)(struct net_device *dev);
  894  *	Test if Media Access Control address is valid for the device.
  895  *
  896  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
  897  *	Called when a user requests an ioctl which can't be handled by
  898  *	the generic interface code. If not defined ioctls return
  899  *	not supported error code.
  900  *
  901  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
  902  *	Used to set network devices bus interface parameters. This interface
  903  *	is retained for legacy reasons; new devices should use the bus
  904  *	interface (PCI) for low level management.
  905  *
  906  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  907  *	Called when a user wants to change the Maximum Transfer Unit
  908  *	of a device. If not defined, any request to change MTU will
  909  *	return an error.
  910  *
  911  * void (*ndo_tx_timeout)(struct net_device *dev);
  912  *	Callback used when the transmitter has not made any progress
  913  *	for dev->watchdog ticks.
  914  *
  915  * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  916  *                      struct rtnl_link_stats64 *storage);
  917  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  918  *	Called when a user wants to get the network device usage
  919  *	statistics. Drivers must do one of the following:
  920  *	1. Define @ndo_get_stats64 to fill in a zero-initialised
  921  *	   rtnl_link_stats64 structure passed by the caller.
  922  *	2. Define @ndo_get_stats to update a net_device_stats structure
  923  *	   (which should normally be dev->stats) and return a pointer to
  924  *	   it. The structure may be changed asynchronously only if each
  925  *	   field is written atomically.
  926  *	3. Update dev->stats asynchronously and atomically, and define
  927  *	   neither operation.
  928  *
  929  * bool (*ndo_has_offload_stats)(int attr_id)
  930  *	Return true if this device supports offload stats of this attr_id.
  931  *
  932  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
  933  *	void *attr_data)
  934  *	Get statistics for offload operations by attr_id. Write it into the
  935  *	attr_data pointer.
  936  *
  937  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
  938  *	If device supports VLAN filtering this function is called when a
  939  *	VLAN id is registered.
  940  *
  941  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
  942  *	If device supports VLAN filtering this function is called when a
  943  *	VLAN id is unregistered.
  944  *
  945  * void (*ndo_poll_controller)(struct net_device *dev);
  946  *
  947  *	SR-IOV management functions.
  948  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  949  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
  950  *			  u8 qos, __be16 proto);
  951  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
  952  *			  int max_tx_rate);
  953  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  954  * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
  955  * int (*ndo_get_vf_config)(struct net_device *dev,
  956  *			    int vf, struct ifla_vf_info *ivf);
  957  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
  958  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  959  *			  struct nlattr *port[]);
  960  *
  961  *      Enable or disable the VF ability to query its RSS Redirection Table and
  962  *      Hash Key. This is needed since on some devices the VF shares this information
  963  *      with the PF and querying it may introduce a theoretical security risk.
  964  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
  965  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  966  * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
  967  * 	Called to setup 'tc' number of traffic classes in the net device. This
  968  * 	is always called from the stack with the rtnl lock held and netif tx
  969  * 	queues stopped. This allows the netdevice to perform queue management
  970  * 	safely.
  971  *
  972  *	Fiber Channel over Ethernet (FCoE) offload functions.
  973  * int (*ndo_fcoe_enable)(struct net_device *dev);
  974  *	Called when the FCoE protocol stack wants to start using LLD for FCoE
  975  *	so the underlying device can perform whatever needed configuration or
  976  *	initialization to support acceleration of FCoE traffic.
  977  *
  978  * int (*ndo_fcoe_disable)(struct net_device *dev);
  979  *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
  980  *	so the underlying device can perform whatever needed clean-ups to
  981  *	stop supporting acceleration of FCoE traffic.
  982  *
  983  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
  984  *			     struct scatterlist *sgl, unsigned int sgc);
  985  *	Called when the FCoE Initiator wants to initialize an I/O that
  986  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  987  *	perform necessary setup and returns 1 to indicate the device is set up
  988  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  989  *
  990  * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
  991  *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
  992  *	indicated by the FC exchange id 'xid', so the underlying device can
  993  *	clean up and reuse resources for later DDP requests.
  994  *
  995  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
  996  *			      struct scatterlist *sgl, unsigned int sgc);
  997  *	Called when the FCoE Target wants to initialize an I/O that
  998  *	is a possible candidate for Direct Data Placement (DDP). The LLD can
  999  *	perform necessary setup and returns 1 to indicate the device is set up
 1000  *	successfully to perform DDP on this I/O, otherwise this returns 0.
 1001  *
 1002  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 1003  *			       struct netdev_fcoe_hbainfo *hbainfo);
 1004  *	Called when the FCoE Protocol stack wants information on the underlying
 1005  *	device. This information is utilized by the FCoE protocol stack to
 1006  *	register attributes with Fiber Channel management service as per the
 1007  *	FC-GS Fabric Device Management Information(FDMI) specification.
 1008  *
 1009  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 1010  *	Called when the underlying device wants to override default World Wide
 1011  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 1012  *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 1013  *	protocol stack to use.
 1014  *
 1015  *	RFS acceleration.
 1016  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 1017  *			    u16 rxq_index, u32 flow_id);
 1018  *	Set hardware filter for RFS.  rxq_index is the target queue index;
 1019  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 1020  *	Return the filter ID on success, or a negative error code.
 1021  *
 1022  *	Slave management functions (for bridge, bonding, etc).
 1023  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 1024  *	Called to make another netdev an underling.
 1025  *
 1026  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 1027  *	Called to release previously enslaved netdev.
 1028  *
 1029  *      Feature/offload setting functions.
 1030  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 1031  *	Called to update device configuration to new features. Passed
 1032  *	feature set might be less than what was returned by ndo_fix_features().
 1033  *	Must return >0 or -errno if it changed dev->features itself.
 1034  *
 1035  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 1036  *		      struct net_device *dev,
 1037  *		      const unsigned char *addr, u16 vid, u16 flags)
 1038  *	Adds an FDB entry to dev for addr.
 1039  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 1040  *		      struct net_device *dev,
 1041  *		      const unsigned char *addr, u16 vid)
 1042  *	Deletes the FDB entry from dev corresponding to addr.
 1043  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 1044  *		       struct net_device *dev, struct net_device *filter_dev,
 1045  *		       int *idx)
 1046  *	Used to add FDB entries to dump requests. Implementers should add
 1047  *	entries to skb and update idx with the number of entries.
 1048  *
 1049  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 1050  *			     u16 flags)
 1051  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 1052  *			     struct net_device *dev, u32 filter_mask,
 1053  *			     int nlflags)
 1054  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 1055  *			     u16 flags);
 1056  *
 1057  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 1058  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 1059  *	which do not represent real hardware may define this to allow their
 1060  *	userspace components to manage their virtual carrier state. Devices
 1061  *	that determine carrier state from physical hardware properties (e.g.,
 1062  *	network cables) or protocol-dependent mechanisms (e.g.,
 1063  *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 1064  *
 1065  * int (*ndo_get_phys_port_id)(struct net_device *dev,
 1066  *			       struct netdev_phys_item_id *ppid);
 1067  *	Called to get the ID of the physical port of this device. If the
 1068  *	driver does not implement this, it is assumed that the hardware is
 1069  *	not able to have multiple net devices on a single physical port.
 1070  *
 1071  * void (*ndo_udp_tunnel_add)(struct net_device *dev,
 1072  *			      struct udp_tunnel_info *ti);
 1073  *	Called by UDP tunnel to notify a driver about the UDP port and socket
 1074  *	address family that a UDP tunnel is listening to. It is called only
 1075  *	when a new port starts listening. The operation is protected by the
 1076  *	RTNL.
 1077  *
 1078  * void (*ndo_udp_tunnel_del)(struct net_device *dev,
 1079  *			      struct udp_tunnel_info *ti);
 1080  *	Called by UDP tunnel to notify the driver about a UDP port and socket
 1081  *	address family that the UDP tunnel is not listening to anymore. The
 1082  *	operation is protected by the RTNL.
 1083  *
 1084  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 1085  *				 struct net_device *dev)
 1086  *	Called by upper layer devices to accelerate switching or other
 1087  *	station functionality into hardware. 'pdev' is the lowerdev
 1088  *	to use for the offload and 'dev' is the net device that will
 1089  *	back the offload. Returns a pointer to the private structure
 1090  *	the upper layer will maintain.
 1091  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 1092  *	Called by upper layer device to delete the station created
 1093  *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 1094  *	the station and priv is the structure returned by the add
 1095  *	operation.
 1096  * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 1097  *				      struct net_device *dev,
 1098  *				      void *priv);
 1099  *	Callback to use for xmit over the accelerated station. This
 1100  *	is used in place of ndo_start_xmit on accelerated net
 1101  *	devices.
 1102  * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 1103  *					   struct net_device *dev,
 1104  *					   netdev_features_t features);
 1105  *	Called by core transmit path to determine if device is capable of
 1106  *	performing offload operations on a given packet. This is to give
 1107  *	the device an opportunity to implement any restrictions that cannot
 1108  *	be otherwise expressed by feature flags. The check is called with
 1109  *	the set of features that the stack has calculated and it returns
 1110  *	those the driver believes to be appropriate.
 1111  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 1112  *			     int queue_index, u32 maxrate);
 1113  *	Called when a user wants to set a max-rate limit on a specific
 1114  *	TX queue.
 1115  * int (*ndo_get_iflink)(const struct net_device *dev);
 1116  *	Called to get the iflink value of this device.
 1117  * int (*ndo_change_proto_down)(struct net_device *dev,
 1118  *				 bool proto_down);
 1119  *	This function is used to pass protocol port error state information
 1120  *	to the switch driver. The switch driver can react to the proto_down
 1121  *	by doing a phys down on the associated switch port.
 1122  * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 1123  *	This function is used to get egress tunnel information for a given skb.
 1124  *	This is useful for retrieving outer tunnel header parameters while
 1125  *	sampling a packet.
 1126  * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 1127  *	This function is used to specify the headroom that the skb must
 1128  *	consider when it is allocated during packet reception. Setting an
 1129  *	appropriate rx headroom value avoids an skb head copy on
 1130  *	forward. Setting a negative value resets the rx headroom to the
 1131  *	default value.
 1132  * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
 1133  *	This function is used to set or query state related to XDP on the
 1134  *	netdevice. See definition of enum xdp_netdev_command for details.
 1135  *
 1136  */
 1137 struct net_device_ops {
 1138 	int			(*ndo_init)(struct net_device *dev);
 1139 	void			(*ndo_uninit)(struct net_device *dev);
 1140 	int			(*ndo_open)(struct net_device *dev);
 1141 	int			(*ndo_stop)(struct net_device *dev);
 1142 	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
 1143 						  struct net_device *dev);
 1144 	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
 1145 						      struct net_device *dev,
 1146 						      netdev_features_t features);
 1147 	u16			(*ndo_select_queue)(struct net_device *dev,
 1148 						    struct sk_buff *skb,
 1149 						    void *accel_priv,
 1150 						    select_queue_fallback_t fallback);
 1151 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 1152 						       int flags);
 1153 	void			(*ndo_set_rx_mode)(struct net_device *dev);
 1154 	int			(*ndo_set_mac_address)(struct net_device *dev,
 1155 						       void *addr);
 1156 	int			(*ndo_validate_addr)(struct net_device *dev);
 1157 	int			(*ndo_do_ioctl)(struct net_device *dev,
 1158 					        struct ifreq *ifr, int cmd);
 1159 	int			(*ndo_set_config)(struct net_device *dev,
 1160 					          struct ifmap *map);
 1161 	int			(*ndo_change_mtu)(struct net_device *dev,
 1162 						  int new_mtu);
 1163 	int			(*ndo_neigh_setup)(struct net_device *dev,
 1164 						   struct neigh_parms *);
 1165 	void			(*ndo_tx_timeout) (struct net_device *dev);
 1166 
 1167 	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 1168 						     struct rtnl_link_stats64 *storage);
 1169 	bool			(*ndo_has_offload_stats)(int attr_id);
 1170 	int			(*ndo_get_offload_stats)(int attr_id,
 1171 							 const struct net_device *dev,
 1172 							 void *attr_data);
 1173 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 1174 
 1175 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
 1176 						       __be16 proto, u16 vid);
 1177 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
 1178 						        __be16 proto, u16 vid);
 1179 #ifdef CONFIG_NET_POLL_CONTROLLER
 1180 	void                    (*ndo_poll_controller)(struct net_device *dev);
 1181 	int			(*ndo_netpoll_setup)(struct net_device *dev,
 1182 						     struct netpoll_info *info);
 1183 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 1184 #endif
 1185 #ifdef CONFIG_NET_RX_BUSY_POLL
 1186 	int			(*ndo_busy_poll)(struct napi_struct *dev);
 1187 #endif
 1188 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 1189 						  int queue, u8 *mac);
 1190 	int			(*ndo_set_vf_vlan)(struct net_device *dev,
 1191 						   int queue, u16 vlan,
 1192 						   u8 qos, __be16 proto);
 1193 	int			(*ndo_set_vf_rate)(struct net_device *dev,
 1194 						   int vf, int min_tx_rate,
 1195 						   int max_tx_rate);
 1196 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
 1197 						       int vf, bool setting);
 1198 	int			(*ndo_set_vf_trust)(struct net_device *dev,
 1199 						    int vf, bool setting);
 1200 	int			(*ndo_get_vf_config)(struct net_device *dev,
 1201 						     int vf,
 1202 						     struct ifla_vf_info *ivf);
 1203 	int			(*ndo_set_vf_link_state)(struct net_device *dev,
 1204 							 int vf, int link_state);
 1205 	int			(*ndo_get_vf_stats)(struct net_device *dev,
 1206 						    int vf,
 1207 						    struct ifla_vf_stats
 1208 						    *vf_stats);
 1209 	int			(*ndo_set_vf_port)(struct net_device *dev,
 1210 						   int vf,
 1211 						   struct nlattr *port[]);
 1212 	int			(*ndo_get_vf_port)(struct net_device *dev,
 1213 						   int vf, struct sk_buff *skb);
 1214 	int			(*ndo_set_vf_guid)(struct net_device *dev,
 1215 						   int vf, u64 guid,
 1216 						   int guid_type);
 1217 	int			(*ndo_set_vf_rss_query_en)(
 1218 						   struct net_device *dev,
 1219 						   int vf, bool setting);
 1220 	int			(*ndo_setup_tc)(struct net_device *dev,
 1221 						u32 handle,
 1222 						__be16 protocol,
 1223 						struct tc_to_netdev *tc);
 1224 #if IS_ENABLED(CONFIG_FCOE)
 1225 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 1226 	int			(*ndo_fcoe_disable)(struct net_device *dev);
 1227 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
 1228 						      u16 xid,
 1229 						      struct scatterlist *sgl,
 1230 						      unsigned int sgc);
 1231 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
 1232 						     u16 xid);
 1233 	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
 1234 						       u16 xid,
 1235 						       struct scatterlist *sgl,
 1236 						       unsigned int sgc);
 1237 	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 1238 							struct netdev_fcoe_hbainfo *hbainfo);
 1239 #endif
 1240 
 1241 #if IS_ENABLED(CONFIG_LIBFCOE)
 1242 #define NETDEV_FCOE_WWNN 0
 1243 #define NETDEV_FCOE_WWPN 1
 1244 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
 1245 						    u64 *wwn, int type);
 1246 #endif
 1247 
 1248 #ifdef CONFIG_RFS_ACCEL
 1249 	int			(*ndo_rx_flow_steer)(struct net_device *dev,
 1250 						     const struct sk_buff *skb,
 1251 						     u16 rxq_index,
 1252 						     u32 flow_id);
 1253 #endif
 1254 	int			(*ndo_add_slave)(struct net_device *dev,
 1255 						 struct net_device *slave_dev);
 1256 	int			(*ndo_del_slave)(struct net_device *dev,
 1257 						 struct net_device *slave_dev);
 1258 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
 1259 						    netdev_features_t features);
 1260 	int			(*ndo_set_features)(struct net_device *dev,
 1261 						    netdev_features_t features);
 1262 	int			(*ndo_neigh_construct)(struct net_device *dev,
 1263 						       struct neighbour *n);
 1264 	void			(*ndo_neigh_destroy)(struct net_device *dev,
 1265 						     struct neighbour *n);
 1266 
 1267 	int			(*ndo_fdb_add)(struct ndmsg *ndm,
 1268 					       struct nlattr *tb[],
 1269 					       struct net_device *dev,
 1270 					       const unsigned char *addr,
 1271 					       u16 vid,
 1272 					       u16 flags);
 1273 	int			(*ndo_fdb_del)(struct ndmsg *ndm,
 1274 					       struct nlattr *tb[],
 1275 					       struct net_device *dev,
 1276 					       const unsigned char *addr,
 1277 					       u16 vid);
 1278 	int			(*ndo_fdb_dump)(struct sk_buff *skb,
 1279 						struct netlink_callback *cb,
 1280 						struct net_device *dev,
 1281 						struct net_device *filter_dev,
 1282 						int *idx);
 1283 
 1284 	int			(*ndo_bridge_setlink)(struct net_device *dev,
 1285 						      struct nlmsghdr *nlh,
 1286 						      u16 flags);
 1287 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
 1288 						      u32 pid, u32 seq,
 1289 						      struct net_device *dev,
 1290 						      u32 filter_mask,
 1291 						      int nlflags);
 1292 	int			(*ndo_bridge_dellink)(struct net_device *dev,
 1293 						      struct nlmsghdr *nlh,
 1294 						      u16 flags);
 1295 	int			(*ndo_change_carrier)(struct net_device *dev,
 1296 						      bool new_carrier);
 1297 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
 1298 							struct netdev_phys_item_id *ppid);
 1299 	int			(*ndo_get_phys_port_name)(struct net_device *dev,
 1300 							  char *name, size_t len);
 1301 	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
 1302 						      struct udp_tunnel_info *ti);
 1303 	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
 1304 						      struct udp_tunnel_info *ti);
 1305 	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
 1306 							struct net_device *dev);
 1307 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
 1308 							void *priv);
 1309 
 1310 	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
 1311 							struct net_device *dev,
 1312 							void *priv);
 1313 	int			(*ndo_get_lock_subclass)(struct net_device *dev);
 1314 	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
 1315 						      int queue_index,
 1316 						      u32 maxrate);
 1317 	int			(*ndo_get_iflink)(const struct net_device *dev);
 1318 	int			(*ndo_change_proto_down)(struct net_device *dev,
 1319 							 bool proto_down);
 1320 	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
 1321 						       struct sk_buff *skb);
 1322 	void			(*ndo_set_rx_headroom)(struct net_device *dev,
 1323 						       int needed_headroom);
 1324 	int			(*ndo_xdp)(struct net_device *dev,
 1325 					   struct netdev_xdp *xdp);
 1326 };
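
A driver fills in only the callbacks it actually supports and leaves the rest NULL;
the core tests each pointer before calling it. A minimal hedged sketch follows — the
foo_* functions are hypothetical driver code, and eth_validate_addr()/eth_mac_addr()
are assumed from <linux/etherdevice.h>:

/* Illustrative sketch only; foo_open/foo_stop/foo_start_xmit are
 * hypothetical driver functions, not part of this header. */
static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);		/* allow the core to hand us skbs */
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hand the skb to the hardware here; this sketch just drops it */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,	/* from <linux/etherdevice.h> */
	.ndo_set_mac_address	= eth_mac_addr,
};

The structure is then assigned to dev->netdev_ops before the device is registered.
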
 1327 
 1328 /**
 1329  * enum net_device_priv_flags - &struct net_device priv_flags
 1330  *
 1331  * These are the &struct net_device priv_flags; they are only set
 1332  * internally by drivers and used in the kernel. These flags are invisible
 1333  * to userspace; this means that the order of these flags can change
 1334  * during any kernel release.
 1335  *
 1336  * You should have a pretty good reason to be extending these flags.
 1337  *
 1338  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 1339  * @IFF_EBRIDGE: Ethernet bridging device
 1340  * @IFF_BONDING: bonding master or slave
 1341  * @IFF_ISATAP: ISATAP interface (RFC4214)
 1342  * @IFF_WAN_HDLC: WAN HDLC device
 1343  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 1344  *	release skb->dst
 1345  * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 1346  * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 1347  * @IFF_MACVLAN_PORT: device used as macvlan port
 1348  * @IFF_BRIDGE_PORT: device used as bridge port
 1349  * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 1350  * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 1351  * @IFF_UNICAST_FLT: Supports unicast filtering
 1352  * @IFF_TEAM_PORT: device used as team port
 1353  * @IFF_SUPP_NOFCS: device supports sending custom FCS
 1354  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 1355  *	change when it's running
 1356  * @IFF_MACVLAN: Macvlan device
 1357  * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 1358  *	underlying stacked devices
 1359  * @IFF_IPVLAN_MASTER: IPvlan master device
 1360  * @IFF_IPVLAN_SLAVE: IPvlan slave device
 1361  * @IFF_L3MDEV_MASTER: device is an L3 master device
 1362  * @IFF_NO_QUEUE: device can run without qdisc attached
 1363  *	@IFF_OPENVSWITCH: device is an Open vSwitch master
 1364  * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 1365  * @IFF_TEAM: device is a team device
 1366  * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 1367  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 1368  *	entity (i.e. the master device for bridged veth)
 1369  * @IFF_MACSEC: device is a MACsec device
 1370  */
 1371 enum netdev_priv_flags {
 1372 	IFF_802_1Q_VLAN			= 1<<0,
 1373 	IFF_EBRIDGE			= 1<<1,
 1374 	IFF_BONDING			= 1<<2,
 1375 	IFF_ISATAP			= 1<<3,
 1376 	IFF_WAN_HDLC			= 1<<4,
 1377 	IFF_XMIT_DST_RELEASE		= 1<<5,
 1378 	IFF_DONT_BRIDGE			= 1<<6,
 1379 	IFF_DISABLE_NETPOLL		= 1<<7,
 1380 	IFF_MACVLAN_PORT		= 1<<8,
 1381 	IFF_BRIDGE_PORT			= 1<<9,
 1382 	IFF_OVS_DATAPATH		= 1<<10,
 1383 	IFF_TX_SKB_SHARING		= 1<<11,
 1384 	IFF_UNICAST_FLT			= 1<<12,
 1385 	IFF_TEAM_PORT			= 1<<13,
 1386 	IFF_SUPP_NOFCS			= 1<<14,
 1387 	IFF_LIVE_ADDR_CHANGE		= 1<<15,
 1388 	IFF_MACVLAN			= 1<<16,
 1389 	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
 1390 	IFF_IPVLAN_MASTER		= 1<<18,
 1391 	IFF_IPVLAN_SLAVE		= 1<<19,
 1392 	IFF_L3MDEV_MASTER		= 1<<20,
 1393 	IFF_NO_QUEUE			= 1<<21,
 1394 	IFF_OPENVSWITCH			= 1<<22,
 1395 	IFF_L3MDEV_SLAVE		= 1<<23,
 1396 	IFF_TEAM			= 1<<24,
 1397 	IFF_RXFH_CONFIGURED		= 1<<25,
 1398 	IFF_PHONY_HEADROOM		= 1<<26,
 1399 	IFF_MACSEC			= 1<<27,
 1400 };
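
These flags are tested with plain bitwise checks on dev->priv_flags; the kernel
wraps the common ones in netif_is_*() helpers. A hedged sketch of the pattern
(the helper name is illustrative):

/* Illustrative helper in the style of the kernel's netif_is_*() tests. */
static inline bool foo_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}
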
 1401 
 1402 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
 1403 #define IFF_EBRIDGE			IFF_EBRIDGE
 1404 #define IFF_BONDING			IFF_BONDING
 1405 #define IFF_ISATAP			IFF_ISATAP
 1406 #define IFF_WAN_HDLC			IFF_WAN_HDLC
 1407 #define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
 1408 #define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
 1409 #define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
 1410 #define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
 1411 #define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
 1412 #define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
 1413 #define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
 1414 #define IFF_UNICAST_FLT			IFF_UNICAST_FLT
 1415 #define IFF_TEAM_PORT			IFF_TEAM_PORT
 1416 #define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
 1417 #define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
 1418 #define IFF_MACVLAN			IFF_MACVLAN
 1419 #define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
 1420 #define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
 1421 #define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
 1422 #define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
 1423 #define IFF_NO_QUEUE			IFF_NO_QUEUE
 1424 #define IFF_OPENVSWITCH			IFF_OPENVSWITCH
 1425 #define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
 1426 #define IFF_TEAM			IFF_TEAM
 1427 #define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
 1428 #define IFF_MACSEC			IFF_MACSEC
 1429 
 1430 /**
 1431  *	struct net_device - The DEVICE structure.
 1432  *		Actually, this whole structure is a big mistake.  It mixes I/O
 1433  *		data with strictly "high-level" data, and it has to know about
 1434  *		almost every data structure used in the INET module.
 1435  *
 1436  *	@name:	This is the first field of the "visible" part of this structure
 1437  *		(i.e. as seen by users in the "Space.c" file).  It is the name
 1438  *	 	of the interface.
 1439  *
 1440  *	@name_hlist: 	Device name hash chain, please keep it close to name[]
 1441  *	@ifalias:	SNMP alias
 1442  *	@mem_end:	Shared memory end
 1443  *	@mem_start:	Shared memory start
 1444  *	@base_addr:	Device I/O address
 1445  *	@irq:		Device IRQ number
 1446  *
 1447  *	@carrier_changes:	Stats to monitor carrier on<->off transitions
 1448  *
 1449  *	@state:		Generic network queuing layer state, see netdev_state_t
 1450  *	@dev_list:	The global list of network devices
 1451  *	@napi_list:	List entry used for polling NAPI devices
 1452  *	@unreg_list:	List entry used when we are unregistering the
 1453  *			device; see the function unregister_netdev
 1454  *	@close_list:	List entry used when we are closing the device
 1455  *	@ptype_all:     Device-specific packet handlers for all protocols
 1456  *	@ptype_specific: Device-specific, protocol-specific packet handlers
 1457  *
 1458  *	@adj_list:	Directly linked devices, like slaves for bonding
 1459  *	@all_adj_list:	All linked devices, *including* neighbours
 1460  *	@features:	Currently active device features
 1461  *	@hw_features:	User-changeable features
 1462  *
 1463  *	@wanted_features:	User-requested features
 1464  *	@vlan_features:		Mask of features inheritable by VLAN devices
 1465  *
 1466  *	@hw_enc_features:	Mask of features inherited by encapsulating devices.
 1467  *				This field indicates what encapsulation
 1468  *				offloads the hardware is capable of doing,
 1469  *				and drivers will need to set them appropriately.
 1470  *
 1471  *	@mpls_features:	Mask of features inheritable by MPLS
 1472  *
 1473  *	@ifindex:	interface index
 1474  *	@group:		The group the device belongs to
 1475  *
 1476  *	@stats:		Statistics struct, which was left as a legacy, use
 1477  *			rtnl_link_stats64 instead
 1478  *
 1479  *	@rx_dropped:	Dropped packets by core network,
 1480  *			do not use this in drivers
 1481  *	@tx_dropped:	Dropped packets by core network,
 1482  *			do not use this in drivers
 1483  *	@rx_nohandler:	nohandler dropped packets by core network on
 1484  *			inactive devices, do not use this in drivers
 1485  *
 1486  *	@wireless_handlers:	List of functions to handle Wireless Extensions,
 1487  *				instead of ioctl,
 1488  *				see <net/iw_handler.h> for details.
 1489  *	@wireless_data:	Instance data managed by the core of wireless extensions
 1490  *
 1491  *	@netdev_ops:	Includes several pointers to callbacks,
 1492  *			if one wants to override the ndo_*() functions
 1493  *	@ethtool_ops:	Management operations
 1494  *	@ndisc_ops:	Includes callbacks for different IPv6 neighbour
 1495  *			discovery handling. Necessary for e.g. 6LoWPAN.
 1496  *	@header_ops:	Includes callbacks for creating, parsing, caching, etc.
 1497  *			of Layer 2 headers.
 1498  *
 1499  *	@flags:		Interface flags (a la BSD)
 1500  *	@priv_flags:	Like 'flags' but invisible to userspace,
 1501  *			see if.h for the definitions
 1502  *	@gflags:	Global flags (kept as legacy)
 1503  *	@padded:	How much padding added by alloc_netdev()
 1504  *	@operstate:	RFC2863 operstate
 1505  *	@link_mode:	Mapping policy to operstate
 1506  *	@if_port:	Selectable AUI, TP, ...
 1507  *	@dma:		DMA channel
 1508  *	@mtu:		Interface MTU value
 1509  *	@type:		Interface hardware type
 1510  *	@hard_header_len: Maximum hardware header length.
 1511  *
 1512  *	@needed_headroom: Extra headroom the hardware may need, but not in all
 1513  *			  cases can this be guaranteed
 1514  *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
 1515  *			  cases can this be guaranteed. Some cases also use
 1516  *			  LL_MAX_HEADER instead to allocate the skb
 1517  *
 1518  *	interface address info:
 1519  *
 1520  * 	@perm_addr:		Permanent hw address
 1521  * 	@addr_assign_type:	Hw address assignment type
 1522  * 	@addr_len:		Hardware address length
 1523  *	@neigh_priv_len:	Used in neigh_alloc()
 1524  * 	@dev_id:		Used to differentiate devices that share
 1525  * 				the same link layer address
 1526  * 	@dev_port:		Used to differentiate devices that share
 1527  * 				the same function
 1528  *	@addr_list_lock:	XXX: need comments on this one
 1529  *	@uc_promisc:		Flag that indicates promiscuous mode
 1530  *				has been enabled due to the need to listen to
 1531  *				additional unicast addresses in a device that
 1532  *				does not implement ndo_set_rx_mode()
 1533  *	@uc:			unicast mac addresses
 1534  *	@mc:			multicast mac addresses
 1535  *	@dev_addrs:		list of device hw addresses
 1536  *	@queues_kset:		Group of all Kobjects in the Tx and RX queues
 1537  *	@promiscuity:		Number of times the NIC is told to work in
 1538  *				promiscuous mode; if it becomes 0 the NIC will
 1539  *				exit promiscuous mode
 1540  *	@allmulti:		Counter, enables or disables allmulticast mode
 1541  *
 1542  *	@vlan_info:	VLAN info
 1543  *	@dsa_ptr:	dsa specific data
 1544  *	@tipc_ptr:	TIPC specific data
 1545  *	@atalk_ptr:	AppleTalk link
 1546  *	@ip_ptr:	IPv4 specific data
 1547  *	@dn_ptr:	DECnet specific data
 1548  *	@ip6_ptr:	IPv6 specific data
 1549  *	@ax25_ptr:	AX.25 specific data
 1550  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
 1551  *
 1552  *	@last_rx:	Time of last Rx
 1553  *	@dev_addr:	Hw address (before bcast,
 1554  *			because most packets are unicast)
 1555  *
 1556  *	@_rx:			Array of RX queues
 1557  *	@num_rx_queues:		Number of RX queues
 1558  *				allocated at register_netdev() time
 1559  *	@real_num_rx_queues: 	Number of RX queues currently active in device
 1560  *
 1561  *	@rx_handler:		handler for received packets
 1562  *	@rx_handler_data: 	XXX: need comments on this one
 1563  *	@ingress_queue:		XXX: need comments on this one
 1564  *	@broadcast:		hw bcast address
 1565  *
 1566  *	@rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
 1567  *			indexed by RX queue number. Assigned by driver.
 1568  *			This must only be set if the ndo_rx_flow_steer
 1569  *			operation is defined
 1570  *	@index_hlist:		Device index hash chain
 1571  *
 1572  *	@_tx:			Array of TX queues
 1573  *	@num_tx_queues:		Number of TX queues allocated at alloc_netdev_mq() time
 1574  *	@real_num_tx_queues: 	Number of TX queues currently active in device
 1575  *	@qdisc:			Root qdisc from userspace point of view
 1576  *	@tx_queue_len:		Max frames per queue allowed
 1577  *	@tx_global_lock: 	XXX: need comments on this one
 1578  *
 1579  *	@xps_maps:	XXX: need comments on this one
 1580  *
 1581  *	@watchdog_timeo:	Represents the timeout that is used by
 1582  *				the watchdog (see dev_watchdog())
 1583  *	@watchdog_timer:	List of timers
 1584  *
 1585  *	@pcpu_refcnt:		Number of references to this device
 1586  *	@todo_list:		Delayed register/unregister
 1587  *	@link_watch_list:	XXX: need comments on this one
 1588  *
 1589  *	@reg_state:		Register/unregister state machine
 1590  *	@dismantle:		Device is going to be freed
 1591  *	@rtnl_link_state:	This enum represents the phases of creating
 1592  *				a new link
 1593  *
 1594  *	@destructor:		Called from unregister,
 1595  *				can be used to call free_netdev
 1596  *	@npinfo:		XXX: need comments on this one
 1597  * 	@nd_net:		Network namespace this network device is inside
 1598  *
 1599  * 	@ml_priv:	Mid-layer private
 1600  * 	@lstats:	Loopback statistics
 1601  * 	@tstats:	Tunnel statistics
 1602  * 	@dstats:	Dummy statistics
 1603  * 	@vstats:	Virtual ethernet statistics
 1604  *
 1605  *	@garp_port:	GARP
 1606  *	@mrp_port:	MRP
 1607  *
 1608  *	@dev:		Class/net/name entry
 1609  *	@sysfs_groups:	Space for optional device, statistics and wireless
 1610  *			sysfs groups
 1611  *
 1612  *	@sysfs_rx_queue_group:	Space for optional per-rx queue attributes
 1613  *	@rtnl_link_ops:	Rtnl_link_ops
 1614  *
 1615  *	@gso_max_size:	Maximum size of generic segmentation offload
 1616  *	@gso_max_segs:	Maximum number of segments that can be passed to the
 1617  *			NIC for GSO
 1618  *
 1619  *	@dcbnl_ops:	Data Center Bridging netlink ops
 1620  *	@num_tc:	Number of traffic classes in the net device
 1621  *	@tc_to_txq:	XXX: need comments on this one
 1622  *	@prio_tc_map:	XXX: need comments on this one
 1623  *
 1624  *	@fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
 1625  *
 1626  *	@priomap:	XXX: need comments on this one
 1627  *	@phydev:	Physical device may attach itself
 1628  *			for hardware timestamping
 1629  *
 1630  *	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
 1631  *	@qdisc_running_key: lockdep class annotating Qdisc->running seqcount
 1632  *
 1633  *	@proto_down:	protocol port state information can be sent to the
 1634  *			switch driver and used to set the phys state of the
 1635  *			switch port.
 1636  *
 1637  *	FIXME: cleanup struct net_device such that network protocol info
 1638  *	moves out.
 1639  */
 1640 
 1641 struct net_device {
 1642 	char			name[IFNAMSIZ];
 1643 	struct hlist_node	name_hlist;
 1644 	char 			*ifalias;
 1645 	/*
 1646 	 *	I/O specific fields
 1647 	 *	FIXME: Merge these and struct ifmap into one
 1648 	 */
 1649 	unsigned long		mem_end;
 1650 	unsigned long		mem_start;
 1651 	unsigned long		base_addr;
 1652 	int			irq;
 1653 
 1654 	atomic_t		carrier_changes;
 1655 
 1656 	/*
 1657 	 *	Some hardware also needs these fields (state,dev_list,
 1658 	 *	napi_list,unreg_list,close_list) but they are not
 1659 	 *	part of the usual set specified in Space.c.
 1660 	 */
 1661 
 1662 	unsigned long		state;
 1663 
 1664 	struct list_head	dev_list;
 1665 	struct list_head	napi_list;
 1666 	struct list_head	unreg_list;
 1667 	struct list_head	close_list;
 1668 	struct list_head	ptype_all;
 1669 	struct list_head	ptype_specific;
 1670 
 1671 	struct {
 1672 		struct list_head upper;
 1673 		struct list_head lower;
 1674 	} adj_list;
 1675 
 1676 	struct {
 1677 		struct list_head upper;
 1678 		struct list_head lower;
 1679 	} all_adj_list;
 1680 
 1681 	netdev_features_t	features;
 1682 	netdev_features_t	hw_features;
 1683 	netdev_features_t	wanted_features;
 1684 	netdev_features_t	vlan_features;
 1685 	netdev_features_t	hw_enc_features;
 1686 	netdev_features_t	mpls_features;
 1687 	netdev_features_t	gso_partial_features;
 1688 
 1689 	int			ifindex;
 1690 	int			group;
 1691 
 1692 	struct net_device_stats	stats;
 1693 
 1694 	atomic_long_t		rx_dropped;
 1695 	atomic_long_t		tx_dropped;
 1696 	atomic_long_t		rx_nohandler;
 1697 
 1698 #ifdef CONFIG_WIRELESS_EXT
 1699 	const struct iw_handler_def *wireless_handlers;
 1700 	struct iw_public_data	*wireless_data;
 1701 #endif
 1702 	const struct net_device_ops *netdev_ops;
 1703 	const struct ethtool_ops *ethtool_ops;
 1704 #ifdef CONFIG_NET_SWITCHDEV
 1705 	const struct switchdev_ops *switchdev_ops;
 1706 #endif
 1707 #ifdef CONFIG_NET_L3_MASTER_DEV
 1708 	const struct l3mdev_ops	*l3mdev_ops;
 1709 #endif
 1710 #if IS_ENABLED(CONFIG_IPV6)
 1711 	const struct ndisc_ops *ndisc_ops;
 1712 #endif
 1713 
 1714 	const struct header_ops *header_ops;
 1715 
 1716 	unsigned int		flags;
 1717 	unsigned int		priv_flags;
 1718 
 1719 	unsigned short		gflags;
 1720 	unsigned short		padded;
 1721 
 1722 	unsigned char		operstate;
 1723 	unsigned char		link_mode;
 1724 
 1725 	unsigned char		if_port;
 1726 	unsigned char		dma;
 1727 
 1728 	unsigned int		mtu;
 1729 	unsigned short		type;
 1730 	unsigned short		hard_header_len;
 1731 
 1732 	unsigned short		needed_headroom;
 1733 	unsigned short		needed_tailroom;
 1734 
 1735 	/* Interface address info. */
 1736 	unsigned char		perm_addr[MAX_ADDR_LEN];
 1737 	unsigned char		addr_assign_type;
 1738 	unsigned char		addr_len;
 1739 	unsigned short		neigh_priv_len;
 1740 	unsigned short          dev_id;
 1741 	unsigned short          dev_port;
 1742 	spinlock_t		addr_list_lock;
 1743 	unsigned char		name_assign_type;
 1744 	bool			uc_promisc;
 1745 	struct netdev_hw_addr_list	uc;
 1746 	struct netdev_hw_addr_list	mc;
 1747 	struct netdev_hw_addr_list	dev_addrs;
 1748 
 1749 #ifdef CONFIG_SYSFS
 1750 	struct kset		*queues_kset;
 1751 #endif
 1752 	unsigned int		promiscuity;
 1753 	unsigned int		allmulti;
 1754 
 1755 
 1756 	/* Protocol-specific pointers */
 1757 
 1758 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 1759 	struct vlan_info __rcu	*vlan_info;
 1760 #endif
 1761 #if IS_ENABLED(CONFIG_NET_DSA)
 1762 	struct dsa_switch_tree	*dsa_ptr;
 1763 #endif
 1764 #if IS_ENABLED(CONFIG_TIPC)
 1765 	struct tipc_bearer __rcu *tipc_ptr;
 1766 #endif
 1767 	void 			*atalk_ptr;
 1768 	struct in_device __rcu	*ip_ptr;
 1769 	struct dn_dev __rcu     *dn_ptr;
 1770 	struct inet6_dev __rcu	*ip6_ptr;
 1771 	void			*ax25_ptr;
 1772 	struct wireless_dev	*ieee80211_ptr;
 1773 	struct wpan_dev		*ieee802154_ptr;
 1774 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
 1775 	struct mpls_dev __rcu	*mpls_ptr;
 1776 #endif
 1777 
 1778 /*
 1779  * Cache lines mostly used on receive path (including eth_type_trans())
 1780  */
 1781 	unsigned long		last_rx;
 1782 
 1783 	/* Interface address info used in eth_type_trans() */
 1784 	unsigned char		*dev_addr;
 1785 
 1786 #ifdef CONFIG_SYSFS
 1787 	struct netdev_rx_queue	*_rx;
 1788 
 1789 	unsigned int		num_rx_queues;
 1790 	unsigned int		real_num_rx_queues;
 1791 #endif
 1792 
 1793 	unsigned long		gro_flush_timeout;
 1794 	rx_handler_func_t __rcu	*rx_handler;
 1795 	void __rcu		*rx_handler_data;
 1796 
 1797 #ifdef CONFIG_NET_CLS_ACT
 1798 	struct tcf_proto __rcu  *ingress_cl_list;
 1799 #endif
 1800 	struct netdev_queue __rcu *ingress_queue;
 1801 #ifdef CONFIG_NETFILTER_INGRESS
 1802 	struct nf_hook_entry __rcu *nf_hooks_ingress;
 1803 #endif
 1804 
 1805 	unsigned char		broadcast[MAX_ADDR_LEN];
 1806 #ifdef CONFIG_RFS_ACCEL
 1807 	struct cpu_rmap		*rx_cpu_rmap;
 1808 #endif
 1809 	struct hlist_node	index_hlist;
 1810 
 1811 /*
 1812  * Cache lines mostly used on transmit path
 1813  */
 1814 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 1815 	unsigned int		num_tx_queues;
 1816 	unsigned int		real_num_tx_queues;
 1817 	struct Qdisc		*qdisc;
 1818 #ifdef CONFIG_NET_SCHED
 1819 	DECLARE_HASHTABLE	(qdisc_hash, 4);
 1820 #endif
 1821 	unsigned long		tx_queue_len;
 1822 	spinlock_t		tx_global_lock;
 1823 	int			watchdog_timeo;
 1824 
 1825 #ifdef CONFIG_XPS
 1826 	struct xps_dev_maps __rcu *xps_maps;
 1827 #endif
 1828 #ifdef CONFIG_NET_CLS_ACT
 1829 	struct tcf_proto __rcu  *egress_cl_list;
 1830 #endif
 1831 
 1832 	/* These may be needed for future network-power-down code. */
 1833 	struct timer_list	watchdog_timer;
 1834 
 1835 	int __percpu		*pcpu_refcnt;
 1836 	struct list_head	todo_list;
 1837 
 1838 	struct list_head	link_watch_list;
 1839 
 1840 	enum { NETREG_UNINITIALIZED=0,
 1841 	       NETREG_REGISTERED,	/* completed register_netdevice */
 1842 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 1843 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 1844 	       NETREG_RELEASED,		/* called free_netdev */
 1845 	       NETREG_DUMMY,		/* dummy device for NAPI poll */
 1846 	} reg_state:8;
 1847 
 1848 	bool dismantle;
 1849 
 1850 	enum {
 1851 		RTNL_LINK_INITIALIZED,
 1852 		RTNL_LINK_INITIALIZING,
 1853 	} rtnl_link_state:16;
 1854 
 1855 	void (*destructor)(struct net_device *dev);
 1856 
 1857 #ifdef CONFIG_NETPOLL
 1858 	struct netpoll_info __rcu	*npinfo;
 1859 #endif
 1860 
 1861 	possible_net_t			nd_net;
 1862 
 1863 	/* mid-layer private */
 1864 	union {
 1865 		void					*ml_priv;
 1866 		struct pcpu_lstats __percpu		*lstats;
 1867 		struct pcpu_sw_netstats __percpu	*tstats;
 1868 		struct pcpu_dstats __percpu		*dstats;
 1869 		struct pcpu_vstats __percpu		*vstats;
 1870 	};
 1871 
 1872 	struct garp_port __rcu	*garp_port;
 1873 	struct mrp_port __rcu	*mrp_port;
 1874 
 1875 	struct device		dev;
 1876 	const struct attribute_group *sysfs_groups[4];
 1877 	const struct attribute_group *sysfs_rx_queue_group;
 1878 
 1879 	const struct rtnl_link_ops *rtnl_link_ops;
 1880 
 1881 	/* for setting kernel sock attribute on TCP connection setup */
 1882 #define GSO_MAX_SIZE		65536
 1883 	unsigned int		gso_max_size;
 1884 #define GSO_MAX_SEGS		65535
 1885 	u16			gso_max_segs;
 1886 
 1887 #ifdef CONFIG_DCB
 1888 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 1889 #endif
 1890 	u8			num_tc;
 1891 	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
 1892 	u8			prio_tc_map[TC_BITMASK + 1];
 1893 
 1894 #if IS_ENABLED(CONFIG_FCOE)
 1895 	unsigned int		fcoe_ddp_xid;
 1896 #endif
 1897 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 1898 	struct netprio_map __rcu *priomap;
 1899 #endif
 1900 	struct phy_device	*phydev;
 1901 	struct lock_class_key	*qdisc_tx_busylock;
 1902 	struct lock_class_key	*qdisc_running_key;
 1903 	bool			proto_down;
 1904 };
 1905 #define to_net_dev(d) container_of(d, struct net_device, dev)
 1906 
 1907 #define	NETDEV_ALIGN		32
 1908 
 1909 static inline
 1910 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
 1911 {
 1912 	return dev->prio_tc_map[prio & TC_BITMASK];
 1913 }
 1914 
 1915 static inline
 1916 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
 1917 {
 1918 	if (tc >= dev->num_tc)
 1919 		return -EINVAL;
 1920 
 1921 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
 1922 	return 0;
 1923 }
 1924 
 1925 static inline
 1926 void netdev_reset_tc(struct net_device *dev)
 1927 {
 1928 	dev->num_tc = 0;
 1929 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
 1930 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
 1931 }
 1932 
 1933 static inline
 1934 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
 1935 {
 1936 	if (tc >= dev->num_tc)
 1937 		return -EINVAL;
 1938 
 1939 	dev->tc_to_txq[tc].count = count;
 1940 	dev->tc_to_txq[tc].offset = offset;
 1941 	return 0;
 1942 }
 1943 
 1944 static inline
 1945 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
 1946 {
 1947 	if (num_tc > TC_MAX_QUEUE)
 1948 		return -EINVAL;
 1949 
 1950 	dev->num_tc = num_tc;
 1951 	return 0;
 1952 }
 1953 
 1954 static inline
 1955 int netdev_get_num_tc(struct net_device *dev)
 1956 {
 1957 	return dev->num_tc;
 1958 }
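
Taken together, a driver exposing hardware traffic classes would typically call
these helpers in this order. The sketch below is illustrative (the function name
and queue layout are hypothetical):

/* Illustrative sketch: map 2 traffic classes onto 8 TX queues. */
static int foo_setup_tc_queues(struct net_device *dev)
{
	int err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	err = netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	if (!err)
		err = netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */
	if (err)
		netdev_reset_tc(dev);
	return err;
}
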
 1959 
 1960 static inline
 1961 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 1962 					 unsigned int index)
 1963 {
 1964 	return &dev->_tx[index];
 1965 }
 1966 
 1967 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
 1968 						    const struct sk_buff *skb)
 1969 {
 1970 	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 1971 }
 1972 
 1973 static inline void netdev_for_each_tx_queue(struct net_device *dev,
 1974 					    void (*f)(struct net_device *,
 1975 						      struct netdev_queue *,
 1976 						      void *),
 1977 					    void *arg)
 1978 {
 1979 	unsigned int i;
 1980 
 1981 	for (i = 0; i < dev->num_tx_queues; i++)
 1982 		f(dev, &dev->_tx[i], arg);
 1983 }
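
The iterator is normally used with a small callback, e.g. to (re)initialize every
TX queue at setup time. A hedged sketch with hypothetical foo_* names:

static void foo_init_one_queue(struct net_device *dev,
			       struct netdev_queue *txq, void *arg)
{
	/* per-queue initialization would go here */
}

static void foo_init_queues(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, foo_init_one_queue, NULL);
}
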
 1984 
 1985 #define netdev_lockdep_set_classes(dev)				\
 1986 {								\
 1987 	static struct lock_class_key qdisc_tx_busylock_key;	\
 1988 	static struct lock_class_key qdisc_running_key;		\
 1989 	static struct lock_class_key qdisc_xmit_lock_key;	\
 1990 	static struct lock_class_key dev_addr_list_lock_key;	\
 1991 	unsigned int i;						\
 1992 								\
 1993 	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
 1994 	(dev)->qdisc_running_key = &qdisc_running_key;		\
 1995 	lockdep_set_class(&(dev)->addr_list_lock,		\
 1996 			  &dev_addr_list_lock_key); 		\
 1997 	for (i = 0; i < (dev)->num_tx_queues; i++)		\
 1998 		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
 1999 				  &qdisc_xmit_lock_key);	\
 2000 }
 2001 
 2002 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 2003 				    struct sk_buff *skb,
 2004 				    void *accel_priv);
 2005 
 2006 /* returns the headroom that the master device needs to take into account
 2007  * when forwarding to this dev
 2008  */
 2009 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
 2010 {
 2011 	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
 2012 }
 2013 
 2014 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
 2015 {
 2016 	if (dev->netdev_ops->ndo_set_rx_headroom)
 2017 		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
 2018 }
 2019 
 2020 /* set the device rx headroom to the dev's default */
 2021 static inline void netdev_reset_rx_headroom(struct net_device *dev)
 2022 {
 2023 	netdev_set_rx_headroom(dev, -1);
 2024 }
 2025 
 2026 /*
 2027  * Net namespace inlines
 2028  */
 2029 static inline
 2030 struct net *dev_net(const struct net_device *dev)
 2031 {
 2032 	return read_pnet(&dev->nd_net);
 2033 }
 2034 
 2035 static inline
 2036 void dev_net_set(struct net_device *dev, struct net *net)
 2037 {
 2038 	write_pnet(&dev->nd_net, net);
 2039 }
 2040 
 2041 static inline bool netdev_uses_dsa(struct net_device *dev)
 2042 {
 2043 #if IS_ENABLED(CONFIG_NET_DSA)
 2044 	if (dev->dsa_ptr != NULL)
 2045 		return dsa_uses_tagged_protocol(dev->dsa_ptr);
 2046 #endif
 2047 	return false;
 2048 }
 2049 
 2050 /**
 2051  *	netdev_priv - access network device private data
 2052  *	@dev: network device
 2053  *
 2054  * Get network device private data
 2055  */
 2056 static inline void *netdev_priv(const struct net_device *dev)
 2057 {
 2058 	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
 2059 }
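
The private area lives directly behind struct net_device, so a driver reaches its
own state without an extra allocation. A hedged sketch, assuming the device was
created with alloc_netdev()/alloc_etherdev() passing sizeof(struct foo_priv):

/* Hypothetical driver-private structure. */
struct foo_priv {
	unsigned long	tx_packets;
};

static void foo_count_tx(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->tx_packets++;
}
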
 2060 
 2061 /* Set the sysfs physical device reference for the network logical device.
 2062  * If set prior to registration, a symlink is created during initialization.
 2063  */
 2064 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 2065 
 2066 /* Set the sysfs device type for the network logical device to allow
 2067  * fine-grained identification of different network device types. For
 2068  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 2069  */
 2070 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
 2071 
 2072 /* Default NAPI poll() weight.
 2073  * Device drivers are strongly advised not to use a bigger value.
 2074  */
 2075 #define NAPI_POLL_WEIGHT 64
 2076 
 2077 /**
 2078  *	netif_napi_add - initialize a NAPI context
 2079  *	@dev:  network device
 2080  *	@napi: NAPI context
 2081  *	@poll: polling function
 2082  *	@weight: default weight
 2083  *
 2084  * netif_napi_add() must be used to initialize a NAPI context prior to calling
 2085  * *any* of the other NAPI-related functions.
 2086  */
 2087 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 2088 		    int (*poll)(struct napi_struct *, int), int weight);
 2089 
 2090 /**
 2091  *	netif_tx_napi_add - initialize a NAPI context
 2092  *	@dev:  network device
 2093  *	@napi: NAPI context
 2094  *	@poll: polling function
 2095  *	@weight: default weight
 2096  *
 2097  * This variant of netif_napi_add() should be used from drivers using NAPI
 2098  * to exclusively poll a TX queue.
 2099  * This avoids adding it to napi_hash[] and thus polluting that hash table.
 2100  */
 2101 static inline void netif_tx_napi_add(struct net_device *dev,
 2102 				     struct napi_struct *napi,
 2103 				     int (*poll)(struct napi_struct *, int),
 2104 				     int weight)
 2105 {
 2106 	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
 2107 	netif_napi_add(dev, napi, poll, weight);
 2108 }
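
A driver registers its poll routine once at setup time, before the interface can
be opened. A hedged sketch with a hypothetical foo_poll() and the default weight;
napi_complete_done() is assumed to be available elsewhere in this header:

/* Hypothetical driver-private state holding the NAPI context. */
struct foo_priv {
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* process up to 'budget' packets from the RX ring here */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static void foo_setup_napi(struct net_device *dev, struct foo_priv *priv)
{
	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
}
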
 2109 
 2110 /**
 2111  *  netif_napi_del - remove a NAPI context
 2112  *  @napi: NAPI context
 2113  *
 2114  *  netif_napi_del() removes a NAPI context from the network device NAPI list
 2115  */
 2116 void netif_napi_del(struct napi_struct *napi);
 2117 
 2118 struct napi_gro_cb {
 2119 	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
 2120 	void	*frag0;
 2121 
 2122 	/* Length of frag0. */
 2123 	unsigned int frag0_len;
 2124 
 2125 	/* This indicates where we are processing relative to skb->data. */
 2126 	int	data_offset;
 2127 
 2128 	/* This is non-zero if the packet cannot be merged with the new skb. */
 2129 	u16	flush;
 2130 
 2131 	/* Save the IP ID here and check when we get to the transport layer */
 2132 	u16	flush_id;
 2133 
 2134 	/* Number of segments aggregated. */
 2135 	u16	count;
 2136 
 2137 	/* Start offset for remote checksum offload */
 2138 	u16	gro_remcsum_start;
 2139 
 2140 	/* jiffies when first packet was created/queued */
 2141 	unsigned long age;
 2142 
 2143 	/* Used in ipv6_gro_receive() and foo-over-udp */
 2144 	u16	proto;
 2145 
 2146 	/* This is non-zero if the packet may be of the same flow. */
 2147 	u8	same_flow:1;
 2148 
 2149 	/* Used in tunnel GRO receive */
 2150 	u8	encap_mark:1;
 2151 
 2152 	/* GRO checksum is valid */
 2153 	u8	csum_valid:1;
 2154 
 2155 	/* Number of checksums via CHECKSUM_UNNECESSARY */
 2156 	u8	csum_cnt:3;
 2157 
 2158 	/* Free the skb? */
 2159 	u8	free:2;
 2160 #define NAPI_GRO_FREE		  1
 2161 #define NAPI_GRO_FREE_STOLEN_HEAD 2
 2162 
 2163 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 2164 	u8	is_ipv6:1;
 2165 
 2166 	/* Used in GRE, set in fou/gue_gro_receive */
 2167 	u8	is_fou:1;
 2168 
 2169 	/* Used to determine if flush_id can be ignored */
 2170 	u8	is_atomic:1;
 2171 
 2172 	/* 5 bit hole */
 2173 
 2174 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 2175 	__wsum	csum;
 2176 
 2177 	/* used in skb_gro_receive() slow path */
 2178 	struct sk_buff *last;
 2179 };
 2180 
 2181 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 2182 
 2183 struct packet_type {
 2184 	__be16			type;	/* This is really htons(ether_type). */
 2185 	struct net_device	*dev;	/* NULL is wildcarded here	     */
 2186 	int			(*func) (struct sk_buff *,
 2187 					 struct net_device *,
 2188 					 struct packet_type *,
 2189 					 struct net_device *);
 2190 	bool			(*id_match)(struct packet_type *ptype,
 2191 					    struct sock *sk);
 2192 	void			*af_packet_priv;
 2193 	struct list_head	list;
 2194 };
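
Protocol modules hook into the receive path by filling in a struct packet_type and
registering it with dev_add_pack()/dev_remove_pack() (declared further down in this
header). A hedged sketch with a hypothetical handler; the ethertype is illustrative
and ETH_P_IP is assumed from <linux/if_ether.h>:

static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	/* protocol-specific receive processing would go here */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type foo_packet_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_IP),	/* illustrative ethertype */
	.func	= foo_rcv,
};

/* dev_add_pack(&foo_packet_type);    at module init
 * dev_remove_pack(&foo_packet_type); at module exit */
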
 2195 
 2196 struct offload_callbacks {
 2197 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 2198 						netdev_features_t features);
 2199 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 2200 						 struct sk_buff *skb);
 2201 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 2202 };
 2203 
 2204 struct packet_offload {
 2205 	__be16			 type;	/* This is really htons(ether_type). */
 2206 	u16			 priority;
 2207 	struct offload_callbacks callbacks;
 2208 	struct list_head	 list;
 2209 };
 2210 
 2211 /* often modified stats are per-CPU, others are shared (netdev->stats) */
 2212 struct pcpu_sw_netstats {
 2213 	u64     rx_packets;
 2214 	u64     rx_bytes;
 2215 	u64     tx_packets;
 2216 	u64     tx_bytes;
 2217 	struct u64_stats_sync   syncp;
 2218 };
 2219 
 2220 #define __netdev_alloc_pcpu_stats(type, gfp)				\
 2221 ({									\
 2222 	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
 2223 	if (pcpu_stats)	{						\
 2224 		int __cpu;						\
 2225 		for_each_possible_cpu(__cpu) {				\
 2226 			typeof(type) *stat;				\
 2227 			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
 2228 			u64_stats_init(&stat->syncp);			\
 2229 		}							\
 2230 	}								\
 2231 	pcpu_stats;							\
 2232 })
 2233 
 2234 #define netdev_alloc_pcpu_stats(type)					\
 2235 	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
 2236 
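A driver that keeps its fast-path counters per-CPU allocates them with this helper
and updates them under the u64_stats seqcount. A hedged sketch, assuming the device
uses the tstats member of the ml_priv union above:

static int foo_init_stats(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
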
 2237 enum netdev_lag_tx_type {
 2238 	NETDEV_LAG_TX_TYPE_UNKNOWN,
 2239 	NETDEV_LAG_TX_TYPE_RANDOM,
 2240 	NETDEV_LAG_TX_TYPE_BROADCAST,
 2241 	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
 2242 	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
 2243 	NETDEV_LAG_TX_TYPE_HASH,
 2244 };
 2245 
 2246 struct netdev_lag_upper_info {
 2247 	enum netdev_lag_tx_type tx_type;
 2248 };
 2249 
 2250 struct netdev_lag_lower_state_info {
 2251 	u8 link_up : 1,
 2252 	   tx_enabled : 1;
 2253 };
 2254 
 2255 #include <linux/notifier.h>
 2256 
 2257 /* netdevice notifier chain. Please remember to update the rtnetlink
 2258  * notification exclusion list in rtnetlink_event() when adding new
 2259  * types.
 2260  */
 2261 #define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
 2262 #define NETDEV_DOWN	0x0002
 2263 #define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
 2264 				   detected a hardware crash and restarted
 2265 				   - we can use this e.g. to kick TCP sessions
 2266 				   once done */
 2267 #define NETDEV_CHANGE	0x0004	/* Notify device state change */
 2268 #define NETDEV_REGISTER 0x0005
 2269 #define NETDEV_UNREGISTER	0x0006
 2270 #define NETDEV_CHANGEMTU	0x0007 /* notify after mtu change happened */
 2271 #define NETDEV_CHANGEADDR	0x0008
 2272 #define NETDEV_GOING_DOWN	0x0009
 2273 #define NETDEV_CHANGENAME	0x000A
 2274 #define NETDEV_FEAT_CHANGE	0x000B
 2275 #define NETDEV_BONDING_FAILOVER 0x000C
 2276 #define NETDEV_PRE_UP		0x000D
 2277 #define NETDEV_PRE_TYPE_CHANGE	0x000E
 2278 #define NETDEV_POST_TYPE_CHANGE	0x000F
 2279 #define NETDEV_POST_INIT	0x0010
 2280 #define NETDEV_UNREGISTER_FINAL 0x0011
 2281 #define NETDEV_RELEASE		0x0012
 2282 #define NETDEV_NOTIFY_PEERS	0x0013
 2283 #define NETDEV_JOIN		0x0014
 2284 #define NETDEV_CHANGEUPPER	0x0015
 2285 #define NETDEV_RESEND_IGMP	0x0016
 2286 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
 2287 #define NETDEV_CHANGEINFODATA	0x0018
 2288 #define NETDEV_BONDING_INFO	0x0019
 2289 #define NETDEV_PRECHANGEUPPER	0x001A
 2290 #define NETDEV_CHANGELOWERSTATE	0x001B
 2291 #define NETDEV_UDP_TUNNEL_PUSH_INFO	0x001C
 2292 #define NETDEV_CHANGE_TX_QUEUE_LEN	0x001E
 2293 
 2294 int register_netdevice_notifier(struct notifier_block *nb);
 2295 int unregister_netdevice_notifier(struct notifier_block *nb);
 2296 
 2297 struct netdev_notifier_info {
 2298 	struct net_device *dev;
 2299 };
 2300 
 2301 struct netdev_notifier_change_info {
 2302 	struct netdev_notifier_info info; /* must be first */
 2303 	unsigned int flags_changed;
 2304 };
 2305 
 2306 struct netdev_notifier_changeupper_info {
 2307 	struct netdev_notifier_info info; /* must be first */
 2308 	struct net_device *upper_dev; /* new upper dev */
 2309 	bool master; /* is upper dev master */
 2310 	bool linking; /* is the notification for link or unlink */
 2311 	void *upper_info; /* upper dev info */
 2312 };
 2313 
 2314 struct netdev_notifier_changelowerstate_info {
 2315 	struct netdev_notifier_info info; /* must be first */
 2316 	void *lower_state_info; /* is lower dev state */
 2317 };
 2318 
 2319 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
 2320 					     struct net_device *dev)
 2321 {
 2322 	info->dev = dev;
 2323 }
 2324 
 2325 static inline struct net_device *
 2326 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
 2327 {
 2328 	return info->dev;
 2329 }
 2330 
 2331 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
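
A subsystem that wants to track interface state registers a notifier_block; the
handler recovers the device with netdev_notifier_info_to_dev(). A hedged sketch
with hypothetical foo_* names:

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		/* interface came up; react here */
		break;
	case NETDEV_GOING_DOWN:
		/* interface is about to go down */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_notifier = {
	.notifier_call = foo_netdev_event,
};

/* register_netdevice_notifier(&foo_netdev_notifier);   at init
 * unregister_netdevice_notifier(&foo_netdev_notifier); at exit */
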
 2332 
 2333 
 2334 extern rwlock_t				dev_base_lock;		/* Device list lock */
 2335 
 2336 #define for_each_netdev(net, d)		\
 2337 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
 2338 #define for_each_netdev_reverse(net, d)	\
 2339 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
 2340 #define for_each_netdev_rcu(net, d)		\
 2341 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
 2342 #define for_each_netdev_safe(net, d, n)	\
 2343 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
 2344 #define for_each_netdev_continue(net, d)		\
 2345 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
 2346 #define for_each_netdev_continue_rcu(net, d)		\
 2347 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 2348 #define for_each_netdev_in_bond_rcu(bond, slave)	\
 2349 		for_each_netdev_rcu(&init_net, slave)	\
 2350 			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 2351 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
 2352 
 2353 static inline struct net_device *next_net_device(struct net_device *dev)
 2354 {
 2355 	struct list_head *lh;
 2356 	struct net *net;
 2357 
 2358 	net = dev_net(dev);
 2359 	lh = dev->dev_list.next;
 2360 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 2361 }
 2362 
 2363 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
 2364 {
 2365 	struct list_head *lh;
 2366 	struct net *net;
 2367 
 2368 	net = dev_net(dev);
 2369 	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
 2370 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 2371 }
 2372 
 2373 static inline struct net_device *first_net_device(struct net *net)
 2374 {
 2375 	return list_empty(&net->dev_base_head) ? NULL :
 2376 		net_device_entry(net->dev_base_head.next);
 2377 }
 2378 
 2379 static inline struct net_device *first_net_device_rcu(struct net *net)
 2380 {
 2381 	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
 2382 
 2383 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 2384 }
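
These iterators walk the per-namespace device list; for_each_netdev() requires the
RTNL (or dev_base_lock), while for_each_netdev_rcu() needs an RCU read-side critical
section. A hedged sketch of the RCU variant:

static void foo_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("dev %s ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}
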
 2385 
 2386 int netdev_boot_setup_check(struct net_device *dev);
 2387 unsigned long netdev_boot_base(const char *prefix, int unit);
 2388 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 2389 				       const char *hwaddr);
 2390 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 2391 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 2392 void dev_add_pack(struct packet_type *pt);
 2393 void dev_remove_pack(struct packet_type *pt);
 2394 void __dev_remove_pack(struct packet_type *pt);
 2395 void dev_add_offload(struct packet_offload *po);
 2396 void dev_remove_offload(struct packet_offload *po);
 2397 
 2398 int dev_get_iflink(const struct net_device *dev);
 2399 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
 2400 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
 2401 				      unsigned short mask);
 2402 struct net_device *dev_get_by_name(struct net *net, const char *name);
 2403 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
 2404 struct net_device *__dev_get_by_name(struct net *net, const char *name);
 2405 int dev_alloc_name(struct net_device *dev, const char *name);
 2406 int dev_open(struct net_device *dev);
 2407 int dev_close(struct net_device *dev);
 2408 int dev_close_many(struct list_head *head, bool unlink);
 2409 void dev_disable_lro(struct net_device *dev);
 2410 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 2411 int dev_queue_xmit(struct sk_buff *skb);
 2412 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 2413 int register_netdevice(struct net_device *dev);
 2414 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 2415 void unregister_netdevice_many(struct list_head *head);
 2416 static inline void unregister_netdevice(struct net_device *dev)
 2417 {
 2418 	unregister_netdevice_queue(dev, NULL);
 2419 }
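
register_netdevice() and unregister_netdevice() expect the caller to hold the RTNL;
most drivers use the register_netdev()/unregister_netdev() wrappers, which take the
lock themselves. A hedged sketch of the raw variant, assuming rtnl_lock()/rtnl_unlock()
from <linux/rtnetlink.h>:

static int foo_register(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
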
 2420 
 2421 int netdev_refcnt_read(const struct net_device *dev);
 2422 void free_netdev(struct net_device *dev);
 2423 void netdev_freemem(struct net_device *dev);
 2424 void synchronize_net(void);
 2425 int init_dummy_netdev(struct net_device *dev);
 2426 
 2427 DECLARE_PER_CPU(int, xmit_recursion);
 2428 #define XMIT_RECURSION_LIMIT	10
 2429 
 2430 static inline int dev_recursion_level(void)
 2431 {
 2432 	return this_cpu_read(xmit_recursion);
 2433 }
 2434 
 2435 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 2436 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 2437 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 2438 int netdev_get_name(struct net *net, char *name, int ifindex);
 2439 int dev_restart(struct net_device *dev);
 2440 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 2441 
 2442 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 2443 {
 2444 	return NAPI_GRO_CB(skb)->data_offset;
 2445 }
 2446 
 2447 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
 2448 {
 2449 	return skb->len - NAPI_GRO_CB(skb)->data_offset;
 2450 }
 2451 
 2452 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
 2453 {
 2454 	NAPI_GRO_CB(skb)->data_offset += len;
 2455 }
 2456 
 2457 static inline void *skb_gro_header_fast(struct sk_buff *skb,
 2458 					unsigned int offset)
 2459 {
 2460 	return NAPI_GRO_CB(skb)->frag0 + offset;
 2461 }
 2462 
 2463 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 2464 {
 2465 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
 2466 }
 2467 
 2468 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 2469 					unsigned int offset)
 2470 {
 2471 	if (!pskb_may_pull(skb, hlen))
 2472 		return NULL;
 2473 
 2474 	NAPI_GRO_CB(skb)->frag0 = NULL;
 2475 	NAPI_GRO_CB(skb)->frag0_len = 0;
 2476 	return skb->data + offset;
 2477 }
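
gro_receive handlers use these in a fast-path/slow-path pair: try the frag0 shortcut
first and fall back to pulling the header into the linear area. A hedged sketch for a
protocol with a hypothetical header struct foohdr:

struct foohdr {
	__be16	flags;
	__be16	proto;
};

static struct foohdr *foo_gro_header(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct foohdr);
	struct foohdr *fh;

	fh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		fh = skb_gro_header_slow(skb, hlen, off);
		if (!fh)
			return NULL;	/* header not available */
	}
	return fh;
}
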
 2478 
 2479 static inline void *skb_gro_network_header(struct sk_buff *skb)
 2480 {
 2481 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
 2482 	       skb_network_offset(skb);
 2483 }
 2484 
 2485 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
 2486 					const void *start, unsigned int len)
 2487 {
 2488 	if (NAPI_GRO_CB(skb)->csum_valid)
 2489 		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
 2490 						  csum_partial(start, len, 0));
 2491 }
 2492 
 2493 /* GRO checksum functions. These are logical equivalents of the normal
 2494  * checksum functions (in skbuff.h) except that they operate on the GRO
 2495  * offsets and fields in sk_buff.
 2496  */
 2497 
 2498 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
 2499 
 2500 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
 2501 {
 2502 	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
 2503 }
 2504 
 2505 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
 2506 						      bool zero_okay,
 2507 						      __sum16 check)
 2508 {
 2509 	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
 2510 		skb_checksum_start_offset(skb) <
 2511 		 skb_gro_offset(skb)) &&
 2512 		!skb_at_gro_remcsum_start(skb) &&
 2513 		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 2514 		(!zero_okay || check));
 2515 }
 2516 
 2517 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
 2518 							   __wsum psum)
 2519 {
 2520 	if (NAPI_GRO_CB(skb)->csum_valid &&
 2521 	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
 2522 		return 0;
 2523 
 2524 	NAPI_GRO_CB(skb)->csum = psum;
 2525 
 2526 	return __skb_gro_checksum_complete(skb);
 2527 }
 2528 
 2529 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
 2530 {
 2531 	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
 2532 		/* Consume a checksum from CHECKSUM_UNNECESSARY */
 2533 		NAPI_GRO_CB(skb)->csum_cnt--;
 2534 	} else {
 2535 		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
 2536 		 * verified a new top level checksum or an encapsulated one
 2537 		 * during GRO. This saves work if we fall back to the normal path.
 2538 		 */
 2539 		__skb_incr_checksum_unnecessary(skb);
 2540 	}
 2541 }
 2542 
 2543 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
 2544 				    compute_pseudo)			\
 2545 ({									\
 2546 	__sum16 __ret = 0;						\
 2547 	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
 2548 		__ret = __skb_gro_checksum_validate_complete(skb,	\
 2549 				compute_pseudo(skb, proto));		\
 2550 	if (__ret)							\
 2551 		__skb_mark_checksum_bad(skb);				\
 2552 	else								\
 2553 		skb_gro_incr_csum_unnecessary(skb);			\
 2554 	__ret;								\
 2555 })
 2556 
 2557 #define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
 2558 	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
 2559 
 2560 #define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
 2561 					     compute_pseudo)		\
 2562 	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
 2563 
 2564 #define skb_gro_checksum_simple_validate(skb)				\
 2565 	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
 2566 
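/* Hedged, illustrative sketch (not part of the kernel source above): how a
 * protocol ->gro_receive() handler would typically use the validation macros
 * just defined — verify the transport checksum over the GRO offsets before
 * aggregating, and flush on failure. inet_gro_compute_pseudo() and
 * tcp_gro_receive() are assumed to be the usual IPv4/TCP helpers of this
 * kernel; the "example_" name is made up.
 */
static struct sk_buff **example_tcp4_gro_receive(struct sk_buff **head,
						 struct sk_buff *skb)
{
	/* Cannot prove the TCP checksum is good: give up on aggregation */
	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}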
 2567 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
 2568 {
 2569 	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 2570 		!NAPI_GRO_CB(skb)->csum_valid);
 2571 }
 2572 
 2573 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
 2574 					      __sum16 check, __wsum pseudo)
 2575 {
 2576 	NAPI_GRO_CB(skb)->csum = ~pseudo;
 2577 	NAPI_GRO_CB(skb)->csum_valid = 1;
 2578 }
 2579 
 2580 #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
 2581 do {									\
 2582 	if (__skb_gro_checksum_convert_check(skb))			\
 2583 		__skb_gro_checksum_convert(skb, check,			\
 2584 					   compute_pseudo(skb, proto));	\
 2585 } while (0)
 2586 
 2587 struct gro_remcsum {
 2588 	int offset;
 2589 	__wsum delta;
 2590 };
 2591 
 2592 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
 2593 {
 2594 	grc->offset = 0;
 2595 	grc->delta = 0;
 2596 }
 2597 
 2598 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
 2599 					    unsigned int off, size_t hdrlen,
 2600 					    int start, int offset,
 2601 					    struct gro_remcsum *grc,
 2602 					    bool nopartial)
 2603 {
 2604 	__wsum delta;
 2605 	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 2606 
 2607 	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
 2608 
 2609 	if (!nopartial) {
 2610 		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
 2611 		return ptr;
 2612 	}
 2613 
 2614 	ptr = skb_gro_header_fast(skb, off);
 2615 	if (skb_gro_header_hard(skb, off + plen)) {
 2616 		ptr = skb_gro_header_slow(skb, off + plen, off);
 2617 		if (!ptr)
 2618 			return NULL;
 2619 	}
 2620 
 2621 	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
 2622 			       start, offset);
 2623 
 2624 	/* Adjust skb->csum since we changed the packet */
 2625 	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
 2626 
 2627 	grc->offset = off + hdrlen + offset;
 2628 	grc->delta = delta;
 2629 
 2630 	return ptr;
 2631 }
 2632 
 2633 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 2634 					   struct gro_remcsum *grc)
 2635 {
 2636 	void *ptr;
 2637 	size_t plen = grc->offset + sizeof(u16);
 2638 
 2639 	if (!grc->delta)
 2640 		return;
 2641 
 2642 	ptr = skb_gro_header_fast(skb, grc->offset);
 2643 	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
 2644 		ptr = skb_gro_header_slow(skb, plen, grc->offset);
 2645 		if (!ptr)
 2646 			return;
 2647 	}
 2648 
 2649 	remcsum_unadjust((__sum16 *)ptr, grc->delta);
 2650 }
 2651 
 2652 struct skb_csum_offl_spec {
 2653 	__u16		ipv4_okay:1,
 2654 			ipv6_okay:1,
 2655 			encap_okay:1,
 2656 			ip_options_okay:1,
 2657 			ext_hdrs_okay:1,
 2658 			tcp_okay:1,
 2659 			udp_okay:1,
 2660 			sctp_okay:1,
 2661 			vlan_okay:1,
 2662 			no_encapped_ipv6:1,
 2663 			no_not_encapped:1;
 2664 };
 2665 
 2666 bool __skb_csum_offload_chk(struct sk_buff *skb,
 2667 			    const struct skb_csum_offl_spec *spec,
 2668 			    bool *csum_encapped,
 2669 			    bool csum_help);
 2670 
 2671 static inline bool skb_csum_offload_chk(struct sk_buff *skb,
 2672 					const struct skb_csum_offl_spec *spec,
 2673 					bool *csum_encapped,
 2674 					bool csum_help)
 2675 {
 2676 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 2677 		return false;
 2678 
 2679 	return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
 2680 }
 2681 
 2682 static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
 2683 					     const struct skb_csum_offl_spec *spec)
 2684 {
 2685 	bool csum_encapped;
 2686 
 2687 	return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
 2688 }
 2689 
 2690 static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
 2691 {
 2692 	static const struct skb_csum_offl_spec csum_offl_spec = {
 2693 		.ipv4_okay = 1,
 2694 		.ip_options_okay = 1,
 2695 		.ipv6_okay = 1,
 2696 		.vlan_okay = 1,
 2697 		.tcp_okay = 1,
 2698 		.udp_okay = 1,
 2699 	};
 2700 
 2701 	return skb_csum_offload_chk_help(skb, &csum_offl_spec);
 2702 }
 2703 
 2704 static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
 2705 {
 2706 	static const struct skb_csum_offl_spec csum_offl_spec = {
 2707 		.ipv4_okay = 1,
 2708 		.ip_options_okay = 1,
 2709 		.tcp_okay = 1,
 2710 		.udp_okay = 1,
 2711 		.vlan_okay = 1,
 2712 	};
 2713 
 2714 	return skb_csum_offload_chk_help(skb, &csum_offl_spec);
 2715 }
 2716 
 2717 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 2718 				  unsigned short type,
 2719 				  const void *daddr, const void *saddr,
 2720 				  unsigned int len)
 2721 {
 2722 	if (!dev->header_ops || !dev->header_ops->create)
 2723 		return 0;
 2724 
 2725 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
 2726 }
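/* Hedged, illustrative sketch (not part of the kernel source above): calling
 * dev_hard_header() to have the device's header_ops prepend a link-layer
 * header. For an Ethernet device this resolves to eth_header(); the skb is
 * assumed to have at least dev->hard_header_len bytes of headroom. ETH_P_IP
 * and dest_mac are illustrative choices.
 */
static int example_prepend_eth_header(struct sk_buff *skb,
				      struct net_device *dev,
				      const unsigned char *dest_mac)
{
	return dev_hard_header(skb, dev, ETH_P_IP, dest_mac,
			       dev->dev_addr, skb->len);
}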
 2727 
 2728 static inline int dev_parse_header(const struct sk_buff *skb,
 2729 				   unsigned char *haddr)
 2730 {
 2731 	const struct net_device *dev = skb->dev;
 2732 
 2733 	if (!dev->header_ops || !dev->header_ops->parse)
 2734 		return 0;
 2735 	return dev->header_ops->parse(skb, haddr);
 2736 }
 2737 
 2738 /* ll_header must have at least hard_header_len allocated */
 2739 static inline bool dev_validate_header(const struct net_device *dev,
 2740 				       char *ll_header, int len)
 2741 {
 2742 	if (likely(len >= dev->hard_header_len))
 2743 		return true;
 2744 
 2745 	if (capable(CAP_SYS_RAWIO)) {
 2746 		memset(ll_header + len, 0, dev->hard_header_len - len);
 2747 		return true;
 2748 	}
 2749 
 2750 	if (dev->header_ops && dev->header_ops->validate)
 2751 		return dev->header_ops->validate(ll_header, len);
 2752 
 2753 	return false;
 2754 }
 2755 
 2756 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 2757 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 2758 static inline int unregister_gifconf(unsigned int family)
 2759 {
 2760 	return register_gifconf(family, NULL);
 2761 }
 2762 
 2763 #ifdef CONFIG_NET_FLOW_LIMIT
2764 #define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be a power of 2 and must not overflow the u8 bucket counters */
 2765 struct sd_flow_limit {
 2766 	u64			count;
 2767 	unsigned int		num_buckets;
 2768 	unsigned int		history_head;
 2769 	u16			history[FLOW_LIMIT_HISTORY];
 2770 	u8			buckets[];
 2771 };
 2772 
 2773 extern int netdev_flow_limit_table_len;
 2774 #endif /* CONFIG_NET_FLOW_LIMIT */
 2775 
 2776 /*
 2777  * Incoming packets are placed on per-CPU queues
 2778  */
 2779 struct softnet_data {
 2780 	struct list_head	poll_list;
 2781 	struct sk_buff_head	process_queue;
 2782 
 2783 	/* stats */
 2784 	unsigned int		processed;
 2785 	unsigned int		time_squeeze;
 2786 	unsigned int		received_rps;
 2787 #ifdef CONFIG_RPS
 2788 	struct softnet_data	*rps_ipi_list;
 2789 #endif
 2790 #ifdef CONFIG_NET_FLOW_LIMIT
 2791 	struct sd_flow_limit __rcu *flow_limit;
 2792 #endif
 2793 	struct Qdisc		*output_queue;
 2794 	struct Qdisc		**output_queue_tailp;
 2795 	struct sk_buff		*completion_queue;
 2796 
 2797 #ifdef CONFIG_RPS
 2798 	/* input_queue_head should be written by cpu owning this struct,
 2799 	 * and only read by other cpus. Worth using a cache line.
 2800 	 */
 2801 	unsigned int		input_queue_head ____cacheline_aligned_in_smp;
 2802 
 2803 	/* Elements below can be accessed between CPUs for RPS/RFS */
 2804 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 2805 	struct softnet_data	*rps_ipi_next;
 2806 	unsigned int		cpu;
 2807 	unsigned int		input_queue_tail;
 2808 #endif
 2809 	unsigned int		dropped;
 2810 	struct sk_buff_head	input_pkt_queue;
 2811 	struct napi_struct	backlog;
 2812 
 2813 };
 2814 
 2815 static inline void input_queue_head_incr(struct softnet_data *sd)
 2816 {
 2817 #ifdef CONFIG_RPS
 2818 	sd->input_queue_head++;
 2819 #endif
 2820 }
 2821 
 2822 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 2823 					      unsigned int *qtail)
 2824 {
 2825 #ifdef CONFIG_RPS
 2826 	*qtail = ++sd->input_queue_tail;
 2827 #endif
 2828 }
 2829 
 2830 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 2831 
 2832 void __netif_schedule(struct Qdisc *q);
 2833 void netif_schedule_queue(struct netdev_queue *txq);
 2834 
 2835 static inline void netif_tx_schedule_all(struct net_device *dev)
 2836 {
 2837 	unsigned int i;
 2838 
 2839 	for (i = 0; i < dev->num_tx_queues; i++)
 2840 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 2841 }
 2842 
 2843 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 2844 {
 2845 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2846 }
 2847 
 2848 /**
 2849  *	netif_start_queue - allow transmit
 2850  *	@dev: network device
 2851  *
 2852  *	Allow upper layers to call the device hard_start_xmit routine.
 2853  */
 2854 static inline void netif_start_queue(struct net_device *dev)
 2855 {
 2856 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 2857 }
 2858 
 2859 static inline void netif_tx_start_all_queues(struct net_device *dev)
 2860 {
 2861 	unsigned int i;
 2862 
 2863 	for (i = 0; i < dev->num_tx_queues; i++) {
 2864 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2865 		netif_tx_start_queue(txq);
 2866 	}
 2867 }
 2868 
 2869 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
 2870 
 2871 /**
 2872  *	netif_wake_queue - restart transmit
 2873  *	@dev: network device
 2874  *
 2875  *	Allow upper layers to call the device hard_start_xmit routine.
 2876  *	Used for flow control when transmit resources are available.
 2877  */
 2878 static inline void netif_wake_queue(struct net_device *dev)
 2879 {
 2880 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 2881 }
 2882 
 2883 static inline void netif_tx_wake_all_queues(struct net_device *dev)
 2884 {
 2885 	unsigned int i;
 2886 
 2887 	for (i = 0; i < dev->num_tx_queues; i++) {
 2888 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 2889 		netif_tx_wake_queue(txq);
 2890 	}
 2891 }
 2892 
 2893 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 2894 {
 2895 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2896 }
 2897 
 2898 /**
2899  *	netif_stop_queue - stop transmitting packets
 2900  *	@dev: network device
 2901  *
 2902  *	Stop upper layers calling the device hard_start_xmit routine.
 2903  *	Used for flow control when transmit resources are unavailable.
 2904  */
 2905 static inline void netif_stop_queue(struct net_device *dev)
 2906 {
 2907 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 2908 }
 2909 
 2910 void netif_tx_stop_all_queues(struct net_device *dev);
 2911 
 2912 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 2913 {
 2914 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 2915 }
 2916 
 2917 /**
2918  *	netif_queue_stopped - test if transmit queue is flow blocked
 2919  *	@dev: network device
 2920  *
 2921  *	Test if transmit queue on device is currently unable to send.
 2922  */
 2923 static inline bool netif_queue_stopped(const struct net_device *dev)
 2924 {
 2925 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 2926 }
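/* Hedged, illustrative sketch (not part of the kernel source above): the
 * usual driver flow-control pattern built on the helpers just defined — stop
 * the queue from ->ndo_start_xmit() when the TX ring fills up, and wake it
 * again from the TX-completion handler once descriptors are reclaimed.
 * free_slots and wake_threshold stand in for the driver's own descriptor
 * accounting.
 */
static void example_tx_flow_control(struct net_device *dev,
				    unsigned int free_slots,
				    unsigned int wake_threshold)
{
	if (free_slots == 0 && !netif_queue_stopped(dev))
		netif_stop_queue(dev);	/* ring full: apply backpressure */
	else if (free_slots >= wake_threshold && netif_queue_stopped(dev))
		netif_wake_queue(dev);	/* room again: resume transmits */
}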
 2927 
 2928 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
 2929 {
 2930 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
 2931 }
 2932 
 2933 static inline bool
 2934 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
 2935 {
 2936 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 2937 }
 2938 
 2939 static inline bool
 2940 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
 2941 {
 2942 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
 2943 }
 2944 
 2945 /**
 2946  *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 2947  *	@dev_queue: pointer to transmit queue
 2948  *
 2949  * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 2950  * to give appropriate hint to the CPU.
 2951  */
 2952 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
 2953 {
 2954 #ifdef CONFIG_BQL
 2955 	prefetchw(&dev_queue->dql.num_queued);
 2956 #endif
 2957 }
 2958 
 2959 /**
 2960  *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 2961  *	@dev_queue: pointer to transmit queue
 2962  *
 2963  * BQL enabled drivers might use this helper in their TX completion path,
 2964  * to give appropriate hint to the CPU.
 2965  */
 2966 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
 2967 {
 2968 #ifdef CONFIG_BQL
 2969 	prefetchw(&dev_queue->dql.limit);
 2970 #endif
 2971 }
 2972 
 2973 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 2974 					unsigned int bytes)
 2975 {
 2976 #ifdef CONFIG_BQL
 2977 	dql_queued(&dev_queue->dql, bytes);
 2978 
 2979 	if (likely(dql_avail(&dev_queue->dql) >= 0))
 2980 		return;
 2981 
 2982 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2983 
 2984 	/*
 2985 	 * The XOFF flag must be set before checking the dql_avail below,
 2986 	 * because in netdev_tx_completed_queue we update the dql_completed
 2987 	 * before checking the XOFF flag.
 2988 	 */
 2989 	smp_mb();
 2990 
 2991 	/* check again in case another CPU has just made room avail */
 2992 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
 2993 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 2994 #endif
 2995 }
 2996 
 2997 /**
 2998  * 	netdev_sent_queue - report the number of bytes queued to hardware
 2999  * 	@dev: network device
 3000  * 	@bytes: number of bytes queued to the hardware device queue
 3001  *
 3002  * 	Report the number of bytes queued for sending/completion to the network
 3003  * 	device hardware queue. @bytes should be a good approximation and should
 3004  * 	exactly match netdev_completed_queue() @bytes
 3005  */
 3006 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
 3007 {
 3008 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
 3009 }
 3010 
 3011 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 3012 					     unsigned int pkts, unsigned int bytes)
 3013 {
 3014 #ifdef CONFIG_BQL
 3015 	if (unlikely(!bytes))
 3016 		return;
 3017 
 3018 	dql_completed(&dev_queue->dql, bytes);
 3019 
 3020 	/*
3021 	 * Without the memory barrier there is a small possibility that
 3022 	 * netdev_tx_sent_queue will miss the update and cause the queue to
 3023 	 * be stopped forever
 3024 	 */
 3025 	smp_mb();
 3026 
 3027 	if (dql_avail(&dev_queue->dql) < 0)
 3028 		return;
 3029 
 3030 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
 3031 		netif_schedule_queue(dev_queue);
 3032 #endif
 3033 }
 3034 
 3035 /**
 3036  * 	netdev_completed_queue - report bytes and packets completed by device
 3037  * 	@dev: network device
 3038  * 	@pkts: actual number of packets sent over the medium
 3039  * 	@bytes: actual number of bytes sent over the medium
 3040  *
 3041  * 	Report the number of bytes and packets transmitted by the network device
 3042  * 	hardware queue over the physical medium, @bytes must exactly match the
 3043  * 	@bytes amount passed to netdev_sent_queue()
 3044  */
 3045 static inline void netdev_completed_queue(struct net_device *dev,
 3046 					  unsigned int pkts, unsigned int bytes)
 3047 {
 3048 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
 3049 }
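/* Hedged, illustrative sketch (not part of the kernel source above): byte
 * queue limits (BQL) accounting with the helpers just defined — report bytes
 * handed to the hardware from the xmit path and report what the hardware
 * actually completed from the TX interrupt, using matching byte counts on
 * both sides. Both functions are placeholders for a driver's own code.
 */
static void example_bql_on_xmit(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);	/* queued towards hardware */
}

static void example_bql_on_tx_complete(struct net_device *dev,
				       unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);	/* must match bytes reported as sent */
}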
 3050 
 3051 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
 3052 {
 3053 #ifdef CONFIG_BQL
 3054 	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
 3055 	dql_reset(&q->dql);
 3056 #endif
 3057 }
 3058 
 3059 /**
 3060  * 	netdev_reset_queue - reset the packets and bytes count of a network device
 3061  * 	@dev_queue: network device
 3062  *
 3063  * 	Reset the bytes and packet count of a network device and clear the
 3064  * 	software flow control OFF bit for this network device
 3065  */
 3066 static inline void netdev_reset_queue(struct net_device *dev_queue)
 3067 {
 3068 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 3069 }
 3070 
 3071 /**
 3072  * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 3073  * 	@dev: network device
 3074  * 	@queue_index: given tx queue index
 3075  *
 3076  * 	Returns 0 if given tx queue index >= number of device tx queues,
 3077  * 	otherwise returns the originally passed tx queue index.
 3078  */
 3079 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
 3080 {
 3081 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 3082 		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
 3083 				     dev->name, queue_index,
 3084 				     dev->real_num_tx_queues);
 3085 		return 0;
 3086 	}
 3087 
 3088 	return queue_index;
 3089 }
 3090 
 3091 /**
 3092  *	netif_running - test if up
 3093  *	@dev: network device
 3094  *
 3095  *	Test if the device has been brought up.
 3096  */
 3097 static inline bool netif_running(const struct net_device *dev)
 3098 {
 3099 	return test_bit(__LINK_STATE_START, &dev->state);
 3100 }
 3101 
 3102 /*
 3103  * Routines to manage the subqueues on a device.  We only need start,
 3104  * stop, and a check if it's stopped.  All other device management is
 3105  * done at the overall netdevice level.
 3106  * Also test the device if we're multiqueue.
 3107  */
 3108 
 3109 /**
 3110  *	netif_start_subqueue - allow sending packets on subqueue
 3111  *	@dev: network device
 3112  *	@queue_index: sub queue index
 3113  *
 3114  * Start individual transmit queue of a device with multiple transmit queues.
 3115  */
 3116 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 3117 {
 3118 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 3119 
 3120 	netif_tx_start_queue(txq);
 3121 }
 3122 
 3123 /**
 3124  *	netif_stop_subqueue - stop sending packets on subqueue
 3125  *	@dev: network device
 3126  *	@queue_index: sub queue index
 3127  *
 3128  * Stop individual transmit queue of a device with multiple transmit queues.
 3129  */
 3130 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 3131 {
 3132 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 3133 	netif_tx_stop_queue(txq);
 3134 }
 3135 
 3136 /**
 3137  *	netif_subqueue_stopped - test status of subqueue
 3138  *	@dev: network device
 3139  *	@queue_index: sub queue index
 3140  *
 3141  * Check individual transmit queue of a device with multiple transmit queues.
 3142  */
 3143 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
 3144 					    u16 queue_index)
 3145 {
 3146 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 3147 
 3148 	return netif_tx_queue_stopped(txq);
 3149 }
 3150 
 3151 static inline bool netif_subqueue_stopped(const struct net_device *dev,
 3152 					  struct sk_buff *skb)
 3153 {
 3154 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 3155 }
 3156 
 3157 void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
 3158 
 3159 #ifdef CONFIG_XPS
 3160 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 3161 			u16 index);
 3162 #else
 3163 static inline int netif_set_xps_queue(struct net_device *dev,
 3164 				      const struct cpumask *mask,
 3165 				      u16 index)
 3166 {
 3167 	return 0;
 3168 }
 3169 #endif
 3170 
 3171 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 3172 		  unsigned int num_tx_queues);
 3173 
 3174 /*
 3175  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 3176  * as a distribution range limit for the returned value.
 3177  */
 3178 static inline u16 skb_tx_hash(const struct net_device *dev,
 3179 			      struct sk_buff *skb)
 3180 {
 3181 	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
 3182 }
 3183 
 3184 /**
 3185  *	netif_is_multiqueue - test if device has multiple transmit queues
 3186  *	@dev: network device
 3187  *
 3188  * Check if device has multiple transmit queues
 3189  */
 3190 static inline bool netif_is_multiqueue(const struct net_device *dev)
 3191 {
 3192 	return dev->num_tx_queues > 1;
 3193 }
 3194 
 3195 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 3196 
 3197 #ifdef CONFIG_SYSFS
 3198 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 3199 #else
 3200 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
 3201 						unsigned int rxq)
 3202 {
 3203 	return 0;
 3204 }
 3205 #endif
 3206 
 3207 #ifdef CONFIG_SYSFS
 3208 static inline unsigned int get_netdev_rx_queue_index(
 3209 		struct netdev_rx_queue *queue)
 3210 {
 3211 	struct net_device *dev = queue->dev;
 3212 	int index = queue - dev->_rx;
 3213 
 3214 	BUG_ON(index >= dev->num_rx_queues);
 3215 	return index;
 3216 }
 3217 #endif
 3218 
 3219 #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
 3220 int netif_get_num_default_rss_queues(void);
 3221 
 3222 enum skb_free_reason {
 3223 	SKB_REASON_CONSUMED,
 3224 	SKB_REASON_DROPPED,
 3225 };
 3226 
 3227 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
 3228 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
 3229 
 3230 /*
 3231  * It is not allowed to call kfree_skb() or consume_skb() from hardware
 3232  * interrupt context or with hardware interrupts being disabled.
 3233  * (in_irq() || irqs_disabled())
 3234  *
3235  * We provide four helpers that can be used in the following contexts:
 3236  *
 3237  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 3238  *  replacing kfree_skb(skb)
 3239  *
 3240  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 3241  *  Typically used in place of consume_skb(skb) in TX completion path
 3242  *
 3243  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 3244  *  replacing kfree_skb(skb)
 3245  *
 3246  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 3247  *  and consumed a packet. Used in place of consume_skb(skb)
 3248  */
 3249 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 3250 {
 3251 	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
 3252 }
 3253 
 3254 static inline void dev_consume_skb_irq(struct sk_buff *skb)
 3255 {
 3256 	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
 3257 }
 3258 
 3259 static inline void dev_kfree_skb_any(struct sk_buff *skb)
 3260 {
 3261 	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
 3262 }
 3263 
 3264 static inline void dev_consume_skb_any(struct sk_buff *skb)
 3265 {
 3266 	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 3267 }
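/* Hedged, illustrative sketch (not part of the kernel source above): picking
 * the right helper from the family just defined in a TX-completion handler
 * that may run in hard-IRQ context. Successfully sent packets are "consumed"
 * while errored ones are "dropped", so drop tracing stays meaningful.
 */
static void example_free_completed_skb(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* counts as a drop */
}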
 3268 
 3269 int netif_rx(struct sk_buff *skb);
 3270 int netif_rx_ni(struct sk_buff *skb);
 3271 int netif_receive_skb(struct sk_buff *skb);
 3272 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 3273 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 3274 struct sk_buff *napi_get_frags(struct napi_struct *napi);
 3275 gro_result_t napi_gro_frags(struct napi_struct *napi);
 3276 struct packet_offload *gro_find_receive_by_type(__be16 type);
 3277 struct packet_offload *gro_find_complete_by_type(__be16 type);
 3278 
 3279 static inline void napi_free_frags(struct napi_struct *napi)
 3280 {
 3281 	kfree_skb(napi->skb);
 3282 	napi->skb = NULL;
 3283 }
 3284 
 3285 bool netdev_is_rx_handler_busy(struct net_device *dev);
 3286 int netdev_rx_handler_register(struct net_device *dev,
 3287 			       rx_handler_func_t *rx_handler,
 3288 			       void *rx_handler_data);
 3289 void netdev_rx_handler_unregister(struct net_device *dev);
 3290 
 3291 bool dev_valid_name(const char *name);
 3292 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 3293 int dev_ethtool(struct net *net, struct ifreq *);
 3294 unsigned int dev_get_flags(const struct net_device *);
 3295 int __dev_change_flags(struct net_device *, unsigned int flags);
 3296 int dev_change_flags(struct net_device *, unsigned int);
 3297 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
 3298 			unsigned int gchanges);
 3299 int dev_change_name(struct net_device *, const char *);
 3300 int dev_set_alias(struct net_device *, const char *, size_t);
 3301 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 3302 int dev_set_mtu(struct net_device *, int);
 3303 void dev_set_group(struct net_device *, int);
 3304 int dev_set_mac_address(struct net_device *, struct sockaddr *);
 3305 int dev_change_carrier(struct net_device *, bool new_carrier);
 3306 int dev_get_phys_port_id(struct net_device *dev,
 3307 			 struct netdev_phys_item_id *ppid);
 3308 int dev_get_phys_port_name(struct net_device *dev,
 3309 			   char *name, size_t len);
 3310 int dev_change_proto_down(struct net_device *dev, bool proto_down);
 3311 int dev_change_xdp_fd(struct net_device *dev, int fd);
 3312 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 3313 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 3314 				    struct netdev_queue *txq, int *ret);
 3315 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 3316 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 3317 bool is_skb_forwardable(const struct net_device *dev,
 3318 			const struct sk_buff *skb);
 3319 
 3320 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 3321 
 3322 extern int		netdev_budget;
 3323 
 3324 /* Called by rtnetlink.c:rtnl_unlock() */
 3325 void netdev_run_todo(void);
 3326 
 3327 /**
 3328  *	dev_put - release reference to device
 3329  *	@dev: network device
 3330  *
 3331  * Release reference to device to allow it to be freed.
 3332  */
 3333 static inline void dev_put(struct net_device *dev)
 3334 {
 3335 	this_cpu_dec(*dev->pcpu_refcnt);
 3336 }
 3337 
 3338 /**
 3339  *	dev_hold - get reference to device
 3340  *	@dev: network device
 3341  *
 3342  * Hold reference to device to keep it from being freed.
 3343  */
 3344 static inline void dev_hold(struct net_device *dev)
 3345 {
 3346 	this_cpu_inc(*dev->pcpu_refcnt);
 3347 }
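/* Hedged, illustrative sketch (not part of the kernel source above): the
 * reference-counting pattern for dev_hold()/dev_put(). Take a reference while
 * a pointer to the device is kept beyond the current context (e.g. stashed
 * for deferred work) and have the deferred user drop it when done, so
 * unregister cannot free the device underneath it.
 */
static void example_stash_device(struct net_device *dev)
{
	dev_hold(dev);	/* pin the device */
	/* ... hand dev to deferred work; that work calls dev_put(dev) when finished ... */
}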
 3348 
 3349 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
3350  * and _off may be called from IRQ context, but it is the caller
 3351  * who is responsible for serialization of these calls.
 3352  *
 3353  * The name carrier is inappropriate, these functions should really be
 3354  * called netif_lowerlayer_*() because they represent the state of any
 3355  * kind of lower layer not just hardware media.
 3356  */
 3357 
 3358 void linkwatch_init_dev(struct net_device *dev);
 3359 void linkwatch_fire_event(struct net_device *dev);
 3360 void linkwatch_forget_dev(struct net_device *dev);
 3361 
 3362 /**
 3363  *	netif_carrier_ok - test if carrier present
 3364  *	@dev: network device
 3365  *
 3366  * Check if carrier is present on device
 3367  */
 3368 static inline bool netif_carrier_ok(const struct net_device *dev)
 3369 {
 3370 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 3371 }
 3372 
 3373 unsigned long dev_trans_start(struct net_device *dev);
 3374 
 3375 void __netdev_watchdog_up(struct net_device *dev);
 3376 
 3377 void netif_carrier_on(struct net_device *dev);
 3378 
 3379 void netif_carrier_off(struct net_device *dev);
 3380 
 3381 /**
 3382  *	netif_dormant_on - mark device as dormant.
 3383  *	@dev: network device
 3384  *
 3385  * Mark device as dormant (as per RFC2863).
 3386  *
 3387  * The dormant state indicates that the relevant interface is not
 3388  * actually in a condition to pass packets (i.e., it is not 'up') but is
 3389  * in a "pending" state, waiting for some external event.  For "on-
 3390  * demand" interfaces, this new state identifies the situation where the
 3391  * interface is waiting for events to place it in the up state.
 3392  */
 3393 static inline void netif_dormant_on(struct net_device *dev)
 3394 {
 3395 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
 3396 		linkwatch_fire_event(dev);
 3397 }
 3398 
 3399 /**
 3400  *	netif_dormant_off - set device as not dormant.
 3401  *	@dev: network device
 3402  *
 3403  * Device is not in dormant state.
 3404  */
 3405 static inline void netif_dormant_off(struct net_device *dev)
 3406 {
 3407 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
 3408 		linkwatch_fire_event(dev);
 3409 }
 3410 
 3411 /**
3412  *	netif_dormant - test if device is dormant
3413  *	@dev: network device
3414  *
3415  * Check if the device is in the dormant state
 3416  */
 3417 static inline bool netif_dormant(const struct net_device *dev)
 3418 {
 3419 	return test_bit(__LINK_STATE_DORMANT, &dev->state);
 3420 }
 3421 
 3422 
 3423 /**
 3424  *	netif_oper_up - test if device is operational
 3425  *	@dev: network device
 3426  *
 3427  * Check if carrier is operational
 3428  */
 3429 static inline bool netif_oper_up(const struct net_device *dev)
 3430 {
 3431 	return (dev->operstate == IF_OPER_UP ||
 3432 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 3433 }
 3434 
 3435 /**
 3436  *	netif_device_present - is device available or removed
 3437  *	@dev: network device
 3438  *
 3439  * Check if device has not been removed from system.
 3440  */
 3441 static inline bool netif_device_present(struct net_device *dev)
 3442 {
 3443 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 3444 }
 3445 
 3446 void netif_device_detach(struct net_device *dev);
 3447 
 3448 void netif_device_attach(struct net_device *dev);
 3449 
 3450 /*
 3451  * Network interface message level settings
 3452  */
 3453 
 3454 enum {
 3455 	NETIF_MSG_DRV		= 0x0001,
 3456 	NETIF_MSG_PROBE		= 0x0002,
 3457 	NETIF_MSG_LINK		= 0x0004,
 3458 	NETIF_MSG_TIMER		= 0x0008,
 3459 	NETIF_MSG_IFDOWN	= 0x0010,
 3460 	NETIF_MSG_IFUP		= 0x0020,
 3461 	NETIF_MSG_RX_ERR	= 0x0040,
 3462 	NETIF_MSG_TX_ERR	= 0x0080,
 3463 	NETIF_MSG_TX_QUEUED	= 0x0100,
 3464 	NETIF_MSG_INTR		= 0x0200,
 3465 	NETIF_MSG_TX_DONE	= 0x0400,
 3466 	NETIF_MSG_RX_STATUS	= 0x0800,
 3467 	NETIF_MSG_PKTDATA	= 0x1000,
 3468 	NETIF_MSG_HW		= 0x2000,
 3469 	NETIF_MSG_WOL		= 0x4000,
 3470 };
 3471 
 3472 #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
 3473 #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
 3474 #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
 3475 #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
 3476 #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
 3477 #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
 3478 #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
 3479 #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
 3480 #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
 3481 #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
 3482 #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
 3483 #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
 3484 #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
 3485 #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
 3486 #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
 3487 
 3488 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 3489 {
 3490 	/* use default */
 3491 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
 3492 		return default_msg_enable_bits;
 3493 	if (debug_value == 0)	/* no output */
 3494 		return 0;
 3495 	/* set low N bits */
 3496 	return (1 << debug_value) - 1;
 3497 }
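/* Hedged, illustrative sketch (not part of the kernel source above): how a
 * driver typically uses netif_msg_init() and the netif_msg_*() tests — turn a
 * module "debug" parameter into a msg_enable bitmap at probe time, then gate
 * log output on it. struct example_priv, its msg_enable field and the debug
 * value are illustrative.
 */
struct example_priv {
	struct net_device *dev;
	u32 msg_enable;
};

static void example_init_msg_level(struct example_priv *priv, int debug)
{
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);

	if (netif_msg_probe(priv))
		netdev_info(priv->dev, "message level 0x%x\n",
			    priv->msg_enable);
}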
 3498 
 3499 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 3500 {
 3501 	spin_lock(&txq->_xmit_lock);
 3502 	txq->xmit_lock_owner = cpu;
 3503 }
 3504 
 3505 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 3506 {
 3507 	spin_lock_bh(&txq->_xmit_lock);
 3508 	txq->xmit_lock_owner = smp_processor_id();
 3509 }
 3510 
 3511 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 3512 {
 3513 	bool ok = spin_trylock(&txq->_xmit_lock);
 3514 	if (likely(ok))
 3515 		txq->xmit_lock_owner = smp_processor_id();
 3516 	return ok;
 3517 }
 3518 
 3519 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 3520 {
 3521 	txq->xmit_lock_owner = -1;
 3522 	spin_unlock(&txq->_xmit_lock);
 3523 }
 3524 
 3525 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 3526 {
 3527 	txq->xmit_lock_owner = -1;
 3528 	spin_unlock_bh(&txq->_xmit_lock);
 3529 }
 3530 
 3531 static inline void txq_trans_update(struct netdev_queue *txq)
 3532 {
 3533 	if (txq->xmit_lock_owner != -1)
 3534 		txq->trans_start = jiffies;
 3535 }
 3536 
 3537 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
 3538 static inline void netif_trans_update(struct net_device *dev)
 3539 {
 3540 	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 3541 
 3542 	if (txq->trans_start != jiffies)
 3543 		txq->trans_start = jiffies;
 3544 }
 3545 
 3546 /**
 3547  *	netif_tx_lock - grab network device transmit lock
 3548  *	@dev: network device
 3549  *
 3550  * Get network device transmit lock
 3551  */
 3552 static inline void netif_tx_lock(struct net_device *dev)
 3553 {
 3554 	unsigned int i;
 3555 	int cpu;
 3556 
 3557 	spin_lock(&dev->tx_global_lock);
 3558 	cpu = smp_processor_id();
 3559 	for (i = 0; i < dev->num_tx_queues; i++) {
 3560 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 3561 
 3562 		/* We are the only thread of execution doing a
 3563 		 * freeze, but we have to grab the _xmit_lock in
 3564 		 * order to synchronize with threads which are in
 3565 		 * the ->hard_start_xmit() handler and already
 3566 		 * checked the frozen bit.
 3567 		 */
 3568 		__netif_tx_lock(txq, cpu);
 3569 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 3570 		__netif_tx_unlock(txq);
 3571 	}
 3572 }
 3573 
 3574 static inline void netif_tx_lock_bh(struct net_device *dev)
 3575 {
 3576 	local_bh_disable();
 3577 	netif_tx_lock(dev);
 3578 }
 3579 
 3580 static inline void netif_tx_unlock(struct net_device *dev)
 3581 {
 3582 	unsigned int i;
 3583 
 3584 	for (i = 0; i < dev->num_tx_queues; i++) {
 3585 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 3586 
 3587 		/* No need to grab the _xmit_lock here.  If the
 3588 		 * queue is not stopped for another reason, we
 3589 		 * force a schedule.
 3590 		 */
 3591 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
 3592 		netif_schedule_queue(txq);
 3593 	}
 3594 	spin_unlock(&dev->tx_global_lock);
 3595 }
 3596 
 3597 static inline void netif_tx_unlock_bh(struct net_device *dev)
 3598 {
 3599 	netif_tx_unlock(dev);
 3600 	local_bh_enable();
 3601 }
 3602 
 3603 #define HARD_TX_LOCK(dev, txq, cpu) {			\
 3604 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 3605 		__netif_tx_lock(txq, cpu);		\
 3606 	}						\
 3607 }
 3608 
 3609 #define HARD_TX_TRYLOCK(dev, txq)			\
 3610 	(((dev->features & NETIF_F_LLTX) == 0) ?	\
 3611 		__netif_tx_trylock(txq) :		\
 3612 		true )
 3613 
 3614 #define HARD_TX_UNLOCK(dev, txq) {			\
 3615 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 3616 		__netif_tx_unlock(txq);			\
 3617 	}						\
 3618 }
 3619 
 3620 static inline void netif_tx_disable(struct net_device *dev)
 3621 {
 3622 	unsigned int i;
 3623 	int cpu;
 3624 
 3625 	local_bh_disable();
 3626 	cpu = smp_processor_id();
 3627 	for (i = 0; i < dev->num_tx_queues; i++) {
 3628 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 3629 
 3630 		__netif_tx_lock(txq, cpu);
 3631 		netif_tx_stop_queue(txq);
 3632 		__netif_tx_unlock(txq);
 3633 	}
 3634 	local_bh_enable();
 3635 }
 3636 
 3637 static inline void netif_addr_lock(struct net_device *dev)
 3638 {
 3639 	spin_lock(&dev->addr_list_lock);
 3640 }
 3641 
 3642 static inline void netif_addr_lock_nested(struct net_device *dev)
 3643 {
 3644 	int subclass = SINGLE_DEPTH_NESTING;
 3645 
 3646 	if (dev->netdev_ops->ndo_get_lock_subclass)
 3647 		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
 3648 
 3649 	spin_lock_nested(&dev->addr_list_lock, subclass);
 3650 }
 3651 
 3652 static inline void netif_addr_lock_bh(struct net_device *dev)
 3653 {
 3654 	spin_lock_bh(&dev->addr_list_lock);
 3655 }
 3656 
 3657 static inline void netif_addr_unlock(struct net_device *dev)
 3658 {
 3659 	spin_unlock(&dev->addr_list_lock);
 3660 }
 3661 
 3662 static inline void netif_addr_unlock_bh(struct net_device *dev)
 3663 {
 3664 	spin_unlock_bh(&dev->addr_list_lock);
 3665 }
 3666 
 3667 /*
 3668  * dev_addrs walker. Should be used only for read access. Call with
 3669  * rcu_read_lock held.
 3670  */
 3671 #define for_each_dev_addr(dev, ha) \
 3672 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
 3673 
 3674 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 3675 
 3676 void ether_setup(struct net_device *dev);
 3677 
 3678 /* Support for loadable net-drivers */
 3679 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 3680 				    unsigned char name_assign_type,
 3681 				    void (*setup)(struct net_device *),
 3682 				    unsigned int txqs, unsigned int rxqs);
 3683 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
 3684 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 3685 
 3686 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
 3687 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
 3688 			 count)
 3689 
 3690 int register_netdev(struct net_device *dev);
 3691 void unregister_netdev(struct net_device *dev);
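/* Hedged, illustrative sketch (not part of the kernel source above): bringing
 * up a software Ethernet-like device with the allocation helpers just
 * declared — allocate a single-queue netdev whose setup callback is
 * ether_setup(), then register it. NET_NAME_UNKNOWN and free_netdev() are
 * assumed to be the usual definitions from elsewhere in this kernel; error
 * handling is minimal.
 */
static struct net_device *example_create_netdev(void)
{
	struct net_device *dev;

	/* no private area for this sketch, hence sizeof_priv == 0 */
	dev = alloc_netdev(0, "exmpl%d", NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}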
 3692 
 3693 /* General hardware address lists handling functions */
 3694 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
 3695 		   struct netdev_hw_addr_list *from_list, int addr_len);
 3696 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
 3697 		      struct netdev_hw_addr_list *from_list, int addr_len);
 3698 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
 3699 		       struct net_device *dev,
 3700 		       int (*sync)(struct net_device *, const unsigned char *),
 3701 		       int (*unsync)(struct net_device *,
 3702 				     const unsigned char *));
 3703 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
 3704 			  struct net_device *dev,
 3705 			  int (*unsync)(struct net_device *,
 3706 					const unsigned char *));
 3707 void __hw_addr_init(struct netdev_hw_addr_list *list);
 3708 
 3709 /* Functions used for device addresses handling */
 3710 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
 3711 		 unsigned char addr_type);
 3712 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
 3713 		 unsigned char addr_type);
 3714 void dev_addr_flush(struct net_device *dev);
 3715 int dev_addr_init(struct net_device *dev);
 3716 
 3717 /* Functions used for unicast addresses handling */
 3718 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
 3719 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
 3720 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
 3721 int dev_uc_sync(struct net_device *to, struct net_device *from);
 3722 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
 3723 void dev_uc_unsync(struct net_device *to, struct net_device *from);
 3724 void dev_uc_flush(struct net_device *dev);
 3725 void dev_uc_init(struct net_device *dev);
 3726 
 3727 /**
3728  *  __dev_uc_sync - Synchronize device's unicast list
 3729  *  @dev:  device to sync
 3730  *  @sync: function to call if address should be added
 3731  *  @unsync: function to call if address should be removed
 3732  *
 3733  *  Add newly added addresses to the interface, and release
 3734  *  addresses that have been deleted.
 3735  */
 3736 static inline int __dev_uc_sync(struct net_device *dev,
 3737 				int (*sync)(struct net_device *,
 3738 					    const unsigned char *),
 3739 				int (*unsync)(struct net_device *,
 3740 					      const unsigned char *))
 3741 {
 3742 	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
 3743 }
 3744 
 3745 /**
 3746  *  __dev_uc_unsync - Remove synchronized addresses from device
 3747  *  @dev:  device to sync
 3748  *  @unsync: function to call if address should be removed
 3749  *
 3750  *  Remove all addresses that were added to the device by dev_uc_sync().
 3751  */
 3752 static inline void __dev_uc_unsync(struct net_device *dev,
 3753 				   int (*unsync)(struct net_device *,
 3754 						 const unsigned char *))
 3755 {
 3756 	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
 3757 }
 3758 
 3759 /* Functions used for multicast addresses handling */
 3760 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
 3761 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
 3762 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
 3763 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
 3764 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
 3765 int dev_mc_sync(struct net_device *to, struct net_device *from);
 3766 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
 3767 void dev_mc_unsync(struct net_device *to, struct net_device *from);
 3768 void dev_mc_flush(struct net_device *dev);
 3769 void dev_mc_init(struct net_device *dev);
 3770 
 3771 /**
3772  *  __dev_mc_sync - Synchronize device's multicast list
 3773  *  @dev:  device to sync
 3774  *  @sync: function to call if address should be added
 3775  *  @unsync: function to call if address should be removed
 3776  *
 3777  *  Add newly added addresses to the interface, and release
 3778  *  addresses that have been deleted.
 3779  */
 3780 static inline int __dev_mc_sync(struct net_device *dev,
 3781 				int (*sync)(struct net_device *,
 3782 					    const unsigned char *),
 3783 				int (*unsync)(struct net_device *,
 3784 					      const unsigned char *))
 3785 {
 3786 	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
 3787 }
 3788 
 3789 /**
 3790  *  __dev_mc_unsync - Remove synchronized addresses from device
 3791  *  @dev:  device to sync
 3792  *  @unsync: function to call if address should be removed
 3793  *
 3794  *  Remove all addresses that were added to the device by dev_mc_sync().
 3795  */
 3796 static inline void __dev_mc_unsync(struct net_device *dev,
 3797 				   int (*unsync)(struct net_device *,
 3798 						 const unsigned char *))
 3799 {
 3800 	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
 3801 }
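/* Hedged, illustrative sketch (not part of the kernel source above): a
 * driver's ->ndo_set_rx_mode() using __dev_mc_sync() so that only newly added
 * or removed multicast addresses are pushed to the hardware filter instead of
 * rewriting the whole table. The two filter callbacks are placeholders for
 * hardware-specific code.
 */
static int example_mc_add(struct net_device *dev, const unsigned char *addr)
{
	/* program one multicast address into the hardware filter */
	return 0;
}

static int example_mc_del(struct net_device *dev, const unsigned char *addr)
{
	/* remove one multicast address from the hardware filter */
	return 0;
}

static void example_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, example_mc_add, example_mc_del);
}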
 3802 
 3803 /* Functions used for secondary unicast and multicast support */
 3804 void dev_set_rx_mode(struct net_device *dev);
 3805 void __dev_set_rx_mode(struct net_device *dev);
 3806 int dev_set_promiscuity(struct net_device *dev, int inc);
 3807 int dev_set_allmulti(struct net_device *dev, int inc);
 3808 void netdev_state_change(struct net_device *dev);
 3809 void netdev_notify_peers(struct net_device *dev);
 3810 void netdev_features_change(struct net_device *dev);
 3811 /* Load a device via the kmod */
 3812 void dev_load(struct net *net, const char *name);
 3813 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 3814 					struct rtnl_link_stats64 *storage);
 3815 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 3816 			     const struct net_device_stats *netdev_stats);
 3817 
 3818 extern int		netdev_max_backlog;
 3819 extern int		netdev_tstamp_prequeue;
 3820 extern int		weight_p;
 3821 
 3822 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 3823 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 3824 						     struct list_head **iter);
 3825 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 3826 						     struct list_head **iter);
 3827 
 3828 /* iterate through upper list, must be called under RCU read lock */
 3829 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
 3830 	for (iter = &(dev)->adj_list.upper, \
 3831 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
 3832 	     updev; \
 3833 	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
 3834 
 3835 /* iterate through upper list, must be called under RCU read lock */
 3836 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
 3837 	for (iter = &(dev)->all_adj_list.upper, \
 3838 	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
 3839 	     updev; \
 3840 	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
 3841 
 3842 void *netdev_lower_get_next_private(struct net_device *dev,
 3843 				    struct list_head **iter);
 3844 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 3845 					struct list_head **iter);
 3846 
 3847 #define netdev_for_each_lower_private(dev, priv, iter) \
 3848 	for (iter = (dev)->adj_list.lower.next, \
 3849 	     priv = netdev_lower_get_next_private(dev, &(iter)); \
 3850 	     priv; \
 3851 	     priv = netdev_lower_get_next_private(dev, &(iter)))
 3852 
 3853 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
 3854 	for (iter = &(dev)->adj_list.lower, \
 3855 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
 3856 	     priv; \
 3857 	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 3858 
 3859 void *netdev_lower_get_next(struct net_device *dev,
 3860 				struct list_head **iter);
 3861 
 3862 #define netdev_for_each_lower_dev(dev, ldev, iter) \
 3863 	for (iter = (dev)->adj_list.lower.next, \
 3864 	     ldev = netdev_lower_get_next(dev, &(iter)); \
 3865 	     ldev; \
 3866 	     ldev = netdev_lower_get_next(dev, &(iter)))
 3867 
 3868 struct net_device *netdev_all_lower_get_next(struct net_device *dev,
 3869 					     struct list_head **iter);
 3870 struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
 3871 						 struct list_head **iter);
 3872 
 3873 #define netdev_for_each_all_lower_dev(dev, ldev, iter) \
 3874 	for (iter = (dev)->all_adj_list.lower.next, \
 3875 	     ldev = netdev_all_lower_get_next(dev, &(iter)); \
 3876 	     ldev; \
 3877 	     ldev = netdev_all_lower_get_next(dev, &(iter)))
 3878 
 3879 #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
 3880 	for (iter = (dev)->all_adj_list.lower.next, \
 3881 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
 3882 	     ldev; \
 3883 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
 3884 
 3885 void *netdev_adjacent_get_private(struct list_head *adj_list);
 3886 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 3887 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
 3888 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
 3889 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
 3890 int netdev_master_upper_dev_link(struct net_device *dev,
 3891 				 struct net_device *upper_dev,
 3892 				 void *upper_priv, void *upper_info);
 3893 void netdev_upper_dev_unlink(struct net_device *dev,
 3894 			     struct net_device *upper_dev);
 3895 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 3896 void *netdev_lower_dev_get_private(struct net_device *dev,
 3897 				   struct net_device *lower_dev);
 3898 void netdev_lower_state_changed(struct net_device *lower_dev,
 3899 				void *lower_state_info);
 3900 int netdev_default_l2upper_neigh_construct(struct net_device *dev,
 3901 					   struct neighbour *n);
 3902 void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
 3903 					  struct neighbour *n);
 3904 
 3905 /* RSS keys are 40 or 52 bytes long */
 3906 #define NETDEV_RSS_KEY_LEN 52
 3907 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 3908 void netdev_rss_key_fill(void *buffer, size_t len);
 3909 
 3910 int dev_get_nest_level(struct net_device *dev);
 3911 int skb_checksum_help(struct sk_buff *skb);
 3912 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 3913 				  netdev_features_t features, bool tx_path);
 3914 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 3915 				    netdev_features_t features);
 3916 
 3917 struct netdev_bonding_info {
 3918 	ifslave	slave;
 3919 	ifbond	master;
 3920 };
 3921 
 3922 struct netdev_notifier_bonding_info {
 3923 	struct netdev_notifier_info info; /* must be first */
 3924 	struct netdev_bonding_info  bonding_info;
 3925 };
 3926 
 3927 void netdev_bonding_info_change(struct net_device *dev,
 3928 				struct netdev_bonding_info *bonding_info);
 3929 
 3930 static inline
 3931 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 3932 {
 3933 	return __skb_gso_segment(skb, features, true);
 3934 }
 3935 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
 3936 
 3937 static inline bool can_checksum_protocol(netdev_features_t features,
 3938 					 __be16 protocol)
 3939 {
 3940 	if (protocol == htons(ETH_P_FCOE))
 3941 		return !!(features & NETIF_F_FCOE_CRC);
 3942 
 3943 	/* Assume this is an IP checksum (not SCTP CRC) */
 3944 
 3945 	if (features & NETIF_F_HW_CSUM) {
 3946 		/* Can checksum everything */
 3947 		return true;
 3948 	}
 3949 
 3950 	switch (protocol) {
 3951 	case htons(ETH_P_IP):
 3952 		return !!(features & NETIF_F_IP_CSUM);
 3953 	case htons(ETH_P_IPV6):
 3954 		return !!(features & NETIF_F_IPV6_CSUM);
 3955 	default:
 3956 		return false;
 3957 	}
 3958 }
 3959 
 3960 /* Map an ethertype into IP protocol if possible */
 3961 static inline int eproto_to_ipproto(int eproto)
 3962 {
 3963 	switch (eproto) {
 3964 	case htons(ETH_P_IP):
 3965 		return IPPROTO_IP;
 3966 	case htons(ETH_P_IPV6):
 3967 		return IPPROTO_IPV6;
 3968 	default:
 3969 		return -1;
 3970 	}
 3971 }
 3972 
 3973 #ifdef CONFIG_BUG
 3974 void netdev_rx_csum_fault(struct net_device *dev);
 3975 #else
 3976 static inline void netdev_rx_csum_fault(struct net_device *dev)
 3977 {
 3978 }
 3979 #endif
 3980 /* rx skb timestamps */
 3981 void net_enable_timestamp(void);
 3982 void net_disable_timestamp(void);
 3983 
 3984 #ifdef CONFIG_PROC_FS
 3985 int __init dev_proc_init(void);
 3986 #else
 3987 #define dev_proc_init() 0
 3988 #endif
 3989 
 3990 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
 3991 					      struct sk_buff *skb, struct net_device *dev,
 3992 					      bool more)
 3993 {
 3994 	skb->xmit_more = more ? 1 : 0;
 3995 	return ops->ndo_start_xmit(skb, dev);
 3996 }
 3997 
 3998 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
 3999 					    struct netdev_queue *txq, bool more)
 4000 {
 4001 	const struct net_device_ops *ops = dev->netdev_ops;
 4002 	int rc;
 4003 
 4004 	rc = __netdev_start_xmit(ops, skb, dev, more);
 4005 	if (rc == NETDEV_TX_OK)
 4006 		txq_trans_update(txq);
 4007 
 4008 	return rc;
 4009 }
 4010 
 4011 int netdev_class_create_file_ns(struct class_attribute *class_attr,
 4012 				const void *ns);
 4013 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
 4014 				 const void *ns);
 4015 
 4016 static inline int netdev_class_create_file(struct class_attribute *class_attr)
 4017 {
 4018 	return netdev_class_create_file_ns(class_attr, NULL);
 4019 }
 4020 
 4021 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
 4022 {
 4023 	netdev_class_remove_file_ns(class_attr, NULL);
 4024 }
 4025 
 4026 extern struct kobj_ns_type_operations net_ns_type_operations;
 4027 
 4028 const char *netdev_drivername(const struct net_device *dev);
 4029 
 4030 void linkwatch_run_queue(void);
 4031 
 4032 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
 4033 							  netdev_features_t f2)
 4034 {
 4035 	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
 4036 		if (f1 & NETIF_F_HW_CSUM)
 4037 			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 4038 		else
 4039 			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 4040 	}
 4041 
 4042 	return f1 & f2;
 4043 }
 4044 
 4045 static inline netdev_features_t netdev_get_wanted_features(
 4046 	struct net_device *dev)
 4047 {
 4048 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
 4049 }
 4050 netdev_features_t netdev_increment_features(netdev_features_t all,
 4051 	netdev_features_t one, netdev_features_t mask);
 4052 
 4053 /* Allow TSO being used on stacked device :
 4054  * Performing the GSO segmentation before last device
 4055  * is a performance improvement.
 4056  */
 4057 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
 4058 							netdev_features_t mask)
 4059 {
 4060 	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
 4061 }
 4062 
 4063 int __netdev_update_features(struct net_device *dev);
 4064 void netdev_update_features(struct net_device *dev);
 4065 void netdev_change_features(struct net_device *dev);
 4066 
 4067 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 4068 					struct net_device *dev);
 4069 
 4070 netdev_features_t passthru_features_check(struct sk_buff *skb,
 4071 					  struct net_device *dev,
 4072 					  netdev_features_t features);
 4073 netdev_features_t netif_skb_features(struct sk_buff *skb);
 4074 
 4075 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 4076 {
 4077 	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
 4078 
 4079 	/* check flags correspondence */
 4080 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 4081 	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
 4082 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 4083 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 4084 	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
 4085 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 4086 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 4087 	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
 4088 	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
 4089 	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
 4090 	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
 4091 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
 4092 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
 4093 	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
 4094 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 4095 	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
 4096 
 4097 	return (features & feature) == feature;
 4098 }
 4099 
 4100 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 4101 {
 4102 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 4103 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 4104 }
 4105 
 4106 static inline bool netif_needs_gso(struct sk_buff *skb,
 4107 				   netdev_features_t features)
 4108 {
 4109 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
 4110 		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
 4111 			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
 4112 }
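/* Hedged, illustrative sketch (not part of the kernel source above): how the
 * transmit path combines the helpers just defined — if the device cannot
 * handle this GSO skb with the features it currently offers, segment it in
 * software first. In practice "features" would come from
 * netif_skb_features(skb); the function name is made up.
 */
static struct sk_buff *example_soften_gso(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR_OR_NULL(segs))
			return NULL;	/* caller frees the original skb */
		return segs;		/* linked list of segments */
	}
	return skb;
}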
 4113 
 4114 static inline void netif_set_gso_max_size(struct net_device *dev,
 4115 					  unsigned int size)
 4116 {
 4117 	dev->gso_max_size = size;
 4118 }
 4119 
 4120 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
 4121 					int pulled_hlen, u16 mac_offset,
 4122 					int mac_len)
 4123 {
 4124 	skb->protocol = protocol;
 4125 	skb->encapsulation = 1;
 4126 	skb_push(skb, pulled_hlen);
 4127 	skb_reset_transport_header(skb);
 4128 	skb->mac_header = mac_offset;
 4129 	skb->network_header = skb->mac_header + mac_len;
 4130 	skb->mac_len = mac_len;
 4131 }
 4132 
 4133 static inline bool netif_is_macsec(const struct net_device *dev)
 4134 {
 4135 	return dev->priv_flags & IFF_MACSEC;
 4136 }
 4137 
 4138 static inline bool netif_is_macvlan(const struct net_device *dev)
 4139 {
 4140 	return dev->priv_flags & IFF_MACVLAN;
 4141 }
 4142 
 4143 static inline bool netif_is_macvlan_port(const struct net_device *dev)
 4144 {
 4145 	return dev->priv_flags & IFF_MACVLAN_PORT;
 4146 }
 4147 
 4148 static inline bool netif_is_ipvlan(const struct net_device *dev)
 4149 {
 4150 	return dev->priv_flags & IFF_IPVLAN_SLAVE;
 4151 }
 4152 
 4153 static inline bool netif_is_ipvlan_port(const struct net_device *dev)
 4154 {
 4155 	return dev->priv_flags & IFF_IPVLAN_MASTER;
 4156 }
 4157 
 4158 static inline bool netif_is_bond_master(const struct net_device *dev)
 4159 {
 4160 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
 4161 }
 4162 
 4163 static inline bool netif_is_bond_slave(const struct net_device *dev)
 4164 {
 4165 	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 4166 }
 4167 
 4168 static inline bool netif_supports_nofcs(struct net_device *dev)
 4169 {
 4170 	return dev->priv_flags & IFF_SUPP_NOFCS;
 4171 }
 4172 
 4173 static inline bool netif_is_l3_master(const struct net_device *dev)
 4174 {
 4175 	return dev->priv_flags & IFF_L3MDEV_MASTER;
 4176 }
 4177 
 4178 static inline bool netif_is_l3_slave(const struct net_device *dev)
 4179 {
 4180 	return dev->priv_flags & IFF_L3MDEV_SLAVE;
 4181 }
 4182 
 4183 static inline bool netif_is_bridge_master(const struct net_device *dev)
 4184 {
 4185 	return dev->priv_flags & IFF_EBRIDGE;
 4186 }
 4187 
 4188 static inline bool netif_is_bridge_port(const struct net_device *dev)
 4189 {
 4190 	return dev->priv_flags & IFF_BRIDGE_PORT;
 4191 }
 4192 
 4193 static inline bool netif_is_ovs_master(const struct net_device *dev)
 4194 {
 4195 	return dev->priv_flags & IFF_OPENVSWITCH;
 4196 }
 4197 
 4198 static inline bool netif_is_team_master(const struct net_device *dev)
 4199 {
 4200 	return dev->priv_flags & IFF_TEAM;
 4201 }
 4202 
 4203 static inline bool netif_is_team_port(const struct net_device *dev)
 4204 {
 4205 	return dev->priv_flags & IFF_TEAM_PORT;
 4206 }
 4207 
 4208 static inline bool netif_is_lag_master(const struct net_device *dev)
 4209 {
 4210 	return netif_is_bond_master(dev) || netif_is_team_master(dev);
 4211 }
 4212 
 4213 static inline bool netif_is_lag_port(const struct net_device *dev)
 4214 {
 4215 	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
 4216 }
 4217 
 4218 static inline bool netif_is_rxfh_configured(const struct net_device *dev)
 4219 {
 4220 	return dev->priv_flags & IFF_RXFH_CONFIGURED;
 4221 }
 4222 
 4223 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 4224 static inline void netif_keep_dst(struct net_device *dev)
 4225 {
 4226 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
 4227 }
 4228 
 4229 /* return true if dev can't cope with mtu frames that need vlan tag insertion */
 4230 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
 4231 {
 4232 	/* TODO: reserve and use an additional IFF bit, if we get more users */
 4233 	return dev->priv_flags & IFF_MACSEC;
 4234 }
 4235 
 4236 extern struct pernet_operations __net_initdata loopback_net_ops;
 4237 
 4238 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 4239 
 4240 /* netdev_printk helpers, similar to dev_printk */
 4241 
 4242 static inline const char *netdev_name(const struct net_device *dev)
 4243 {
 4244 	if (!dev->name[0] || strchr(dev->name, '%'))
 4245 		return "(unnamed net_device)";
 4246 	return dev->name;
 4247 }
 4248 
 4249 static inline const char *netdev_reg_state(const struct net_device *dev)
 4250 {
 4251 	switch (dev->reg_state) {
 4252 	case NETREG_UNINITIALIZED: return " (uninitialized)";
 4253 	case NETREG_REGISTERED: return "";
 4254 	case NETREG_UNREGISTERING: return " (unregistering)";
 4255 	case NETREG_UNREGISTERED: return " (unregistered)";
 4256 	case NETREG_RELEASED: return " (released)";
 4257 	case NETREG_DUMMY: return " (dummy)";
 4258 	}
 4259 
 4260 	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
 4261 	return " (unknown)";
 4262 }
 4263 
 4264 __printf(3, 4)
 4265 void netdev_printk(const char *level, const struct net_device *dev,
 4266 		   const char *format, ...);
 4267 __printf(2, 3)
 4268 void netdev_emerg(const struct net_device *dev, const char *format, ...);
 4269 __printf(2, 3)
 4270 void netdev_alert(const struct net_device *dev, const char *format, ...);
 4271 __printf(2, 3)
 4272 void netdev_crit(const struct net_device *dev, const char *format, ...);
 4273 __printf(2, 3)
 4274 void netdev_err(const struct net_device *dev, const char *format, ...);
 4275 __printf(2, 3)
 4276 void netdev_warn(const struct net_device *dev, const char *format, ...);
 4277 __printf(2, 3)
 4278 void netdev_notice(const struct net_device *dev, const char *format, ...);
 4279 __printf(2, 3)
 4280 void netdev_info(const struct net_device *dev, const char *format, ...);
 4281 
 4282 #define MODULE_ALIAS_NETDEV(device) \
 4283 	MODULE_ALIAS("netdev-" device)
 4284 
 4285 #if defined(CONFIG_DYNAMIC_DEBUG)
 4286 #define netdev_dbg(__dev, format, args...)			\
 4287 do {								\
 4288 	dynamic_netdev_dbg(__dev, format, ##args);		\
 4289 } while (0)
 4290 #elif defined(DEBUG)
 4291 #define netdev_dbg(__dev, format, args...)			\
 4292 	netdev_printk(KERN_DEBUG, __dev, format, ##args)
 4293 #else
 4294 #define netdev_dbg(__dev, format, args...)			\
 4295 ({								\
 4296 	if (0)							\
 4297 		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
 4298 })
 4299 #endif
 4300 
 4301 #if defined(VERBOSE_DEBUG)
 4302 #define netdev_vdbg	netdev_dbg
 4303 #else
 4304 
 4305 #define netdev_vdbg(dev, format, args...)			\
 4306 ({								\
 4307 	if (0)							\
 4308 		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
 4309 	0;							\
 4310 })
 4311 #endif
 4312 
 4313 /*
 4314  * netdev_WARN() acts like dev_printk(), but with the key difference
 4315  * of using a WARN/WARN_ON to get the message out, including the
 4316  * file/line information and a backtrace.
 4317  */
 4318 #define netdev_WARN(dev, format, args...)			\
 4319 	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
 4320 	     netdev_reg_state(dev), ##args)
 4321 
 4322 /* netif printk helpers, similar to netdev_printk */
 4323 
 4324 #define netif_printk(priv, type, level, dev, fmt, args...)	\
 4325 do {					  			\
 4326 	if (netif_msg_##type(priv))				\
 4327 		netdev_printk(level, (dev), fmt, ##args);	\
 4328 } while (0)
 4329 
 4330 #define netif_level(level, priv, type, dev, fmt, args...)	\
 4331 do {								\
 4332 	if (netif_msg_##type(priv))				\
 4333 		netdev_##level(dev, fmt, ##args);		\
 4334 } while (0)
 4335 
 4336 #define netif_emerg(priv, type, dev, fmt, args...)		\
 4337 	netif_level(emerg, priv, type, dev, fmt, ##args)
 4338 #define netif_alert(priv, type, dev, fmt, args...)		\
 4339 	netif_level(alert, priv, type, dev, fmt, ##args)
 4340 #define netif_crit(priv, type, dev, fmt, args...)		\
 4341 	netif_level(crit, priv, type, dev, fmt, ##args)
 4342 #define netif_err(priv, type, dev, fmt, args...)		\
 4343 	netif_level(err, priv, type, dev, fmt, ##args)
 4344 #define netif_warn(priv, type, dev, fmt, args...)		\
 4345 	netif_level(warn, priv, type, dev, fmt, ##args)
 4346 #define netif_notice(priv, type, dev, fmt, args...)		\
 4347 	netif_level(notice, priv, type, dev, fmt, ##args)
 4348 #define netif_info(priv, type, dev, fmt, args...)		\
 4349 	netif_level(info, priv, type, dev, fmt, ##args)
 4350 
 4351 #if defined(CONFIG_DYNAMIC_DEBUG)
 4352 #define netif_dbg(priv, type, netdev, format, args...)		\
 4353 do {								\
 4354 	if (netif_msg_##type(priv))				\
 4355 		dynamic_netdev_dbg(netdev, format, ##args);	\
 4356 } while (0)
 4357 #elif defined(DEBUG)
 4358 #define netif_dbg(priv, type, dev, format, args...)		\
 4359 	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
 4360 #else
 4361 #define netif_dbg(priv, type, dev, format, args...)			\
 4362 ({									\
 4363 	if (0)								\
 4364 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 4365 	0;								\
 4366 })
 4367 #endif
 4368 
 4369 #if defined(VERBOSE_DEBUG)
 4370 #define netif_vdbg	netif_dbg
 4371 #else
 4372 #define netif_vdbg(priv, type, dev, format, args...)		\
 4373 ({								\
 4374 	if (0)							\
 4375 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 4376 	0;							\
 4377 })
 4378 #endif
 4379 
 4380 /*
 4381  *	The list of packet types we will receive (as opposed to discard)
 4382  *	and the routines to invoke.
 4383  *
 4384  *	Why 16. Because with 16 the only overlap we get on a hash of the
 4385  *	low nibble of the protocol value is RARP/SNAP/X.25.
 4386  *
 4387  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 4388  *             sure which should go first, but I bet it won't make much
 4389  *             difference if we are running VLANs.  The good news is that
 4390  *             this protocol won't be in the list unless compiled in, so
 4391  *             the average user (w/out VLANs) will not be adversely affected.
 4392  *             --BLG
 4393  *
 4394  *		0800	IP
 4395  *		8100    802.1Q VLAN
 4396  *		0001	802.3
 4397  *		0002	AX.25
 4398  *		0004	802.2
 4399  *		8035	RARP
 4400  *		0005	SNAP
 4401  *		0805	X.25
 4402  *		0806	ARP
 4403  *		8137	IPX
 4404  *		0009	Localtalk
 4405  *		86DD	IPv6
 4406  */
 4407 #define PTYPE_HASH_SIZE	(16)
 4408 #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
 4409 
 4410 #endif	/* _LINUX_NETDEVICE_H */
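For reference, the following is a minimal, hypothetical sketch (it is not part of netdevice.h above and is not taken from the error trace) showing how a driver's transmit path might combine the helpers declared in this listing: netif_skb_features(), netif_needs_gso() and netdev_dbg(). The function name example_xmit_needs_sw_gso and the software-GSO fallback policy are assumptions made purely for illustration.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical helper, for illustration only: decide whether an skb
	 * has to be segmented in software before it is handed to the
	 * hardware queue.
	 */
	static bool example_xmit_needs_sw_gso(struct net_device *dev,
					      struct sk_buff *skb)
	{
		/* Features that are actually usable for this particular skb. */
		netdev_features_t features = netif_skb_features(skb);

		/* netif_needs_gso() (defined above) is true when the skb is
		 * GSO but the device cannot segment it with these features.
		 */
		if (netif_needs_gso(skb, features)) {
			netdev_dbg(dev, "falling back to software GSO\n");
			return true;
		}

		return false;
	}

In mainline kernels this decision is normally made by the networking core (e.g. in validate_xmit_skb()) rather than by individual drivers; the sketch only shows how the predicates above compose.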
Here is an explanation of a rule violation found while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether your driver actually contains an error.
The Error trace column shows the path along which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column shows the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's contents.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
| linux-4.9-rc1.tar.xz | drivers/net/ethernet/cadence/macb.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-11-19 01:42:27 | L0253 |
Comment
Reported: 19 Nov 2016
[Back to top]